author    Ali Ijaz Sheikh <ofrobots@google.com>  2016-01-20 09:45:45 -0800
committer Ali Ijaz Sheikh <ofrobots@google.com>  2016-01-21 16:53:58 -0800
commit    ef4170ea03a80b21b2d8a65ce432efaa370fe2fa (patch)
tree      e382b1b38b729cd8155b56b441c3a563914854a3 /deps/v8/src
parent    5f6dfab832979999d2f806fc1a2f1c11a25b0f35 (diff)
download  node-new-ef4170ea03a80b21b2d8a65ce432efaa370fe2fa.tar.gz

deps: upgrade to V8 4.8.271.17

Pick up V8 4.8 branch-head. This branch brings in @@isConcatSpreadable,
@@toPrimitive and ToLength ES6 changes. For full details see:
http://v8project.blogspot.de/2015/11/v8-release-48.html
https://github.com/v8/v8/commit/fa163e2

Ref: https://github.com/nodejs/node/pull/4399
PR-URL: https://github.com/nodejs/node/pull/4785
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
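
For context, a brief illustrative sketch (not part of this commit) of the
three ES6 behaviors named above, as seen from JavaScript; the names
"spreadable" and "obj" are made up for the example:

    // @@isConcatSpreadable: an object can opt into being flattened by
    // Array.prototype.concat.
    var spreadable = { length: 2, 0: 'a', 1: 'b' };
    spreadable[Symbol.isConcatSpreadable] = true;
    [].concat(spreadable);                       // ['a', 'b']

    // @@toPrimitive: an object can control its own coercion to a primitive,
    // based on the hint ('number', 'string', or 'default') it receives.
    var obj = {};
    obj[Symbol.toPrimitive] = function(hint) {
      return hint === 'number' ? 42 : 'forty-two';
    };
    +obj;                                        // 42
    String(obj);                                 // 'forty-two'

    // ToLength: length values are clamped to the range [0, 2^53 - 1] rather
    // than wrapped modulo 2^32, so a negative length behaves as 0.
    Array.prototype.slice.call({ length: -1 });  // []
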
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/accessors.cc60
-rw-r--r--deps/v8/src/accessors.h3
-rw-r--r--deps/v8/src/address-map.cc38
-rw-r--r--deps/v8/src/address-map.h184
-rw-r--r--deps/v8/src/allocation-site-scopes.h5
-rw-r--r--deps/v8/src/allocation.h3
-rw-r--r--deps/v8/src/api-natives.cc130
-rw-r--r--deps/v8/src/api-natives.h9
-rw-r--r--deps/v8/src/api.cc342
-rw-r--r--deps/v8/src/api.h11
-rw-r--r--deps/v8/src/arguments.h10
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h47
-rw-r--r--deps/v8/src/arm/assembler-arm.cc65
-rw-r--r--deps/v8/src/arm/assembler-arm.h254
-rw-r--r--deps/v8/src/arm/builtins-arm.cc296
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc186
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h3
-rw-r--r--deps/v8/src/arm/codegen-arm.h3
-rw-r--r--deps/v8/src/arm/constants-arm.cc11
-rw-r--r--deps/v8/src/arm/constants-arm.h3
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc19
-rw-r--r--deps/v8/src/arm/disasm-arm.cc2
-rw-r--r--deps/v8/src/arm/frames-arm.h3
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc46
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.h4
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc28
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h32
-rw-r--r--deps/v8/src/arm/simulator-arm.cc3
-rw-r--r--deps/v8/src/arm/simulator-arm.h6
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h9
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc40
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h283
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc236
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc189
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.h3
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h3
-rw-r--r--deps/v8/src/arm64/constants-arm64.h60
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h3
-rw-r--r--deps/v8/src/arm64/decoder-arm64.h3
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc9
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc164
-rw-r--r--deps/v8/src/arm64/disasm-arm64.h13
-rw-r--r--deps/v8/src/arm64/frames-arm64.h3
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h3
-rw-r--r--deps/v8/src/arm64/instrument-arm64.h3
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc47
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.h4
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h3
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc46
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h7
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h9
-rw-r--r--deps/v8/src/arm64/utils-arm64.h9
-rw-r--r--deps/v8/src/assembler.cc58
-rw-r--r--deps/v8/src/assembler.h21
-rw-r--r--deps/v8/src/assert-scope.h3
-rw-r--r--deps/v8/src/ast-expression-visitor.cc59
-rw-r--r--deps/v8/src/ast-expression-visitor.h11
-rw-r--r--deps/v8/src/ast-literal-reindexer.cc9
-rw-r--r--deps/v8/src/ast-literal-reindexer.h6
-rw-r--r--deps/v8/src/ast-numbering.cc33
-rw-r--r--deps/v8/src/ast-value-factory.cc2
-rw-r--r--deps/v8/src/ast-value-factory.h4
-rw-r--r--deps/v8/src/ast.cc80
-rw-r--r--deps/v8/src/ast.h276
-rw-r--r--deps/v8/src/background-parsing-task.h4
-rw-r--r--deps/v8/src/bailout-reason.h35
-rw-r--r--deps/v8/src/base.isolate46
-rw-r--r--deps/v8/src/base/atomicops.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_arm64_gcc.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_arm_gcc.h6
-rw-r--r--deps/v8/src/base/atomicops_internals_atomicword_compat.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_mac.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_mips64_gcc.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_mips_gcc.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_portable.h4
-rw-r--r--deps/v8/src/base/atomicops_internals_ppc_gcc.h4
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_gcc.cc3
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_gcc.h3
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_msvc.h3
-rw-r--r--deps/v8/src/base/build_config.h4
-rw-r--r--deps/v8/src/base/cpu.cc3
-rw-r--r--deps/v8/src/base/cpu.h3
-rw-r--r--deps/v8/src/base/lazy-instance.h3
-rw-r--r--deps/v8/src/base/logging.h7
-rw-r--r--deps/v8/src/base/macros.h5
-rw-r--r--deps/v8/src/base/once.cc3
-rw-r--r--deps/v8/src/base/once.h3
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc3
-rw-r--r--deps/v8/src/base/platform/condition-variable.h3
-rw-r--r--deps/v8/src/base/platform/elapsed-timer.h3
-rw-r--r--deps/v8/src/base/platform/mutex.cc3
-rw-r--r--deps/v8/src/base/platform/mutex.h3
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc4
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc3
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc3
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc3
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc3
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc3
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc3
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc3
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc23
-rw-r--r--deps/v8/src/base/platform/platform.h5
-rw-r--r--deps/v8/src/base/platform/semaphore.cc7
-rw-r--r--deps/v8/src/base/platform/semaphore.h3
-rw-r--r--deps/v8/src/base/platform/time.cc3
-rw-r--r--deps/v8/src/base/platform/time.h3
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc3
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h3
-rw-r--r--deps/v8/src/bignum-dtoa.h3
-rw-r--r--deps/v8/src/bignum.cc4
-rw-r--r--deps/v8/src/bignum.h3
-rw-r--r--deps/v8/src/bootstrapper.cc973
-rw-r--r--deps/v8/src/bootstrapper.h7
-rw-r--r--deps/v8/src/builtins.cc319
-rw-r--r--deps/v8/src/builtins.h72
-rw-r--r--deps/v8/src/cached-powers.h3
-rw-r--r--deps/v8/src/char-predicates-inl.h3
-rw-r--r--deps/v8/src/char-predicates.h3
-rw-r--r--deps/v8/src/checks.h3
-rw-r--r--deps/v8/src/code-factory.cc112
-rw-r--r--deps/v8/src/code-factory.h34
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc90
-rw-r--r--deps/v8/src/code-stubs.cc20
-rw-r--r--deps/v8/src/code-stubs.h145
-rw-r--r--deps/v8/src/codegen.cc10
-rw-r--r--deps/v8/src/codegen.h3
-rw-r--r--deps/v8/src/compilation-cache.h3
-rw-r--r--deps/v8/src/compilation-dependencies.cc18
-rw-r--r--deps/v8/src/compilation-dependencies.h6
-rw-r--r--deps/v8/src/compiler.cc135
-rw-r--r--deps/v8/src/compiler.h82
-rw-r--r--deps/v8/src/compiler/OWNERS1
-rw-r--r--deps/v8/src/compiler/access-builder.cc105
-rw-r--r--deps/v8/src/compiler/access-builder.h26
-rw-r--r--deps/v8/src/compiler/access-info.cc413
-rw-r--r--deps/v8/src/compiler/access-info.h146
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc77
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h3
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc250
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc202
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h10
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc402
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc828
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h65
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc16
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.h7
-rw-r--r--deps/v8/src/compiler/binary-operator-reducer.cc128
-rw-r--r--deps/v8/src/compiler/binary-operator-reducer.h52
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc269
-rw-r--r--deps/v8/src/compiler/branch-elimination.h97
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc398
-rw-r--r--deps/v8/src/compiler/c-linkage.cc9
-rw-r--r--deps/v8/src/compiler/change-lowering.cc77
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h12
-rw-r--r--deps/v8/src/compiler/code-generator.cc32
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc12
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/common-operator.cc74
-rw-r--r--deps/v8/src/compiler/common-operator.h12
-rw-r--r--deps/v8/src/compiler/control-builders.cc10
-rw-r--r--deps/v8/src/compiler/control-builders.h1
-rw-r--r--deps/v8/src/compiler/frame.h6
-rw-r--r--deps/v8/src/compiler/gap-resolver.cc2
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc9
-rw-r--r--deps/v8/src/compiler/graph-reducer.h5
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc13
-rw-r--r--deps/v8/src/compiler/greedy-allocator.cc66
-rw-r--r--deps/v8/src/compiler/greedy-allocator.h8
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc92
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h3
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc283
-rw-r--r--deps/v8/src/compiler/instruction-codes.h66
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h53
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc406
-rw-r--r--deps/v8/src/compiler/instruction-selector.h16
-rw-r--r--deps/v8/src/compiler/instruction.cc66
-rw-r--r--deps/v8/src/compiler/instruction.h211
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.cc270
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.h51
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc55
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h11
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc3
-rw-r--r--deps/v8/src/compiler/js-frame-specialization.cc5
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc145
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc300
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.h84
-rw-r--r--deps/v8/src/compiler/js-graph.cc13
-rw-r--r--deps/v8/src/compiler/js-graph.h13
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc125
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h62
-rw-r--r--deps/v8/src/compiler/js-inlining.cc175
-rw-r--r--deps/v8/src/compiler/js-inlining.h19
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc157
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h22
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc898
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h112
-rw-r--r--deps/v8/src/compiler/js-operator.cc388
-rw-r--r--deps/v8/src/compiler/js-operator.h303
-rw-r--r--deps/v8/src/compiler/js-type-feedback-lowering.cc119
-rw-r--r--deps/v8/src/compiler/js-type-feedback-lowering.h66
-rw-r--r--deps/v8/src/compiler/js-type-feedback.cc364
-rw-r--r--deps/v8/src/compiler/js-type-feedback.h119
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc963
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h35
-rw-r--r--deps/v8/src/compiler/linkage.cc60
-rw-r--r--deps/v8/src/compiler/linkage.h20
-rw-r--r--deps/v8/src/compiler/live-range-separator.cc159
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc1
-rw-r--r--deps/v8/src/compiler/machine-operator.cc44
-rw-r--r--deps/v8/src/compiler/machine-operator.h27
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc216
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h7
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc366
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc234
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h10
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc390
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc58
-rw-r--r--deps/v8/src/compiler/node-properties.cc21
-rw-r--r--deps/v8/src/compiler/node-properties.h5
-rw-r--r--deps/v8/src/compiler/opcodes.h22
-rw-r--r--deps/v8/src/compiler/operator-properties.cc15
-rw-r--r--deps/v8/src/compiler/pipeline.cc170
-rw-r--r--deps/v8/src/compiler/pipeline.h4
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc115
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h8
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc325
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc54
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h45
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc34
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h1
-rw-r--r--deps/v8/src/compiler/register-allocator.cc437
-rw-r--r--deps/v8/src/compiler/register-allocator.h64
-rw-r--r--deps/v8/src/compiler/register-configuration.cc76
-rw-r--r--deps/v8/src/compiler/register-configuration.h56
-rw-r--r--deps/v8/src/compiler/representation-change.h13
-rw-r--r--deps/v8/src/compiler/scheduler.cc30
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc221
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h6
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc2
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc4
-rw-r--r--deps/v8/src/compiler/simplified-operator.h6
-rw-r--r--deps/v8/src/compiler/typer.cc392
-rw-r--r--deps/v8/src/compiler/typer.h22
-rw-r--r--deps/v8/src/compiler/verifier.cc31
-rw-r--r--deps/v8/src/compiler/verifier.h6
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc250
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h8
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc307
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc108
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h2
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc277
-rw-r--r--deps/v8/src/context-measure.cc4
-rw-r--r--deps/v8/src/context-measure.h10
-rw-r--r--deps/v8/src/contexts-inl.h6
-rw-r--r--deps/v8/src/contexts.cc34
-rw-r--r--deps/v8/src/contexts.h29
-rw-r--r--deps/v8/src/conversions-inl.h5
-rw-r--r--deps/v8/src/counters.h14
-rw-r--r--deps/v8/src/crankshaft/OWNERS7
-rw-r--r--deps/v8/src/crankshaft/arm/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc (renamed from deps/v8/src/arm/lithium-arm.cc)59
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h (renamed from deps/v8/src/arm/lithium-arm.h)68
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc (renamed from deps/v8/src/arm/lithium-codegen-arm.cc)116
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h (renamed from deps/v8/src/arm/lithium-codegen-arm.h)16
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc (renamed from deps/v8/src/arm/lithium-gap-resolver-arm.cc)4
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h (renamed from deps/v8/src/arm/lithium-gap-resolver-arm.h)11
-rw-r--r--deps/v8/src/crankshaft/arm64/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h (renamed from deps/v8/src/arm64/delayed-masm-arm64-inl.h)11
-rw-r--r--deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc (renamed from deps/v8/src/arm64/delayed-masm-arm64.cc)4
-rw-r--r--deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h (renamed from deps/v8/src/arm64/delayed-masm-arm64.h)11
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc (renamed from deps/v8/src/arm64/lithium-arm64.cc)57
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h (renamed from deps/v8/src/arm64/lithium-arm64.h)97
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc (renamed from deps/v8/src/arm64/lithium-codegen-arm64.cc)112
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h (renamed from deps/v8/src/arm64/lithium-codegen-arm64.h)15
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc (renamed from deps/v8/src/arm64/lithium-gap-resolver-arm64.cc)6
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h (renamed from deps/v8/src/arm64/lithium-gap-resolver-arm64.h)13
-rw-r--r--deps/v8/src/crankshaft/hydrogen-alias-analysis.h (renamed from deps/v8/src/hydrogen-alias-analysis.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.cc (renamed from deps/v8/src/hydrogen-bce.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.h (renamed from deps/v8/src/hydrogen-bce.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bch.cc (renamed from deps/v8/src/hydrogen-bch.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bch.h (renamed from deps/v8/src/hydrogen-bch.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-canonicalize.cc (renamed from deps/v8/src/hydrogen-canonicalize.cc)5
-rw-r--r--deps/v8/src/crankshaft/hydrogen-canonicalize.h (renamed from deps/v8/src/hydrogen-canonicalize.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-check-elimination.cc (renamed from deps/v8/src/hydrogen-check-elimination.cc)6
-rw-r--r--deps/v8/src/crankshaft/hydrogen-check-elimination.h (renamed from deps/v8/src/hydrogen-check-elimination.h)13
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dce.cc (renamed from deps/v8/src/hydrogen-dce.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dce.h (renamed from deps/v8/src/hydrogen-dce.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dehoist.cc (renamed from deps/v8/src/hydrogen-dehoist.cc)3
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dehoist.h (renamed from deps/v8/src/hydrogen-dehoist.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-environment-liveness.cc (renamed from deps/v8/src/hydrogen-environment-liveness.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-environment-liveness.h (renamed from deps/v8/src/hydrogen-environment-liveness.h)12
-rw-r--r--deps/v8/src/crankshaft/hydrogen-escape-analysis.cc (renamed from deps/v8/src/hydrogen-escape-analysis.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-escape-analysis.h (renamed from deps/v8/src/hydrogen-escape-analysis.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-flow-engine.h (renamed from deps/v8/src/hydrogen-flow-engine.h)13
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.cc (renamed from deps/v8/src/hydrogen-gvn.cc)5
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.h (renamed from deps/v8/src/hydrogen-gvn.h)13
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-representation.cc (renamed from deps/v8/src/hydrogen-infer-representation.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-representation.h (renamed from deps/v8/src/hydrogen-infer-representation.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-types.cc (renamed from deps/v8/src/hydrogen-infer-types.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-types.h (renamed from deps/v8/src/hydrogen-infer-types.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc (renamed from deps/v8/src/hydrogen-instructions.cc)56
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h (renamed from deps/v8/src/hydrogen-instructions.h)296
-rw-r--r--deps/v8/src/crankshaft/hydrogen-load-elimination.cc (renamed from deps/v8/src/hydrogen-load-elimination.cc)9
-rw-r--r--deps/v8/src/crankshaft/hydrogen-load-elimination.h (renamed from deps/v8/src/hydrogen-load-elimination.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc (renamed from deps/v8/src/hydrogen-mark-deoptimize.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h (renamed from deps/v8/src/hydrogen-mark-deoptimize.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc (renamed from deps/v8/src/hydrogen-mark-unreachable.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-unreachable.h (renamed from deps/v8/src/hydrogen-mark-unreachable.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.cc (renamed from deps/v8/src/hydrogen-osr.cc)5
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.h (renamed from deps/v8/src/hydrogen-osr.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.cc (renamed from deps/v8/src/hydrogen-range-analysis.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.h (renamed from deps/v8/src/hydrogen-range-analysis.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-redundant-phi.cc (renamed from deps/v8/src/hydrogen-redundant-phi.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-redundant-phi.h (renamed from deps/v8/src/hydrogen-redundant-phi.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-removable-simulates.cc (renamed from deps/v8/src/hydrogen-removable-simulates.cc)7
-rw-r--r--deps/v8/src/crankshaft/hydrogen-removable-simulates.h (renamed from deps/v8/src/hydrogen-removable-simulates.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-representation-changes.cc (renamed from deps/v8/src/hydrogen-representation-changes.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-representation-changes.h (renamed from deps/v8/src/hydrogen-representation-changes.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-sce.cc (renamed from deps/v8/src/hydrogen-sce.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-sce.h (renamed from deps/v8/src/hydrogen-sce.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen-store-elimination.cc (renamed from deps/v8/src/hydrogen-store-elimination.cc)5
-rw-r--r--deps/v8/src/crankshaft/hydrogen-store-elimination.h (renamed from deps/v8/src/hydrogen-store-elimination.h)13
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.cc (renamed from deps/v8/src/hydrogen-types.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.h (renamed from deps/v8/src/hydrogen-types.h)9
-rw-r--r--deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc (renamed from deps/v8/src/hydrogen-uint32-analysis.cc)2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-uint32-analysis.h (renamed from deps/v8/src/hydrogen-uint32-analysis.h)11
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc (renamed from deps/v8/src/hydrogen.cc)688
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h (renamed from deps/v8/src/hydrogen.h)86
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc (renamed from deps/v8/src/ia32/lithium-codegen-ia32.cc)114
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h (renamed from deps/v8/src/ia32/lithium-codegen-ia32.h)16
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc (renamed from deps/v8/src/ia32/lithium-gap-resolver-ia32.cc)48
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h (renamed from deps/v8/src/ia32/lithium-gap-resolver-ia32.h)15
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc (renamed from deps/v8/src/ia32/lithium-ia32.cc)61
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h (renamed from deps/v8/src/ia32/lithium-ia32.h)68
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator-inl.h (renamed from deps/v8/src/lithium-allocator-inl.h)27
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.cc (renamed from deps/v8/src/lithium-allocator.cc)105
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.h (renamed from deps/v8/src/lithium-allocator.h)17
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc (renamed from deps/v8/src/lithium-codegen.cc)34
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.h (renamed from deps/v8/src/lithium-codegen.h)9
-rw-r--r--deps/v8/src/crankshaft/lithium-inl.h (renamed from deps/v8/src/lithium-inl.h)28
-rw-r--r--deps/v8/src/crankshaft/lithium.cc (renamed from deps/v8/src/lithium.cc)54
-rw-r--r--deps/v8/src/crankshaft/lithium.h (renamed from deps/v8/src/lithium.h)11
-rw-r--r--deps/v8/src/crankshaft/mips/OWNERS5
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc (renamed from deps/v8/src/mips/lithium-codegen-mips.cc)110
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h (renamed from deps/v8/src/mips/lithium-codegen-mips.h)15
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc (renamed from deps/v8/src/mips/lithium-gap-resolver-mips.cc)5
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h (renamed from deps/v8/src/mips/lithium-gap-resolver-mips.h)11
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc (renamed from deps/v8/src/mips/lithium-mips.cc)59
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h (renamed from deps/v8/src/mips/lithium-mips.h)68
-rw-r--r--deps/v8/src/crankshaft/mips64/OWNERS5
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc (renamed from deps/v8/src/mips64/lithium-codegen-mips64.cc)110
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h (renamed from deps/v8/src/mips64/lithium-codegen-mips64.h)15
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc (renamed from deps/v8/src/mips64/lithium-gap-resolver-mips64.cc)5
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h (renamed from deps/v8/src/mips64/lithium-gap-resolver-mips64.h)11
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc (renamed from deps/v8/src/mips64/lithium-mips64.cc)59
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h (renamed from deps/v8/src/mips64/lithium-mips64.h)68
-rw-r--r--deps/v8/src/crankshaft/ppc/OWNERS5
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc (renamed from deps/v8/src/ppc/lithium-codegen-ppc.cc)136
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h (renamed from deps/v8/src/ppc/lithium-codegen-ppc.h)19
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc (renamed from deps/v8/src/ppc/lithium-gap-resolver-ppc.cc)5
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h (renamed from deps/v8/src/ppc/lithium-gap-resolver-ppc.h)12
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc (renamed from deps/v8/src/ppc/lithium-ppc.cc)59
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h (renamed from deps/v8/src/ppc/lithium-ppc.h)69
-rw-r--r--deps/v8/src/crankshaft/typing.cc (renamed from deps/v8/src/typing.cc)32
-rw-r--r--deps/v8/src/crankshaft/typing.h (renamed from deps/v8/src/typing.h)14
-rw-r--r--deps/v8/src/crankshaft/unique.h (renamed from deps/v8/src/unique.h)10
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc (renamed from deps/v8/src/x64/lithium-codegen-x64.cc)323
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h (renamed from deps/v8/src/x64/lithium-codegen-x64.h)15
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc (renamed from deps/v8/src/x64/lithium-gap-resolver-x64.cc)35
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h (renamed from deps/v8/src/x64/lithium-gap-resolver-x64.h)11
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc (renamed from deps/v8/src/x64/lithium-x64.cc)59
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h (renamed from deps/v8/src/x64/lithium-x64.h)68
-rw-r--r--deps/v8/src/crankshaft/x87/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc (renamed from deps/v8/src/x87/lithium-codegen-x87.cc)110
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h (renamed from deps/v8/src/x87/lithium-codegen-x87.h)15
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc (renamed from deps/v8/src/x87/lithium-gap-resolver-x87.cc)49
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h (renamed from deps/v8/src/x87/lithium-gap-resolver-x87.h)15
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc (renamed from deps/v8/src/x87/lithium-x87.cc)61
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h (renamed from deps/v8/src/x87/lithium-x87.h)68
-rw-r--r--deps/v8/src/d8.gyp2
-rw-r--r--deps/v8/src/date.h3
-rw-r--r--deps/v8/src/dateparser-inl.h3
-rw-r--r--deps/v8/src/dateparser.h3
-rw-r--r--deps/v8/src/debug/debug-frames.cc2
-rw-r--r--deps/v8/src/debug/debug-scopes.cc19
-rw-r--r--deps/v8/src/debug/debug-scopes.h3
-rw-r--r--deps/v8/src/debug/debug.cc60
-rw-r--r--deps/v8/src/debug/debug.h38
-rw-r--r--deps/v8/src/debug/debug.js68
-rw-r--r--deps/v8/src/debug/liveedit.cc10
-rw-r--r--deps/v8/src/debug/liveedit.h3
-rw-r--r--deps/v8/src/debug/mirrors.js96
-rw-r--r--deps/v8/src/deoptimizer.cc57
-rw-r--r--deps/v8/src/deoptimizer.h22
-rw-r--r--deps/v8/src/disassembler.cc4
-rw-r--r--deps/v8/src/disassembler.h3
-rw-r--r--deps/v8/src/diy-fp.h3
-rw-r--r--deps/v8/src/double.h3
-rw-r--r--deps/v8/src/dtoa.h3
-rw-r--r--deps/v8/src/effects.h3
-rw-r--r--deps/v8/src/elements-kind.h3
-rw-r--r--deps/v8/src/elements.cc204
-rw-r--r--deps/v8/src/elements.h62
-rw-r--r--deps/v8/src/execution.cc16
-rw-r--r--deps/v8/src/execution.h5
-rw-r--r--deps/v8/src/expression-classifier.h27
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.h3
-rw-r--r--deps/v8/src/extensions/free-buffer-extension.h3
-rw-r--r--deps/v8/src/extensions/gc-extension.h3
-rw-r--r--deps/v8/src/extensions/statistics-extension.h3
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.h3
-rw-r--r--deps/v8/src/factory.cc77
-rw-r--r--deps/v8/src/factory.h24
-rw-r--r--deps/v8/src/fast-dtoa.h3
-rw-r--r--deps/v8/src/field-index-inl.h6
-rw-r--r--deps/v8/src/field-index.h5
-rw-r--r--deps/v8/src/fixed-dtoa.h3
-rw-r--r--deps/v8/src/flag-definitions.h81
-rw-r--r--deps/v8/src/flags.h3
-rw-r--r--deps/v8/src/frames-inl.h7
-rw-r--r--deps/v8/src/frames.cc40
-rw-r--r--deps/v8/src/frames.h143
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc358
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc363
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc142
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h198
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc366
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc368
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc368
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc359
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc358
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc361
-rw-r--r--deps/v8/src/func-name-inferrer.h3
-rw-r--r--deps/v8/src/futex-emulation.h4
-rw-r--r--deps/v8/src/global-handles.cc124
-rw-r--r--deps/v8/src/global-handles.h22
-rw-r--r--deps/v8/src/globals.h67
-rw-r--r--deps/v8/src/handles-inl.h54
-rw-r--r--deps/v8/src/handles.cc46
-rw-r--r--deps/v8/src/handles.h52
-rw-r--r--deps/v8/src/harmony-array.js300
-rw-r--r--deps/v8/src/harmony-concat-spreadable.js19
-rw-r--r--deps/v8/src/harmony-object-observe.js14
-rw-r--r--deps/v8/src/harmony-reflect.js20
-rw-r--r--deps/v8/src/harmony-regexp.js37
-rw-r--r--deps/v8/src/harmony-tostring.js19
-rw-r--r--deps/v8/src/harmony-typedarray.js414
-rw-r--r--deps/v8/src/hashmap.h3
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h4
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc4
-rw-r--r--deps/v8/src/heap/gc-tracer.cc299
-rw-r--r--deps/v8/src/heap/gc-tracer.h47
-rw-r--r--deps/v8/src/heap/heap-inl.h7
-rw-r--r--deps/v8/src/heap/heap.cc261
-rw-r--r--deps/v8/src/heap/heap.h288
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h89
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc2
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h4
-rw-r--r--deps/v8/src/heap/incremental-marking.cc181
-rw-r--r--deps/v8/src/heap/incremental-marking.h50
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h28
-rw-r--r--deps/v8/src/heap/mark-compact.cc699
-rw-r--r--deps/v8/src/heap/mark-compact.h59
-rw-r--r--deps/v8/src/heap/memory-reducer.cc87
-rw-r--r--deps/v8/src/heap/memory-reducer.h24
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h116
-rw-r--r--deps/v8/src/heap/objects-visiting.cc5
-rw-r--r--deps/v8/src/heap/objects-visiting.h104
-rw-r--r--deps/v8/src/heap/scavenge-job.cc4
-rw-r--r--deps/v8/src/heap/scavenge-job.h4
-rw-r--r--deps/v8/src/heap/scavenger.cc4
-rw-r--r--deps/v8/src/heap/scavenger.h4
-rw-r--r--deps/v8/src/heap/spaces-inl.h15
-rw-r--r--deps/v8/src/heap/spaces.cc535
-rw-r--r--deps/v8/src/heap/spaces.h566
-rw-r--r--deps/v8/src/heap/store-buffer-inl.h4
-rw-r--r--deps/v8/src/heap/store-buffer.h4
-rw-r--r--deps/v8/src/i18n.h3
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h3
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc8
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h199
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc341
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc272
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h19
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h3
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc31
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc8
-rw-r--r--deps/v8/src/ia32/frames-ia32.h3
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc52
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc61
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h37
-rw-r--r--deps/v8/src/ia32/simulator-ia32.h3
-rw-r--r--deps/v8/src/ic/access-compiler.h4
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc6
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc6
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc6
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc5
-rw-r--r--deps/v8/src/ic/call-optimization.h4
-rw-r--r--deps/v8/src/ic/handler-compiler.cc31
-rw-r--r--deps/v8/src/ic/handler-compiler.h9
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc24
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc6
-rw-r--r--deps/v8/src/ic/ia32/stub-cache-ia32.cc2
-rw-r--r--deps/v8/src/ic/ic-compiler.h4
-rw-r--r--deps/v8/src/ic/ic-inl.h29
-rw-r--r--deps/v8/src/ic/ic-state.cc16
-rw-r--r--deps/v8/src/ic/ic-state.h34
-rw-r--r--deps/v8/src/ic/ic.cc121
-rw-r--r--deps/v8/src/ic/ic.h24
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc6
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc48
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc6
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc5
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc7
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc6
-rw-r--r--deps/v8/src/ic/stub-cache.h4
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc6
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc6
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc24
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc6
-rw-r--r--deps/v8/src/ic/x87/stub-cache-x87.cc2
-rw-r--r--deps/v8/src/icu_util.h3
-rw-r--r--deps/v8/src/identity-map.cc47
-rw-r--r--deps/v8/src/identity-map.h30
-rw-r--r--deps/v8/src/interface-descriptors.cc71
-rw-r--r--deps/v8/src/interface-descriptors.h88
-rw-r--r--deps/v8/src/interpreter/OWNERS5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc927
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h216
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc40
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1962
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h104
-rw-r--r--deps/v8/src/interpreter/bytecode-traits.h180
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc220
-rw-r--r--deps/v8/src/interpreter/bytecodes.h295
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc95
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h126
-rw-r--r--deps/v8/src/interpreter/interpreter.cc1044
-rw-r--r--deps/v8/src/interpreter/interpreter.h36
-rw-r--r--deps/v8/src/isolate-inl.h4
-rw-r--r--deps/v8/src/isolate.cc64
-rw-r--r--deps/v8/src/isolate.h27
-rw-r--r--deps/v8/src/js/array-iterator.js (renamed from deps/v8/src/array-iterator.js)14
-rw-r--r--deps/v8/src/js/array.js (renamed from deps/v8/src/array.js)390
-rw-r--r--deps/v8/src/js/arraybuffer.js (renamed from deps/v8/src/arraybuffer.js)24
-rw-r--r--deps/v8/src/js/code-stubs.js (renamed from deps/v8/src/code-stubs.js)2
-rw-r--r--deps/v8/src/js/collection-iterator.js (renamed from deps/v8/src/collection-iterator.js)31
-rw-r--r--deps/v8/src/js/collection.js (renamed from deps/v8/src/collection.js)28
-rw-r--r--deps/v8/src/js/date.js (renamed from deps/v8/src/date.js)153
-rw-r--r--deps/v8/src/js/generator.js (renamed from deps/v8/src/generator.js)14
-rw-r--r--deps/v8/src/js/harmony-array-includes.js (renamed from deps/v8/src/harmony-array-includes.js)12
-rw-r--r--deps/v8/src/js/harmony-atomics.js (renamed from deps/v8/src/harmony-atomics.js)34
-rw-r--r--deps/v8/src/js/harmony-object-observe.js17
-rw-r--r--deps/v8/src/js/harmony-reflect.js37
-rw-r--r--deps/v8/src/js/harmony-regexp.js66
-rw-r--r--deps/v8/src/js/harmony-sharedarraybuffer.js (renamed from deps/v8/src/harmony-sharedarraybuffer.js)9
-rw-r--r--deps/v8/src/js/harmony-simd.js (renamed from deps/v8/src/harmony-simd.js)69
-rw-r--r--deps/v8/src/js/i18n.js (renamed from deps/v8/src/i18n.js)118
-rw-r--r--deps/v8/src/js/iterator-prototype.js (renamed from deps/v8/src/iterator-prototype.js)5
-rw-r--r--deps/v8/src/js/json.js (renamed from deps/v8/src/json.js)18
-rw-r--r--deps/v8/src/js/macros.py (renamed from deps/v8/src/macros.py)32
-rw-r--r--deps/v8/src/js/math.js (renamed from deps/v8/src/math.js)97
-rw-r--r--deps/v8/src/js/messages.js (renamed from deps/v8/src/messages.js)102
-rw-r--r--deps/v8/src/js/object-observe.js (renamed from deps/v8/src/object-observe.js)45
-rw-r--r--deps/v8/src/js/prologue.js (renamed from deps/v8/src/prologue.js)74
-rw-r--r--deps/v8/src/js/promise.js (renamed from deps/v8/src/promise.js)59
-rw-r--r--deps/v8/src/js/proxy.js (renamed from deps/v8/src/proxy.js)9
-rw-r--r--deps/v8/src/js/regexp.js (renamed from deps/v8/src/regexp.js)313
-rw-r--r--deps/v8/src/js/runtime.js (renamed from deps/v8/src/runtime.js)151
-rw-r--r--deps/v8/src/js/spread.js (renamed from deps/v8/src/harmony-spread.js)5
-rw-r--r--deps/v8/src/js/string-iterator.js (renamed from deps/v8/src/string-iterator.js)8
-rw-r--r--deps/v8/src/js/string.js (renamed from deps/v8/src/string.js)176
-rw-r--r--deps/v8/src/js/symbol.js (renamed from deps/v8/src/symbol.js)9
-rw-r--r--deps/v8/src/js/templates.js (renamed from deps/v8/src/templates.js)6
-rw-r--r--deps/v8/src/js/typedarray.js (renamed from deps/v8/src/typedarray.js)424
-rw-r--r--deps/v8/src/js/uri.js (renamed from deps/v8/src/uri.js)6
-rw-r--r--deps/v8/src/js/v8natives.js (renamed from deps/v8/src/v8natives.js)217
-rw-r--r--deps/v8/src/js/weak-collection.js (renamed from deps/v8/src/weak-collection.js)26
-rw-r--r--deps/v8/src/json-parser.h3
-rw-r--r--deps/v8/src/json-stringifier.h8
-rw-r--r--deps/v8/src/key-accumulator.cc263
-rw-r--r--deps/v8/src/key-accumulator.h92
-rw-r--r--deps/v8/src/layout-descriptor-inl.h8
-rw-r--r--deps/v8/src/layout-descriptor.h4
-rw-r--r--deps/v8/src/libplatform/default-platform.cc3
-rw-r--r--deps/v8/src/libplatform/default-platform.h19
-rw-r--r--deps/v8/src/libplatform/task-queue.cc3
-rw-r--r--deps/v8/src/libplatform/task-queue.h3
-rw-r--r--deps/v8/src/libplatform/worker-thread.cc3
-rw-r--r--deps/v8/src/libplatform/worker-thread.h3
-rw-r--r--deps/v8/src/list-inl.h3
-rw-r--r--deps/v8/src/list.h3
-rw-r--r--deps/v8/src/log-inl.h3
-rw-r--r--deps/v8/src/log-utils.h5
-rw-r--r--deps/v8/src/log.cc4
-rw-r--r--deps/v8/src/log.h3
-rw-r--r--deps/v8/src/lookup-inl.h142
-rw-r--r--deps/v8/src/lookup.cc183
-rw-r--r--deps/v8/src/lookup.h14
-rw-r--r--deps/v8/src/macro-assembler.h3
-rw-r--r--deps/v8/src/messages.cc43
-rw-r--r--deps/v8/src/messages.h28
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h55
-rw-r--r--deps/v8/src/mips/assembler-mips.cc667
-rw-r--r--deps/v8/src/mips/assembler-mips.h547
-rw-r--r--deps/v8/src/mips/builtins-mips.cc281
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc190
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h3
-rw-r--r--deps/v8/src/mips/codegen-mips.h3
-rw-r--r--deps/v8/src/mips/constants-mips.cc36
-rw-r--r--deps/v8/src/mips/constants-mips.h567
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc29
-rw-r--r--deps/v8/src/mips/disasm-mips.cc40
-rw-r--r--deps/v8/src/mips/frames-mips.h3
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc46
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc1623
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h68
-rw-r--r--deps/v8/src/mips/simulator-mips.cc312
-rw-r--r--deps/v8/src/mips/simulator-mips.h19
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h33
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc99
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h326
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc280
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc193
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.h3
-rw-r--r--deps/v8/src/mips64/codegen-mips64.h3
-rw-r--r--deps/v8/src/mips64/constants-mips64.h9
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc29
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc16
-rw-r--r--deps/v8/src/mips64/frames-mips64.h3
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc46
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc45
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h28
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc103
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h6
-rw-r--r--deps/v8/src/modules.h3
-rw-r--r--deps/v8/src/objects-debug.cc25
-rw-r--r--deps/v8/src/objects-inl.h443
-rw-r--r--deps/v8/src/objects-printer.cc354
-rw-r--r--deps/v8/src/objects.cc2645
-rw-r--r--deps/v8/src/objects.h698
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.cc1
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.h4
-rw-r--r--deps/v8/src/parameter-initializer-rewriter.cc82
-rw-r--r--deps/v8/src/parameter-initializer-rewriter.h22
-rw-r--r--deps/v8/src/parser.cc466
-rw-r--r--deps/v8/src/parser.h32
-rw-r--r--deps/v8/src/pattern-rewriter.cc73
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h4
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc30
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h396
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc317
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc214
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h4
-rw-r--r--deps/v8/src/ppc/codegen-ppc.h4
-rw-r--r--deps/v8/src/ppc/constants-ppc.cc44
-rw-r--r--deps/v8/src/ppc/constants-ppc.h25
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc39
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc35
-rw-r--r--deps/v8/src/ppc/frames-ppc.h6
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc46
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc57
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h32
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc64
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h9
-rw-r--r--deps/v8/src/preparse-data-format.h3
-rw-r--r--deps/v8/src/preparse-data.h3
-rw-r--r--deps/v8/src/preparser.cc68
-rw-r--r--deps/v8/src/preparser.h220
-rw-r--r--deps/v8/src/prettyprinter.cc144
-rw-r--r--deps/v8/src/prettyprinter.h11
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h3
-rw-r--r--deps/v8/src/profiler/circular-queue-inl.h3
-rw-r--r--deps/v8/src/profiler/circular-queue.h3
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h3
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc6
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h3
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc7
-rw-r--r--deps/v8/src/profiler/heap-profiler.h3
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator-inl.h3
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc76
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h5
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h4
-rw-r--r--deps/v8/src/profiler/profile-generator.cc11
-rw-r--r--deps/v8/src/profiler/profile-generator.h12
-rw-r--r--deps/v8/src/profiler/sampler.h3
-rw-r--r--deps/v8/src/profiler/strings-storage.cc (renamed from deps/v8/src/strings-storage.cc)2
-rw-r--r--deps/v8/src/profiler/strings-storage.h (renamed from deps/v8/src/strings-storage.h)12
-rw-r--r--deps/v8/src/profiler/unbound-queue-inl.h3
-rw-r--r--deps/v8/src/profiler/unbound-queue.h3
-rw-r--r--deps/v8/src/property-descriptor.cc268
-rw-r--r--deps/v8/src/property-descriptor.h117
-rw-r--r--deps/v8/src/property-details.h30
-rw-r--r--deps/v8/src/property.h3
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h3
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h3
-rw-r--r--deps/v8/src/regexp/bytecodes-irregexp.h4
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h3
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.h3
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h3
-rw-r--r--deps/v8/src/regexp/jsregexp.cc30
-rw-r--r--deps/v8/src/regexp/jsregexp.h5
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h3
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h3
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h4
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h3
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.h3
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.h3
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h3
-rw-r--r--deps/v8/src/regexp/regexp-stack.h3
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h3
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h3
-rw-r--r--deps/v8/src/register-configuration.cc168
-rw-r--r--deps/v8/src/register-configuration.h95
-rw-r--r--deps/v8/src/rewriter.cc246
-rw-r--r--deps/v8/src/rewriter.h11
-rw-r--r--deps/v8/src/runtime-profiler.cc2
-rw-r--r--deps/v8/src/runtime-profiler.h3
-rw-r--r--deps/v8/src/runtime/runtime-array.cc17
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc4
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc39
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc107
-rw-r--r--deps/v8/src/runtime/runtime-function.cc137
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc4
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc16
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc80
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc9
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc20
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc8
-rw-r--r--deps/v8/src/runtime/runtime-object.cc287
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc120
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc63
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc24
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc4
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc14
-rw-r--r--deps/v8/src/runtime/runtime-utils.h4
-rw-r--r--deps/v8/src/runtime/runtime.cc26
-rw-r--r--deps/v8/src/runtime/runtime.h85
-rw-r--r--deps/v8/src/safepoint-table.h3
-rw-r--r--deps/v8/src/scanner-character-streams.h45
-rw-r--r--deps/v8/src/scanner.cc2
-rw-r--r--deps/v8/src/scanner.h9
-rw-r--r--deps/v8/src/scopeinfo.cc1
-rw-r--r--deps/v8/src/scopeinfo.h3
-rw-r--r--deps/v8/src/scopes.cc100
-rw-r--r--deps/v8/src/scopes.h70
-rw-r--r--deps/v8/src/small-pointer-list.h3
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc2
-rw-r--r--deps/v8/src/snapshot/natives.h3
-rw-r--r--deps/v8/src/snapshot/serialize.cc86
-rw-r--r--deps/v8/src/snapshot/serialize.h206
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h2
-rw-r--r--deps/v8/src/snapshot/snapshot.h3
-rw-r--r--deps/v8/src/splay-tree-inl.h3
-rw-r--r--deps/v8/src/splay-tree.h3
-rw-r--r--deps/v8/src/string-builder.cc2
-rw-r--r--deps/v8/src/string-builder.h10
-rw-r--r--deps/v8/src/string-search.h3
-rw-r--r--deps/v8/src/string-stream.h3
-rw-r--r--deps/v8/src/strtod.h3
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.cc69
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.h8
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.js204
-rw-r--r--deps/v8/src/token.h3
-rw-r--r--deps/v8/src/transitions-inl.h3
-rw-r--r--deps/v8/src/transitions.h11
-rw-r--r--deps/v8/src/type-cache.cc24
-rw-r--r--deps/v8/src/type-cache.h114
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h135
-rw-r--r--deps/v8/src/type-feedback-vector.cc317
-rw-r--r--deps/v8/src/type-feedback-vector.h361
-rw-r--r--deps/v8/src/type-info.cc52
-rw-r--r--deps/v8/src/type-info.h32
-rw-r--r--deps/v8/src/types-inl.h3
-rw-r--r--deps/v8/src/types.cc6
-rw-r--r--deps/v8/src/types.h96
-rw-r--r--deps/v8/src/typing-asm.cc161
-rw-r--r--deps/v8/src/typing-asm.h17
-rw-r--r--deps/v8/src/typing-reset.cc9
-rw-r--r--deps/v8/src/typing-reset.h6
-rw-r--r--deps/v8/src/utils.cc5
-rw-r--r--deps/v8/src/utils.h78
-rw-r--r--deps/v8/src/v8.cc3
-rw-r--r--deps/v8/src/v8.h3
-rw-r--r--deps/v8/src/v8memory.h3
-rw-r--r--deps/v8/src/v8threads.h3
-rw-r--r--deps/v8/src/variables.cc1
-rw-r--r--deps/v8/src/variables.h18
-rw-r--r--deps/v8/src/vector.h3
-rw-r--r--deps/v8/src/version.h3
-rw-r--r--deps/v8/src/vm-state-inl.h3
-rw-r--r--deps/v8/src/vm-state.h12
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h3
-rw-r--r--deps/v8/src/x64/assembler-x64.cc206
-rw-r--r--deps/v8/src/x64/assembler-x64.h553
-rw-r--r--deps/v8/src/x64/builtins-x64.cc314
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc285
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h19
-rw-r--r--deps/v8/src/x64/codegen-x64.cc63
-rw-r--r--deps/v8/src/x64/codegen-x64.h3
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc30
-rw-r--r--deps/v8/src/x64/disasm-x64.cc138
-rw-r--r--deps/v8/src/x64/frames-x64.h3
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc46
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc709
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h124
-rw-r--r--deps/v8/src/x64/simulator-x64.h3
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h3
-rw-r--r--deps/v8/src/x87/assembler-x87.cc8
-rw-r--r--deps/v8/src/x87/assembler-x87.h193
-rw-r--r--deps/v8/src/x87/builtins-x87.cc341
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc272
-rw-r--r--deps/v8/src/x87/code-stubs-x87.h19
-rw-r--r--deps/v8/src/x87/codegen-x87.cc23
-rw-r--r--deps/v8/src/x87/codegen-x87.h3
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc15
-rw-r--r--deps/v8/src/x87/frames-x87.h3
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc52
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc57
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h37
-rw-r--r--deps/v8/src/x87/simulator-x87.h3
-rw-r--r--deps/v8/src/zone-allocator.h3
-rw-r--r--deps/v8/src/zone-type-cache.h98
825 files changed, 39012 insertions, 26351 deletions
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index b89917f73e..73270d187c 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -198,18 +198,6 @@ void Accessors::ArrayLengthGetter(
}
-// Tries to non-observably convert |value| to a valid array length.
-// Returns false if it fails.
-static bool FastAsArrayLength(Isolate* isolate, Handle<Object> value,
- uint32_t* length) {
- if (value->ToArrayLength(length)) return true;
- // We don't support AsArrayLength, so use AsArrayIndex for now. This just
- // misses out on kMaxUInt32.
- if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
- return false;
-}
-
-
void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
@@ -222,26 +210,9 @@ void Accessors::ArrayLengthSetter(
Handle<Object> length_obj = Utils::OpenHandle(*val);
uint32_t length = 0;
- if (!FastAsArrayLength(isolate, length_obj, &length)) {
- Handle<Object> uint32_v;
- if (!Object::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
- Handle<Object> number_v;
- if (!Object::ToNumber(length_obj).ToHandle(&number_v)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
- if (uint32_v->Number() != number_v->Number()) {
- Handle<Object> exception = isolate->factory()->NewRangeError(
- MessageTemplate::kInvalidArrayLength);
- return isolate->ScheduleThrow(*exception);
- }
-
- CHECK(uint32_v->ToArrayLength(&length));
+ if (!JSArray::AnythingToArrayLength(isolate, length_obj, &length)) {
+ isolate->OptionalRescheduleException(false);
+ return;
}
if (JSArray::ObservableSetLength(array, length).is_null()) {
@@ -260,7 +231,6 @@ Handle<AccessorInfo> Accessors::ArrayLengthInfo(
}
-
//
// Accessors::StringLength
//
@@ -1074,7 +1044,12 @@ void Accessors::FunctionNameGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Object> result(function->shared()->name(), isolate);
+ Handle<Object> result;
+ if (function->shared()->name_should_print_as_anonymous()) {
+ result = isolate->factory()->anonymous_string();
+ } else {
+ result = handle(function->shared()->name(), isolate);
+ }
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -1200,20 +1175,7 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
return ArgumentsForInlinedFunction(frame, function, function_index);
}
- if (!frame->is_optimized()) {
- // If there is an arguments variable in the stack, we return that.
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
- int index = scope_info->StackSlotIndex(
- isolate->heap()->arguments_string());
- if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index), isolate);
- if (!arguments->IsArgumentsMarker()) return arguments;
- }
- }
-
- // If there is no arguments variable in the stack or we have an
- // optimized frame, we find the frame that holds the actual arguments
- // passed to the function.
+ // Find the frame that holds the actual arguments passed to the function.
it.AdvanceToArgumentsFrame();
frame = it.frame();
@@ -1359,7 +1321,7 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
// If caller is a built-in function and caller's caller is also built-in,
// use that instead.
JSFunction* potential_caller = caller;
- while (potential_caller != NULL && potential_caller->IsBuiltin()) {
+ while (potential_caller != NULL && potential_caller->shared()->IsBuiltin()) {
caller = potential_caller;
potential_caller = it.next();
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 3c0079de89..6c1765c404 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -106,6 +106,7 @@ class Accessors : public AllStatic {
Handle<ExecutableAccessorInfo> accessor);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ACCESSORS_H_
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
new file mode 100644
index 0000000000..681661af29
--- /dev/null
+++ b/deps/v8/src/address-map.cc
@@ -0,0 +1,38 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/address-map.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+RootIndexMap::RootIndexMap(Isolate* isolate) {
+ map_ = isolate->root_index_map();
+ if (map_ != NULL) return;
+ map_ = new HashMap(HashMap::PointersMatch);
+ for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ Object* root = isolate->heap()->root(root_index);
+ // Omit root entries that can be written after initialization. They must
+ // not be referenced through the root list in the snapshot.
+ if (root->IsHeapObject() &&
+ isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
+ HeapObject* heap_object = HeapObject::cast(root);
+ HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
+ if (entry != NULL) {
+ // Some are initialized to a previous value in the root list.
+ DCHECK_LT(GetValue(entry), i);
+ } else {
+ SetValue(LookupEntry(map_, heap_object, true), i);
+ }
+ }
+ }
+ isolate->set_root_index_map(map_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
new file mode 100644
index 0000000000..df32f89c1e
--- /dev/null
+++ b/deps/v8/src/address-map.h
@@ -0,0 +1,184 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ADDRESS_MAP_H_
+#define V8_ADDRESS_MAP_H_
+
+#include "src/assert-scope.h"
+#include "src/hashmap.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class AddressMapBase {
+ protected:
+ static void SetValue(HashMap::Entry* entry, uint32_t v) {
+ entry->value = reinterpret_cast<void*>(v);
+ }
+
+ static uint32_t GetValue(HashMap::Entry* entry) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+ }
+
+ inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
+ bool insert) {
+ if (insert) {
+ map->LookupOrInsert(Key(obj), Hash(obj));
+ }
+ return map->Lookup(Key(obj), Hash(obj));
+ }
+
+ private:
+ static uint32_t Hash(HeapObject* obj) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ }
+
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
+};
+
+
+class RootIndexMap : public AddressMapBase {
+ public:
+ explicit RootIndexMap(Isolate* isolate);
+
+ static const int kInvalidRootIndex = -1;
+
+ int Lookup(HeapObject* obj) {
+ HashMap::Entry* entry = LookupEntry(map_, obj, false);
+ if (entry) return GetValue(entry);
+ return kInvalidRootIndex;
+ }
+
+ private:
+ HashMap* map_;
+
+ DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
+};
+
+
+class BackReference {
+ public:
+ explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
+
+ BackReference() : bitfield_(kInvalidValue) {}
+
+ static BackReference SourceReference() { return BackReference(kSourceValue); }
+
+ static BackReference GlobalProxyReference() {
+ return BackReference(kGlobalProxyValue);
+ }
+
+ static BackReference LargeObjectReference(uint32_t index) {
+ return BackReference(SpaceBits::encode(LO_SPACE) |
+ ChunkOffsetBits::encode(index));
+ }
+
+ static BackReference DummyReference() { return BackReference(kDummyValue); }
+
+ static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK(IsAligned(chunk_offset, kObjectAlignment));
+ DCHECK_NE(LO_SPACE, space);
+ return BackReference(
+ SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
+ ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
+ }
+
+ bool is_valid() const { return bitfield_ != kInvalidValue; }
+ bool is_source() const { return bitfield_ == kSourceValue; }
+ bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
+
+ AllocationSpace space() const {
+ DCHECK(is_valid());
+ return SpaceBits::decode(bitfield_);
+ }
+
+ uint32_t chunk_offset() const {
+ DCHECK(is_valid());
+ return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
+ }
+
+ uint32_t large_object_index() const {
+ DCHECK(is_valid());
+ DCHECK(chunk_index() == 0);
+ return ChunkOffsetBits::decode(bitfield_);
+ }
+
+ uint32_t chunk_index() const {
+ DCHECK(is_valid());
+ return ChunkIndexBits::decode(bitfield_);
+ }
+
+ uint32_t reference() const {
+ DCHECK(is_valid());
+ return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
+ }
+
+ uint32_t bitfield() const { return bitfield_; }
+
+ private:
+ static const uint32_t kInvalidValue = 0xFFFFFFFF;
+ static const uint32_t kSourceValue = 0xFFFFFFFE;
+ static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
+ static const uint32_t kDummyValue = 0xFFFFFFFC;
+ static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
+ static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
+
+ public:
+ static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
+
+ private:
+ class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
+ class ChunkIndexBits
+ : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
+ class SpaceBits
+ : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
+ };
+
+ uint32_t bitfield_;
+};
+
+
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class BackReferenceMap : public AddressMapBase {
+ public:
+ BackReferenceMap()
+ : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
+
+ ~BackReferenceMap() { delete map_; }
+
+ BackReference Lookup(HeapObject* obj) {
+ HashMap::Entry* entry = LookupEntry(map_, obj, false);
+ return entry ? BackReference(GetValue(entry)) : BackReference();
+ }
+
+ void Add(HeapObject* obj, BackReference b) {
+ DCHECK(b.is_valid());
+ DCHECK_NULL(LookupEntry(map_, obj, false));
+ HashMap::Entry* entry = LookupEntry(map_, obj, true);
+ SetValue(entry, b.bitfield());
+ }
+
+ void AddSourceString(String* string) {
+ Add(string, BackReference::SourceReference());
+ }
+
+ void AddGlobalProxy(HeapObject* global_proxy) {
+ Add(global_proxy, BackReference::GlobalProxyReference());
+ }
+
+ private:
+ DisallowHeapAllocation no_allocation_;
+ HashMap* map_;
+ DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ADDRESS_MAP_H_
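
BackReference packs an allocation space tag, a chunk index and a chunk offset
into one uint32_t via V8's BitField templates. The same layout can be
expressed with plain shifts and masks; the field widths below (16/12/4) are
chosen purely for illustration, while the real ones derive from
kPageSizeBits, kObjectAlignmentBits and kSpaceTagSize:

#include <cassert>
#include <cstdint>

constexpr int kOffsetBits = 16;  // lowest bits: chunk offset
constexpr int kIndexBits = 12;   // middle bits: chunk index
constexpr int kSpaceBits = 4;    // highest bits: allocation space tag

uint32_t Encode(uint32_t space, uint32_t chunk_index, uint32_t offset) {
  assert(offset < (1u << kOffsetBits));
  assert(chunk_index < (1u << kIndexBits));
  assert(space < (1u << kSpaceBits));
  return offset | (chunk_index << kOffsetBits) |
         (space << (kOffsetBits + kIndexBits));
}

uint32_t DecodeOffset(uint32_t bits) {
  return bits & ((1u << kOffsetBits) - 1);
}
uint32_t DecodeIndex(uint32_t bits) {
  return (bits >> kOffsetBits) & ((1u << kIndexBits) - 1);
}
uint32_t DecodeSpace(uint32_t bits) {
  return bits >> (kOffsetBits + kIndexBits);
}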
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 9e287c1310..96e74c5ddb 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -36,7 +36,7 @@ class AllocationSiteContext {
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
- current_ = Handle<AllocationSite>(*top_, isolate());
+ current_ = Handle<AllocationSite>::New(*top_, isolate());
}
private:
@@ -95,6 +95,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ALLOCATION_SITE_SCOPES_H_
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 292e1fe23b..7c1e023b86 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -88,6 +88,7 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ALLOCATION_H_
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 051ea4a17b..d8dd151041 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -37,25 +37,6 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
}
-MaybeHandle<JSFunction> InstantiateFunctionOrMaybeDont(Isolate* isolate,
- Handle<Object> data) {
- DCHECK(data->IsFunctionTemplateInfo() || data->IsJSFunction());
- if (data->IsFunctionTemplateInfo()) {
- // A function template needs to be instantiated.
- return InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(data));
-#ifdef V8_JS_ACCESSORS
- } else if (data->IsJSFunction()) {
- // If we already have a proper function, we do not need additional work.
- // (This should only happen for JavaScript API accessors.)
- return Handle<JSFunction>::cast(data);
-#endif // V8_JS_ACCESSORS
- } else {
- UNREACHABLE();
- return MaybeHandle<JSFunction>();
- }
-}
-
MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> name,
@@ -63,14 +44,18 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<Object> setter,
PropertyAttributes attributes) {
if (!getter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, getter,
- InstantiateFunctionOrMaybeDont(isolate, getter),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, getter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(getter)),
+ Object);
}
if (!setter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, setter,
- InstantiateFunctionOrMaybeDont(isolate, setter),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, setter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(setter)),
+ Object);
}
RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
setter, attributes),
@@ -102,8 +87,10 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
}
#endif
- return Object::AddDataProperty(&it, value, attributes, STRICT,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ MAYBE_RETURN_NULL(
+ Object::AddDataProperty(&it, value, attributes, Object::THROW_ON_ERROR,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED));
+ return value;
}
@@ -148,6 +135,20 @@ class AccessCheckDisableScope {
};
+Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
+ Handle<Context> native_context = isolate->native_context();
+ DCHECK(!native_context.is_null());
+ switch (intrinsic) {
+#define GET_INTRINSIC_VALUE(name, iname) \
+ case v8::k##name: \
+ return native_context->iname();
+ V8_INTRINSICS_LIST(GET_INTRINSIC_VALUE)
+#undef GET_INTRINSIC_VALUE
+ }
+ return nullptr;
+}
+
+
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<TemplateInfo> data) {
auto property_list = handle(data->property_list(), isolate);
@@ -162,23 +163,40 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
auto name = handle(Name::cast(properties.get(i++)), isolate);
- PropertyDetails details(Smi::cast(properties.get(i++)));
- PropertyAttributes attributes = details.attributes();
- PropertyKind kind = details.kind();
+ auto bit = handle(properties.get(i++), isolate);
+ if (bit->IsSmi()) {
+ PropertyDetails details(Smi::cast(*bit));
+ PropertyAttributes attributes = details.attributes();
+ PropertyKind kind = details.kind();
+
+ if (kind == kData) {
+ auto prop_data = handle(properties.get(i++), isolate);
+
+ RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
+ prop_data, attributes),
+ JSObject);
+ } else {
+ auto getter = handle(properties.get(i++), isolate);
+ auto setter = handle(properties.get(i++), isolate);
+ RETURN_ON_EXCEPTION(isolate,
+ DefineAccessorProperty(isolate, obj, name, getter,
+ setter, attributes),
+ JSObject);
+ }
+ } else {
+      // Intrinsic data property: get the appropriate value from the current
+      // native context.
+ PropertyDetails details(Smi::cast(properties.get(i++)));
+ PropertyAttributes attributes = details.attributes();
+ DCHECK_EQ(kData, details.kind());
- if (kind == kData) {
- auto prop_data = handle(properties.get(i++), isolate);
+ v8::Intrinsic intrinsic =
+ static_cast<v8::Intrinsic>(Smi::cast(properties.get(i++))->value());
+ auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
- } else {
- auto getter = handle(properties.get(i++), isolate);
- auto setter = handle(properties.get(i++), isolate);
- RETURN_ON_EXCEPTION(isolate,
- DefineAccessorProperty(isolate, obj, name, getter,
- setter, attributes),
- JSObject);
}
}
return obj;
@@ -268,9 +286,9 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
JSObject::GetProperty(parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
- RETURN_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(prototype, parent_prototype, false),
- JSFunction);
+ MAYBE_RETURN(JSObject::SetPrototype(prototype, parent_prototype, false,
+ Object::THROW_ON_ERROR),
+ MaybeHandle<JSFunction>());
}
}
auto function = ApiNatives::CreateApiFunction(
@@ -377,21 +395,25 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
}
+void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name, v8::Intrinsic intrinsic,
+ PropertyAttributes attributes) {
+ const int kSize = 4;
+ auto value = handle(Smi::FromInt(intrinsic), isolate);
+ auto intrinsic_marker = isolate->factory()->true_value();
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ auto details_handle = handle(details.AsSmi(), isolate);
+ Handle<Object> data[kSize] = {name, intrinsic_marker, details_handle, value};
+ AddPropertyToPropertyList(isolate, info, kSize, data);
+}
+
+
void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<TemplateInfo> info,
- Handle<Name> name, Handle<Object> getter,
- Handle<Object> setter,
+ Handle<Name> name,
+ Handle<FunctionTemplateInfo> getter,
+ Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
-#ifdef V8_JS_ACCESSORS
- DCHECK(getter.is_null() || getter->IsFunctionTemplateInfo() ||
- getter->IsJSFunction());
- DCHECK(setter.is_null() || setter->IsFunctionTemplateInfo() ||
- setter->IsJSFunction());
-#else
- DCHECK(getter.is_null() || getter->IsFunctionTemplateInfo());
- DCHECK(setter.is_null() || setter->IsFunctionTemplateInfo());
-#endif // V8_JS_ACCESSORS
-
const int kSize = 4;
PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
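
The reworked ConfigureInstance loop above decodes a flat property list with
variable-width records: a Smi directly after the name marks an ordinary data
or accessor record, while a non-Smi marker flags an intrinsic record whose
value is a v8::Intrinsic id resolved against the native context. A simplified
decoder sketch covering just the data and intrinsic shapes (std::variant
stands in for V8's tagged slots; all names are illustrative):

#include <cstddef>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// A slot is either a small integer (standing in for a Smi) or a string
// (standing in for any other tagged value).
using Slot = std::variant<int, std::string>;

// Decodes records shaped like the property list above, covering two of the
// three record kinds for brevity:
//   data:      name, details(Smi), value
//   intrinsic: name, marker(non-Smi), details(Smi), intrinsic-id(Smi)
void WalkProperties(const std::vector<Slot>& props) {
  std::size_t i = 0;
  while (i < props.size()) {
    const std::string& name = std::get<std::string>(props[i++]);
    if (std::holds_alternative<int>(props[i])) {
      int details = std::get<int>(props[i++]);
      const Slot& value = props[i++];
      (void)value;
      std::cout << "data " << name << " details=" << details << "\n";
    } else {
      ++i;  // skip the intrinsic marker
      int details = std::get<int>(props[i++]);
      int id = std::get<int>(props[i++]);
      std::cout << "intrinsic " << name << " details=" << details
                << " id=" << id << "\n";
    }
  }
}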
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 0639677b15..fcca4a5a17 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -44,9 +44,14 @@ class ApiNatives {
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes);
+ static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name, v8::Intrinsic intrinsic,
+ PropertyAttributes attributes);
+
static void AddAccessorProperty(Isolate* isolate, Handle<TemplateInfo> info,
- Handle<Name> name, Handle<Object> getter,
- Handle<Object> setter,
+ Handle<Name> name,
+ Handle<FunctionTemplateInfo> getter,
+ Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes);
static void AddNativeDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index fd9477e101..5d4c9c0c41 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -43,6 +43,7 @@
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/sampler.h"
#include "src/property.h"
+#include "src/property-descriptor.h"
#include "src/property-details.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
@@ -158,6 +159,7 @@ class CallDepthScope {
do_callback_(do_callback) {
// TODO(dcarney): remove this when blink stops crashing.
DCHECK(!isolate_->external_caught_exception());
+ isolate_->IncrementJsCallsFromApiCounter();
isolate_->handle_scope_implementer()->IncrementCallDepth();
if (!context_.IsEmpty()) context_->Enter();
}
@@ -734,17 +736,17 @@ SealHandleScope::SealHandleScope(Isolate* isolate) {
i::HandleScopeData* current = internal_isolate->handle_scope_data();
prev_limit_ = current->limit;
current->limit = current->next;
- prev_level_ = current->level;
- current->level = 0;
+ prev_sealed_level_ = current->sealed_level;
+ current->sealed_level = current->level;
}
SealHandleScope::~SealHandleScope() {
i::HandleScopeData* current = isolate_->handle_scope_data();
- DCHECK_EQ(0, current->level);
- current->level = prev_level_;
DCHECK_EQ(current->next, current->limit);
current->limit = prev_limit_;
+ DCHECK_EQ(current->level, current->sealed_level);
+ current->sealed_level = prev_sealed_level_;
}
@@ -955,25 +957,6 @@ void Template::SetAccessorProperty(
}
-#ifdef V8_JS_ACCESSORS
-void Template::SetAccessorProperty(v8::Local<v8::Name> name,
- v8::Local<Function> getter,
- v8::Local<Function> setter,
- v8::PropertyAttribute attribute) {
- auto templ = Utils::OpenHandle(this);
- auto isolate = templ->GetIsolate();
- ENTER_V8(isolate);
- DCHECK(!name.IsEmpty());
- DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
- i::HandleScope scope(isolate);
- i::ApiNatives::AddAccessorProperty(
- isolate, templ, Utils::OpenHandle(*name),
- Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true),
- static_cast<PropertyAttributes>(attribute));
-}
-#endif // V8_JS_ACCESSORS
-
-
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
@@ -1352,6 +1335,18 @@ void Template::SetNativeDataProperty(v8::Local<Name> name,
}
+void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
+ PropertyAttribute attribute) {
+ auto templ = Utils::OpenHandle(this);
+ i::Isolate* isolate = templ->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
+ intrinsic,
+ static_cast<PropertyAttributes>(attribute));
+}
+
+
void ObjectTemplate::SetAccessor(v8::Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
@@ -1440,6 +1435,33 @@ void ObjectTemplate::MarkAsUndetectable() {
}
+void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
+ Local<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ auto cons = EnsureConstructor(isolate, this);
+ EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
+
+ i::Handle<i::Struct> struct_info =
+ isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ i::Handle<i::AccessCheckInfo> info =
+ i::Handle<i::AccessCheckInfo>::cast(struct_info);
+
+ SET_FIELD_WRAPPED(info, set_callback, callback);
+ SET_FIELD_WRAPPED(info, set_named_callback, nullptr);
+ SET_FIELD_WRAPPED(info, set_indexed_callback, nullptr);
+
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ info->set_data(*Utils::OpenHandle(*data));
+
+ cons->set_access_check_info(*info);
+ cons->set_needs_access_check(true);
+}
+
+
void ObjectTemplate::SetAccessCheckCallbacks(
NamedSecurityCallback named_callback,
IndexedSecurityCallback indexed_callback, Local<Value> data) {
@@ -1454,6 +1476,7 @@ void ObjectTemplate::SetAccessCheckCallbacks(
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
+ SET_FIELD_WRAPPED(info, set_callback, nullptr);
SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
@@ -1968,7 +1991,8 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Utils::OpenHandle(*v8_context->Global()), 0,
nullptr).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Function);
- RETURN_ESCAPED(Utils::ToLocal(i::Handle<i::JSFunction>::cast(result)));
+ RETURN_ESCAPED(
+ Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(result)));
}
@@ -2686,9 +2710,7 @@ bool Value::IsFalse() const {
}
-bool Value::IsFunction() const {
- return Utils::OpenHandle(this)->IsJSFunction();
-}
+bool Value::IsFunction() const { return Utils::OpenHandle(this)->IsCallable(); }
bool Value::IsName() const {
@@ -3027,8 +3049,7 @@ void v8::Object::CheckCast(Value* that) {
void v8::Function::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSFunction(),
- "v8::Function::Cast()",
+ Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast()",
"Could not convert to function");
}
@@ -3494,30 +3515,26 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::PropertyAttribute attributes) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DefineOwnProperty()",
bool);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
- auto value_obj = Utils::OpenHandle(*value);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- if (self->IsAccessCheckNeeded() && !isolate->MayAccess(self)) {
+ if (self->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), self)) {
isolate->ReportFailedAccessCheck(self);
return Nothing<bool>();
}
- i::Handle<i::FixedArray> desc = isolate->factory()->NewFixedArray(3);
- desc->set(0, isolate->heap()->ToBoolean(!(attributes & v8::ReadOnly)));
- desc->set(1, isolate->heap()->ToBoolean(!(attributes & v8::DontEnum)));
- desc->set(2, isolate->heap()->ToBoolean(!(attributes & v8::DontDelete)));
- i::Handle<i::JSArray> desc_array =
- isolate->factory()->NewJSArrayWithElements(desc, i::FAST_ELEMENTS, 3);
- i::Handle<i::Object> args[] = {self, key_obj, value_obj, desc_array};
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::JSFunction> fun = isolate->object_define_own_property();
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
+ i::PropertyDescriptor desc;
+ desc.set_writable(!(attributes & v8::ReadOnly));
+ desc.set_enumerable(!(attributes & v8::DontEnum));
+ desc.set_configurable(!(attributes & v8::DontDelete));
+ desc.set_value(value_obj);
+ bool success = i::JSReceiver::DefineOwnProperty(isolate, self, key_obj, &desc,
+ i::Object::DONT_THROW);
+ // Even though we said DONT_THROW, there might be accessors that do throw.
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->BooleanValue());
+ return Just(success);
}
@@ -3526,20 +3543,12 @@ static i::MaybeHandle<i::Object> DefineObjectProperty(
i::Handle<i::JSObject> js_object, i::Handle<i::Object> key,
i::Handle<i::Object> value, PropertyAttributes attrs) {
i::Isolate* isolate = js_object->GetIsolate();
- // Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return i::JSObject::SetOwnElementIgnoreAttributes(js_object, index, value,
- attrs);
- }
-
- i::Handle<i::Name> name;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
- i::Object::ToName(isolate, key),
- i::MaybeHandle<i::Object>());
+ bool success = false;
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, js_object, key, &success, i::LookupIterator::OWN);
+ if (!success) return i::MaybeHandle<i::Object>();
- return i::JSObject::DefinePropertyOrElementIgnoreAttributes(js_object, name,
- value, attrs);
+ return i::JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs);
}
@@ -3575,6 +3584,13 @@ bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
}
+Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
+ Local<Value> value) {
+ return DefineOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)),
+ value, DontEnum);
+}
+
+
MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
Local<Value> key) {
PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
@@ -3611,6 +3627,12 @@ Local<Value> v8::Object::Get(uint32_t index) {
}
+MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
+ Local<Private> key) {
+ return Get(context, Local<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(
@@ -3680,8 +3702,9 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
// We do not allow exceptions thrown while setting the prototype
// to propagate outside.
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- auto result = i::JSObject::SetPrototype(self, value_obj, false);
- has_pending_exception = result.is_null();
+ auto result = i::JSObject::SetPrototype(self, value_obj, false,
+ i::Object::THROW_ON_ERROR);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -3705,6 +3728,7 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
return Local<Object>();
}
}
+ // IsTemplateFor() ensures that iter.GetCurrent() can't be a Proxy here.
return Utils::ToLocal(i::handle(iter.GetCurrent<i::JSObject>(), isolate));
}
@@ -3846,6 +3870,12 @@ bool v8::Object::Delete(v8::Local<Value> key) {
}
+Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
+ Local<Private> key) {
+ return Delete(context, Local<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
auto self = Utils::OpenHandle(this);
@@ -3874,6 +3904,11 @@ bool v8::Object::Has(v8::Local<Value> key) {
}
+Maybe<bool> v8::Object::HasPrivate(Local<Context> context, Local<Private> key) {
+ return HasOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)));
+}
+
+
Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
bool);
@@ -4072,13 +4107,14 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Local<Context> context, Local<Name> key) {
PREPARE_FOR_EXECUTION(
context, "v8::Object::GetRealNamedPropertyInPrototypeChain()", Value);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return MaybeLocal<Value>();
- auto proto = i::PrototypeIterator::GetCurrent(iter);
+ i::Handle<i::JSReceiver> proto =
+ i::PrototypeIterator::GetCurrent<i::JSReceiver>(iter);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ isolate, self, key_obj, proto,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
@@ -4102,22 +4138,20 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
PREPARE_FOR_EXECUTION_PRIMITIVE(
context, "v8::Object::GetRealNamedPropertyAttributesInPrototypeChain()",
PropertyAttribute);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return Nothing<PropertyAttribute>();
- auto proto = i::PrototypeIterator::GetCurrent(iter);
+ i::Handle<i::JSReceiver> proto =
+ i::PrototypeIterator::GetCurrent<i::JSReceiver>(iter);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ isolate, self, key_obj, proto,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- auto result = i::JSReceiver::GetPropertyAttributes(&it);
+ Maybe<PropertyAttributes> result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
- if (result.FromJust() == ABSENT) {
- return Just(static_cast<PropertyAttribute>(NONE));
- }
- return Just<PropertyAttribute>(
- static_cast<PropertyAttribute>(result.FromJust()));
+ if (result.FromJust() == ABSENT) return Just(None);
+ return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
@@ -4206,13 +4240,16 @@ int v8::Object::GetIdentityHash() {
bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
v8::Local<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
+ if (value.IsEmpty()) {
+ i::JSObject::DeleteHiddenProperty(self, key_string);
+ return true;
+ }
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result =
i::JSObject::SetHiddenProperty(self, key_string, value_obj);
@@ -4336,8 +4373,8 @@ MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Object> result;
- has_pending_exception =
- !ToLocal<Object>(i::Execution::New(self, argc, args), &result);
+ has_pending_exception = !ToLocal<Object>(
+ i::Execution::New(isolate, self, self, argc, args), &result);
RETURN_ON_FAILED_EXECUTION(Object);
RETURN_ESCAPED(result);
}
@@ -4375,20 +4412,32 @@ Local<v8::Value> Function::Call(v8::Local<v8::Value> recv, int argc,
void Function::SetName(v8::Local<v8::String> name) {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) return;
+ auto func = i::Handle<i::JSFunction>::cast(self);
func->shared()->set_name(*Utils::OpenHandle(*name));
}
Local<Value> Function::GetName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return ToApiHandle<Primitive>(
+ self->GetIsolate()->factory()->undefined_value());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
func->GetIsolate()));
}
Local<Value> Function::GetInferredName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return ToApiHandle<Primitive>(
+ self->GetIsolate()->factory()->undefined_value());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
func->GetIsolate()));
}
@@ -4397,7 +4446,11 @@ Local<Value> Function::GetInferredName() const {
Local<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
i::Handle<i::String> property_name =
isolate->factory()->NewStringFromStaticChars("displayName");
i::Handle<i::Object> value =
@@ -4411,7 +4464,11 @@ Local<Value> Function::GetDisplayName() const {
ScriptOrigin Function::GetScriptOrigin() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return v8::ScriptOrigin(Local<Value>());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return GetScriptOriginForScript(func->GetIsolate(), script);
@@ -4424,7 +4481,11 @@ const int Function::kLineOffsetNotFound = -1;
int Function::GetScriptLineNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return kLineOffsetNotFound;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return i::Script::GetLineNumber(script, func->shared()->start_position());
@@ -4434,7 +4495,11 @@ int Function::GetScriptLineNumber() const {
int Function::GetScriptColumnNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return kLineOffsetNotFound;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return i::Script::GetColumnNumber(script, func->shared()->start_position());
@@ -4444,13 +4509,21 @@ int Function::GetScriptColumnNumber() const {
bool Function::IsBuiltin() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return func->IsBuiltin();
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return false;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
+ return func->shared()->IsBuiltin();
}
int Function::ScriptId() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return v8::UnboundScript::kNoScriptId;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (!func->shared()->script()->IsScript()) {
return v8::UnboundScript::kNoScriptId;
}
@@ -4460,16 +4533,19 @@ int Function::ScriptId() const {
Local<v8::Value> Function::GetBoundFunction() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (!func->shared()->bound()) {
return v8::Undefined(reinterpret_cast<v8::Isolate*>(func->GetIsolate()));
}
- i::Handle<i::FixedArray> bound_args = i::Handle<i::FixedArray>(
- i::FixedArray::cast(func->function_bindings()));
- i::Handle<i::Object> original(
- bound_args->get(i::JSFunction::kBoundFunctionIndex),
- func->GetIsolate());
- return Utils::ToLocal(i::Handle<i::JSFunction>::cast(original));
+ i::Handle<i::BindingsArray> bound_args = i::Handle<i::BindingsArray>(
+ i::BindingsArray::cast(func->function_bindings()));
+ i::Handle<i::Object> original(bound_args->bound_function(),
+ func->GetIsolate());
+ return Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(original));
}
@@ -5178,6 +5254,11 @@ Local<Value> Symbol::Name() const {
}
+Local<Value> Private::Name() const {
+ return reinterpret_cast<const Symbol*>(this)->Name();
+}
+
+
double Number::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
@@ -6047,6 +6128,8 @@ REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
REGEXP_FLAG_ASSERT_EQ(kIgnoreCase, IGNORE_CASE);
REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
+REGEXP_FLAG_ASSERT_EQ(kSticky, STICKY);
+REGEXP_FLAG_ASSERT_EQ(kUnicode, UNICODE_ESCAPES);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
@@ -6750,7 +6833,8 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
i::Handle<i::String> name,
- i::Handle<i::String> part) {
+ i::Handle<i::String> part,
+ bool private_symbol) {
i::Handle<i::JSObject> registry = isolate->GetSymbolRegistry();
i::Handle<i::JSObject> symbols =
i::Handle<i::JSObject>::cast(
@@ -6759,7 +6843,10 @@ static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
if (!symbol->IsSymbol()) {
DCHECK(symbol->IsUndefined());
- symbol = isolate->factory()->NewSymbol();
+ if (private_symbol)
+ symbol = isolate->factory()->NewPrivateSymbol();
+ else
+ symbol = isolate->factory()->NewSymbol();
i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
i::JSObject::SetProperty(symbols, name, symbol, i::STRICT).Assert();
}
@@ -6771,7 +6858,7 @@ Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
i::Handle<i::String> part = i_isolate->factory()->for_string();
- return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
}
@@ -6779,7 +6866,7 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
i::Handle<i::String> part = i_isolate->factory()->for_api_string();
- return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
}
@@ -6801,6 +6888,33 @@ Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
}
+Local<Symbol> v8::Symbol::GetIsConcatSpreadable(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ return Utils::ToLocal(i_isolate->factory()->is_concat_spreadable_symbol());
+}
+
+
+Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "Private::New()");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
+ if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
+ Local<Symbol> result = Utils::ToLocal(symbol);
+ return v8::Local<Private>(reinterpret_cast<Private*>(*result));
+}
+
+
+Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::String> part = i_isolate->factory()->private_api_string();
+ Local<Symbol> result =
+ Utils::ToLocal(SymbolFor(i_isolate, i_name, part, true));
+ return v8::Local<Private>(reinterpret_cast<Private*>(*result));
+}
+
+
Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (std::isnan(value)) {
@@ -7175,6 +7289,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
+ heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
@@ -7369,6 +7484,18 @@ int Isolate::ContextDisposedNotification(bool dependant_context) {
}
+void Isolate::IsolateInForegroundNotification() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->SetOptimizeForLatency();
+}
+
+
+void Isolate::IsolateInBackgroundNotification() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->SetOptimizeForMemoryUsage();
+}
+
+
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7472,10 +7599,10 @@ class VisitorAdapter : public i::ObjectVisitor {
public:
explicit VisitorAdapter(PersistentHandleVisitor* visitor)
: visitor_(visitor) {}
- virtual void VisitPointers(i::Object** start, i::Object** end) {
+ void VisitPointers(i::Object** start, i::Object** end) override {
UNREACHABLE();
}
- virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ void VisitEmbedderReference(i::Object** p, uint16_t class_id) override {
Value* value = ToApi<Value>(i::Handle<i::Object>(p));
visitor_->VisitPersistentHandle(
reinterpret_cast<Persistent<Value>*>(&value), class_id);
@@ -7504,6 +7631,15 @@ void Isolate::VisitHandlesForPartialDependence(
}
+void Isolate::VisitWeakHandles(PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::DisallowHeapAllocation no_allocation;
+ VisitorAdapter visitor_adapter(visitor);
+ isolate->global_handles()->IterateWeakRootsInNewSpaceWithClassIds(
+ &visitor_adapter);
+}
+
+
String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: str_(NULL), length_(0) {
if (obj.IsEmpty()) return;
@@ -7679,7 +7815,7 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
auto fun_obj = i::Object::GetProperty(debug, name).ToHandleChecked();
- auto v8_fun = Utils::ToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
+ auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
const int kArgc = 1;
v8::Local<v8::Value> argv[kArgc] = {obj};
Local<Value> result;
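
From the embedder's perspective, the Private API introduced above is used
roughly as follows; this is a hedged sketch that assumes a live isolate and
an entered context:

#include <v8.h>

// Store and read an embedder-private value on an object.
void UsePrivateSlot(v8::Isolate* isolate, v8::Local<v8::Context> context) {
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Private> key = v8::Private::ForApi(
      isolate, v8::String::NewFromUtf8(isolate, "embedder:slot"));
  v8::Local<v8::Object> obj = v8::Object::New(isolate);
  obj->SetPrivate(context, key, v8::Integer::New(isolate, 42)).FromJust();
  if (obj->HasPrivate(context, key).FromJust()) {
    // Visible only through this Private key; scripts cannot read,
    // enumerate or collide with it.
    v8::Local<v8::Value> stored =
        obj->GetPrivate(context, key).ToLocalChecked();
    (void)stored;
  }
}

DeletePrivate(context, key) removes the slot again; as the implementations
above show, all four entry points forward to the ordinary property machinery
with a private symbol as the key.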
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 1229279598..08fbd7ee8f 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -168,7 +168,7 @@ class RegisteredExtension {
V(Symbol, Symbol) \
V(Script, JSFunction) \
V(UnboundScript, SharedFunctionInfo) \
- V(Function, JSFunction) \
+ V(Function, JSReceiver) \
V(Message, JSMessageObject) \
V(Context, Context) \
V(External, Object) \
@@ -192,8 +192,6 @@ class Utils {
v8::internal::Handle<v8::internal::Context> obj);
static inline Local<Value> ToLocal(
v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Function> ToLocal(
- v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<Name> ToLocal(
v8::internal::Handle<v8::internal::Name> obj);
static inline Local<String> ToLocal(
@@ -269,6 +267,8 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<NativeWeakMap> NativeWeakMapToLocal(
v8::internal::Handle<v8::internal::JSWeakMap> obj);
+ static inline Local<Function> CallableToLocal(
+ v8::internal::Handle<v8::internal::JSReceiver> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@@ -349,7 +349,6 @@ inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
-MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
@@ -380,6 +379,7 @@ MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(NativeWeakMapToLocal, JSWeakMap, NativeWeakMap)
+MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
@@ -642,6 +642,7 @@ class Testing {
static v8::Testing::StressType stress_type_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_API_H_
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index ed995e7f58..d11a8cd61e 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -29,10 +29,13 @@ namespace internal {
class Arguments BASE_EMBEDDED {
public:
Arguments(int length, Object** arguments)
- : length_(length), arguments_(arguments) { }
+ : length_(length), arguments_(arguments) {
+ DCHECK_GE(length_, 0);
+ }
Object*& operator[] (int index) {
- DCHECK(0 <= index && index < length_);
+ DCHECK_GE(index, 0);
+ DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
index * kPointerSize));
}
@@ -283,6 +286,7 @@ static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARGUMENTS_H_
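
The new bounds check in Arguments::operator[] folds both range tests into a
single unsigned comparison: a negative index converts to a very large
uint32_t, so one compare rejects both index < 0 and index >= length_ at once.
The trick in isolation:

#include <cassert>
#include <cstdint>

bool InBounds(int index, int length) {
  assert(length >= 0);  // the DCHECK_GE in the constructor licenses this
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}
// InBounds(-1, 10) == false: -1 becomes 0xFFFFFFFF, which is >= 10.
// InBounds(3, 10) == true; InBounds(10, 10) == false.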
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 8f8956c9e1..175a21df51 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -50,53 +50,11 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
-int Register::NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
-}
-
-
-int DwVfpRegister::NumRegisters() {
+int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
-int DwVfpRegister::NumReservedRegisters() {
- return kNumReservedRegisters;
-}
-
-
-int DwVfpRegister::NumAllocatableRegisters() {
- return NumRegisters() - kNumReservedRegisters;
-}
-
-
-// static
-int DwVfpRegister::NumAllocatableAliasedRegisters() {
- return LowDwVfpRegister::kMaxNumLowRegisters - kNumReservedRegisters;
-}
-
-
-int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
- DCHECK(!reg.is(kDoubleRegZero));
- DCHECK(!reg.is(kScratchDoubleReg));
- if (reg.code() > kDoubleRegZero.code()) {
- return reg.code() - kNumReservedRegisters;
- }
- return reg.code();
-}
-
-
-DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < NumAllocatableRegisters());
- DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code()) {
- return from_code(index + kNumReservedRegisters);
- }
- return from_code(index);
-}
-
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@@ -666,6 +624,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 50c707d2a0..e7b619debb 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -52,6 +52,14 @@ namespace internal {
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
+#ifdef CAN_USE_ARMV8_INSTRUCTIONS
+ if (FLAG_enable_armv8) {
+ answer |= 1u << ARMv8;
+ // ARMv8 always features VFP and NEON.
+ answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
+ answer |= 1u << SUDIV | 1u << MLS;
+ }
+#endif // CAN_USE_ARMV8_INSTRUCTIONS
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif // CAN_USE_ARMV7_INSTRUCTIONS
@@ -81,6 +89,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __arm__
// For the simulator build, use whatever the flags specify.
+ if (FLAG_enable_armv8) {
+ supported_ |= 1u << ARMv8;
+ // ARMv8 always features VFP and NEON.
+ supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
+ supported_ |= 1u << SUDIV | 1u << MLS;
+ if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
@@ -154,7 +169,9 @@ void CpuFeatures::PrintTarget() {
arm_no_probe = " noprobe";
#endif
-#if defined CAN_USE_ARMV7_INSTRUCTIONS
+#if defined CAN_USE_ARMV8_INSTRUCTIONS
+ arm_arch = "arm v8";
+#elif defined CAN_USE_ARMV7_INSTRUCTIONS
arm_arch = "arm v7";
#else
arm_arch = "arm v6";
@@ -192,13 +209,15 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
- "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
- "MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
+ "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
+ "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
+ CpuFeatures::IsSupported(ARMv8),
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON),
CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(MLS),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
CpuFeatures::IsSupported(COHERENT_CACHE));
@@ -214,18 +233,6 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
-// Implementation of DwVfpRegister
-
-const char* DwVfpRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < NumAllocatableRegisters());
- DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
- return VFPRegisters::Name(index, true);
-}
-
-
-// -----------------------------------------------------------------------------
// Implementation of RelocInfo
// static
@@ -398,26 +405,26 @@ NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
+ al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
+ al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
// ldr rd, [pp, #offset]
const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+const Instr kLdrPpImmedPattern = 5 * B24 | L | Register::kCode_r8 * B16;
// ldr rd, [pp, rn]
const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | Register::kCode_r8 * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | Register::kCode_r8 * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -444,13 +451,13 @@ const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | kRegister_fp_Code * B16;
+ al | B26 | L | Offset | Register::kCode_fp * B16;
const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | kRegister_fp_Code * B16;
+ al | B26 | Offset | Register::kCode_fp * B16;
const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | kRegister_fp_Code * B16;
+ al | B26 | L | NegOffset | Register::kCode_fp * B16;
const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | kRegister_fp_Code * B16;
+ al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
@@ -626,21 +633,21 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
Register Assembler::GetRd(Instr instr) {
Register reg;
- reg.code_ = Instruction::RdValue(instr);
+ reg.reg_code = Instruction::RdValue(instr);
return reg;
}
Register Assembler::GetRn(Instr instr) {
Register reg;
- reg.code_ = Instruction::RnValue(instr);
+ reg.reg_code = Instruction::RnValue(instr);
return reg;
}
Register Assembler::GetRm(Instr instr) {
Register reg;
- reg.code_ = Instruction::RmValue(instr);
+ reg.reg_code = Instruction::RmValue(instr);
return reg;
}
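
Both CpuFeaturesImpliedByCompiler() and ProbeImpl() above encode the rule
that ARMv8 implies ARMv7, VFP3, NEON, SUDIV and MLS by OR-ing the implied
bits into the feature mask. The idiom in isolation (feature set abbreviated
for the sketch):

#include <cstdint>

enum Feature { kV7, kV8, kVFP3, kNEON };

uint32_t WithImplications(uint32_t features) {
  if (features & (1u << kV8)) {
    // Every ARMv8 core also provides the older features, so turning on
    // the v8 bit turns on everything it subsumes.
    features |= (1u << kV7) | (1u << kVFP3) | (1u << kNEON);
  }
  return features;
}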
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 1d1cc485d5..db6adae57a 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -45,11 +45,35 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
-#include "src/compiler.h"
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
+
+#define DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
+// clang-format on
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -71,190 +95,123 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
-// Core register
struct Register {
- static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters =
- FLAG_enable_embedded_constant_pool ? 8 : 9;
- static const int kSizeInBytes = 4;
-
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- DCHECK(reg.code() < kMaxNumAllocatableRegisters);
- return reg.code();
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index);
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- "r8",
- };
- if (FLAG_enable_embedded_constant_pool && (index >= 7)) {
- return names[index + 1];
- }
- return names[index];
- }
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
- Register r = { code };
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
return r;
}
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
-
void set_code(int code) {
- code_ = code;
+ reg_code = code;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-const Register no_reg = { kRegister_no_reg_Code };
-
-const Register r0 = { kRegister_r0_Code };
-const Register r1 = { kRegister_r1_Code };
-const Register r2 = { kRegister_r2_Code };
-const Register r3 = { kRegister_r3_Code };
-const Register r4 = { kRegister_r4_Code };
-const Register r5 = { kRegister_r5_Code };
-const Register r6 = { kRegister_r6_Code };
-// Used as context register.
-const Register r7 = {kRegister_r7_Code};
-// Used as constant pool pointer register if FLAG_enable_embedded_constant_pool.
-const Register r8 = { kRegister_r8_Code };
-// Used as lithium codegen scratch register.
-const Register r9 = { kRegister_r9_Code };
-// Used as roots register.
-const Register r10 = { kRegister_r10_Code };
-const Register fp = { kRegister_fp_Code };
-const Register ip = { kRegister_ip_Code };
-const Register sp = { kRegister_sp_Code };
-const Register lr = { kRegister_lr_Code };
-const Register pc = { kRegister_pc_Code };
+// r7: context register
+// r8: constant pool pointer register if FLAG_enable_embedded_constant_pool.
+// r9: lithium scratch
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
// Single word VFP register.
struct SwVfpRegister {
static const int kSizeInBytes = 4;
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
+ bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
- *m = code_ & 0x1;
- *vm = code_ >> 1;
+ *m = reg_code & 0x1;
+ *vm = reg_code >> 1;
}
- int code_;
+ int reg_code;
};
// Double word VFP register.
-struct DwVfpRegister {
- static const int kMaxNumRegisters = 32;
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kMaxNumRegisters = Code::kAfterLast;
+
+ inline static int NumRegisters();
+
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
- kNumReservedRegisters;
static const int kSizeInBytes = 8;
- // Note: the number of registers can be different at snapshot and run-time.
- // Any code included in the snapshot must be able to run both with 16 or 32
- // registers.
- inline static int NumRegisters();
- inline static int NumReservedRegisters();
- inline static int NumAllocatableRegisters();
-
- // TODO(turbofan): This is a temporary work-around required because our
- // register allocator does not yet support the aliasing of single/double
- // registers on ARM.
- inline static int NumAllocatableAliasedRegisters();
-
- inline static int ToAllocationIndex(DwVfpRegister reg);
- static const char* AllocationIndexToString(int index);
- inline static DwVfpRegister FromAllocationIndex(int index);
-
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
- return r;
- }
-
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+ *m = (reg_code & 0x10) >> 4;
+ *vm = reg_code & 0x0F;
}
- int code_;
+ int reg_code;
};
-typedef DwVfpRegister DoubleRegister;
+typedef DoubleRegister DwVfpRegister;
// Double word VFP register d0-15.
@@ -262,7 +219,7 @@ struct LowDwVfpRegister {
public:
static const int kMaxNumLowRegisters = 16;
operator DwVfpRegister() const {
- DwVfpRegister r = { code_ };
+ DwVfpRegister r = { reg_code };
return r;
}
static LowDwVfpRegister from_code(int code) {
@@ -271,30 +228,30 @@ struct LowDwVfpRegister {
}
bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumLowRegisters;
+ return 0 <= reg_code && reg_code < kMaxNumLowRegisters;
}
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(LowDwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
SwVfpRegister low() const {
SwVfpRegister reg;
- reg.code_ = code_ * 2;
+ reg.reg_code = reg_code * 2;
DCHECK(reg.is_valid());
return reg;
}
SwVfpRegister high() const {
SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
+ reg.reg_code = (reg_code * 2) + 1;
DCHECK(reg.is_valid());
return reg;
}
- int code_;
+ int reg_code;
};
@@ -308,21 +265,21 @@ struct QwNeonRegister {
}
bool is_valid() const {
- return (0 <= code_) && (code_ < kMaxNumRegisters);
+ return (0 <= reg_code) && (reg_code < kMaxNumRegisters);
}
- bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
+ bool is(QwNeonRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
- int encoded_code = code_ << 1;
+ int encoded_code = reg_code << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
- int code_;
+ int reg_code;
};
@@ -427,19 +384,19 @@ const QwNeonRegister q15 = { 15 };
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+ bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
@@ -1667,6 +1624,7 @@ class EnsureSpace BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index ea2c92e640..4464816f72 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -22,11 +22,12 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r0 : number of arguments excluding receiver
- // -- r1 : called function (only guaranteed when
- // extra_args requires it)
+ // (only guaranteed when the called function
+ // is marked as DontAdaptArguments)
+ // -- r1 : called function
// -- sp[0] : last argument
// -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(r1);
@@ -48,8 +49,17 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects r0 to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But r0 is only valid
+ // if the called function is marked as DontAdaptArguments, otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(r2);
+ __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ mov(r0, r2, LeaveCC, ne);
__ add(r0, r0, Operand(num_extra_args + 1));
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
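
// In rough C-like pseudocode (names as in the comments above), the argument
// count fixup amounts to:
//
//   int argc = r0;  // trustworthy only for DontAdaptArguments functions
//   int formal = shared_info->formal_parameter_count();  // Smi, untagged
//   if (formal != SharedFunctionInfo::kDontAdaptArgumentsSentinel)
//     argc = formal;
//   r0 = argc + num_extra_args + 1;  // + 1 for the receiver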
@@ -61,8 +71,7 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ ldr(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ ldr(result,
MemOperand(result,
@@ -77,8 +86,7 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
__ ldr(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
__ ldr(result,
MemOperand(result,
@@ -203,39 +211,42 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r3 : original constructor
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r0 and get rid of the rest (including the
+ // 1. Load the first argument into r2 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
__ sub(r0, r0, Operand(1), SetCC);
__ b(lo, &no_arguments);
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
__ Drop(2);
__ b(&done);
__ bind(&no_arguments);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
+ __ LoadRoot(r2, Heap::kempty_stringRootIndex);
__ Drop(1);
__ bind(&done);
}
- // 2. Make sure r0 is a string.
+ // 2. Make sure r2 is a string.
{
Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ JumpIfSmi(r2, &convert);
+ __ CompareObjectType(r2, r4, r4, FIRST_NONSTRING_TYPE);
__ b(lo, &done_convert);
__ bind(&convert);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(r1);
+ __ Push(r1, r3);
+ __ Move(r0, r2);
__ CallStub(&stub);
- __ Pop(r1);
+ __ Move(r2, r0);
+ __ Pop(r1, r3);
}
__ bind(&done_convert);
}
@@ -243,13 +254,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 3. Allocate a JSValue wrapper for the string.
{
// ----------- S t a t e -------------
- // -- r0 : the first argument
+ // -- r2 : the first argument
// -- r1 : constructor function
+ // -- r3 : original constructor
// -- lr : return address
// -----------------------------------
- Label allocate, done_allocate;
- __ Move(r2, r0);
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(r1, r3);
+ __ b(ne, &rt_call);
+
__ Allocate(JSValue::kSize, r0, r3, r4, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -273,6 +289,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(r1, r2);
}
__ b(&done_allocate);
+
+ // Fall back to the runtime to create a new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r2);
+ __ Push(r1, r3); // constructor function, original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(r1, r2);
+ }
+ __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ Ret();
}
}
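
// The new rt_call path matters for subclassing, where the original
// constructor (new.target) differs from String itself; e.g. in JS:
//
//   class MyString extends String {}
//   new MyString("x");  // r1 == String, r3 == MyString -> runtime fallback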
@@ -360,17 +388,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ tst(r2, r2);
__ b(ne, &rt_call);
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r1, r3);
+ // Verify that the original constructor is a JSFunction.
+ __ CompareObjectType(r3, r5, r4, JS_FUNCTION_TYPE);
__ b(ne, &rt_call);
// Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ // r3: original constructor
+ __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &rt_call);
__ CompareObjectType(r2, r5, r4, MAP_TYPE);
__ b(ne, &rt_call);
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ ldr(r5, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r1, r5);
+ __ b(ne, &rt_call);
+
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc). In which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
@@ -393,9 +427,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
__ b(ne, &allocate);
- __ push(r1);
+ __ Push(r1, r2);
- __ Push(r2, r1); // r1 = constructor
+ __ push(r2); // r2 = initial map
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(r2);
@@ -490,8 +524,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: original constructor
__ bind(&rt_call);
- __ push(r1); // argument 2/1: constructor function
- __ push(r3); // argument 3/2: original constructor
+ __ push(r1); // constructor function
+ __ push(r3); // original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(r4, r0);
@@ -896,21 +930,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -918,7 +938,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
+ __ push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
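
// A minimal sketch of why the bytecode array is spilled across the call: it
// is a heap object held in a plain machine register, and pushing it onto the
// stack keeps it visible to a moving GC triggered by Runtime::kStackGuard:
//
//   push(bytecode_array);               // make the pointer GC-visible
//   CallRuntime(Runtime::kStackGuard);
//   pop(bytecode_array);                // reload the possibly-moved pointer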
@@ -966,6 +988,66 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+ Register limit, Register scratch) {
+ Label loop_header, loop_check;
+ __ b(al, &loop_check);
+ __ bind(&loop_header);
+ __ ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
+ __ push(scratch);
+ __ bind(&loop_check);
+ __ cmp(index, limit);
+ __ b(gt, &loop_header);
+}
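
// The helper is a descending copy loop; roughly, in C terms:
//
//   while (index > limit) {
//     intptr_t value = *index;  // ldr with post-decrement
//     index -= 1;               // (PostIndex subtracts kPointerSize)
//     push(value);
//   }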
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(r3, r0, Operand(1)); // Add one for receiver.
+ __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
+ __ sub(r3, r2, r3);
+
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r2, r3, r4);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
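
// The limit is plain pointer arithmetic; assuming r2 points at the first
// (highest-addressed) argument:
//
//   limit = first_arg - (argc + 1) * kPointerSize;  // one extra slot for
//                                                   // the receiver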
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (not including receiver)
+ // -- r3 : original constructor
+ // -- r1 : constructor to call
+ // -- r2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ mov(r4, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r4, r2, r4);
+
+ // Push a slot for the receiver to be constructed.
+ __ push(r0);
+
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r2, r4, r5);
+
+ // Call the constructor with r0, r1, and r3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
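
// Same pattern as the call variant, except that the receiver is not among
// the copied arguments; the pushed r0 merely reserves the stack slot the
// constructor will fill in:
//
//   limit = first_arg - argc * kPointerSize;  // no receiver slot to copy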
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1483,70 +1565,82 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(r1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ b(ne, &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kNativeByteOffset));
__ tst(r3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ b(ne, &done_convert);
{
- __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
- // -- r3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(r3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
- __ b(hs, &done_convert);
- __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(r3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ JumpIfSmi(r3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ b(hs, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r3);
+ }
+ __ b(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r0);
+ __ Push(r0, r1);
+ __ mov(r0, r3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(r3, r0);
+ __ Pop(r0, r1);
+ __ SmiUntag(r0);
+ }
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ b(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r0);
- __ Push(r0, r1);
- __ mov(r0, r3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(r3, r0);
- __ Pop(r0, r1);
- __ SmiUntag(r0);
- }
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
}
__ bind(&done_convert);
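
// The mode dispatch above, as a pseudocode sketch:
//
//   if (mode == kNullOrUndefined) {
//     receiver = global_proxy;               // no need to inspect receiver
//   } else if (!receiver.IsJSReceiver()) {
//     if (mode != kNotNullOrUndefined && IsNullOrUndefined(receiver))
//       receiver = global_proxy;
//     else
//       receiver = ToObject(receiver);
//   }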
@@ -1565,11 +1659,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount actual(r0);
ParameterCount expected(r2);
__ InvokeCode(r3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
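
// Per ES6 9.2.1, invoking a class constructor without `new` throws; e.g.:
//
//   class C {}
//   C();  // TypeError: class constructors are not callable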
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the target to call (can be any Object).
@@ -1579,8 +1680,8 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq);
__ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
@@ -1601,7 +1702,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1696,35 +1799,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
- // -- r2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- r1 : the target to call (can be any Object).
-
- // Find the address of the last argument.
- __ add(r3, r0, Operand(1)); // Add one for receiver.
- __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
- __ sub(r3, r2, r3);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ b(al, &loop_check);
- __ bind(&loop_header);
- __ ldr(r4, MemOperand(r2, -kPointerSize, PostIndex));
- __ push(r4);
- __ bind(&loop_check);
- __ cmp(r2, r3);
- __ b(gt, &loop_header);
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index a8a4b5f5ac..c920725477 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -974,14 +974,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
-
+ //
+ // If argv_in_register():
+ // r2: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ mov(r5, Operand(r1));
- // Compute the argv pointer in a callee-saved register.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r1, r1, Operand(kPointerSize));
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mov(r1, Operand(r2));
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r1, r1, Operand(kPointerSize));
+ }
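
// When argv does not arrive in r2 it is recomputed from the stack pointer;
// the two instructions above implement:
//
//   argv = sp + argc * kPointerSize - kPointerSize;  // address of arg[0]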
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1057,8 +1064,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- // Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles(), r4, true);
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // Callee-saved register r4 still holds argc.
+ argc = r4;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true);
__ mov(pc, lr);
// Handling of exception.
@@ -1587,7 +1601,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ ldr(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
__ cmp(r6, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -1780,7 +1794,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments boilerplate from the current native context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ ldr(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(
r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
@@ -2365,99 +2379,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, cont);
-
- // Do not transform the receiver for native (Compilerhints already in r3).
- __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ mov(r0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(r1);
- __ mov(r0, r3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(r1);
- }
- __ str(r0, MemOperand(sp, argc * kPointerSize));
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // r1 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
- }
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(r3, &wrap);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
@@ -2540,9 +2461,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2579,34 +2498,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- __ JumpIfSmi(r3, &wrap);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ mov(r0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
- __ b(eq, &slow_start);
+ __ b(eq, &call);
// Verify that r4 contains an AllocationSite
__ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
@@ -2642,7 +2542,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r2, generic_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, generic_offset));
- __ jmp(&slow_start);
+ __ jmp(&call);
__ bind(&uninitialized);
@@ -2681,23 +2581,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r1);
}
- __ jmp(&have_js_function);
+ __ jmp(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
}
@@ -2843,7 +2734,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3154,6 +3045,21 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in r0.
+ Label not_smi;
+ __ JumpIfNotSmi(r0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, r0);
+ __ mov(r0, Operand(0), LeaveCC, lt);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ push(r0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
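
// For Smi inputs this clamps negative values to zero without a branch: tst
// sets the flags from r0 and the conditional mov rewrites r0 only when it
// was negative. Smis already fit below 2^53 - 1, so no upper clamp is
// needed; roughly:
//
//   int ToLengthSmi(int x) { return x < 0 ? 0 : x; }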
+
+
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r0.
Label is_number;
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index e572fd9a1b..845e38a85e 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -311,6 +311,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index d36ce59d66..f54fb71d0a 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -44,6 +44,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 9fefc3140a..915d9030e8 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -51,17 +51,6 @@ const Registers::RegisterAlias Registers::aliases_[] = {
};
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 6d544f3f36..efc060a82d 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -702,6 +702,7 @@ class VFPRegisters {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 312bb00df3..43f2fb3463 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -93,7 +94,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -142,8 +143,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Everything but pc, lr and ip which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
- const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK(kDoubleRegZero.code() == 14);
@@ -152,11 +152,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
- // Push registers d0-d13, and possibly d16-d31, on the stack.
+ // Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d13);
+ __ vstm(db_w, sp, d0, d15);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -211,9 +211,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
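
// Note that the destination offset is computed from the architectural code,
// not from the dense allocation index i, so double_registers_[] stays
// indexed by register number even though the allocatable set has gaps for
// the reserved scratch/zero doubles:
//
//   dst_offset = code * kDoubleSize + double_regs_offset;  // slot per code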
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 0cc24e00af..9d86579f28 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1923,7 +1923,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
+ return v8::internal::Register::from_code(reg).ToString();
}
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index dcba34f017..1ea7b1af56 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -128,6 +128,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index aa49843bd0..963b77782a 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -80,14 +80,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
@@ -110,6 +102,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return r0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return r0; }
@@ -230,6 +226,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -417,16 +420,39 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r0, // argument count (including receiver)
+ r0, // argument count (not including receiver)
r2, // address of first argument
r1 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r0, // argument count (not including receiver)
+ r3, // original constructor
+ r1, // constructor to call
+ r2 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r0, // argument count (argc)
+ r2, // address of first argument (argv)
+ r1 // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/interface-descriptors-arm.h b/deps/v8/src/arm/interface-descriptors-arm.h
index 6201adc685..a64927924e 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.h
+++ b/deps/v8/src/arm/interface-descriptors-arm.h
@@ -20,7 +20,7 @@ class PlatformInterfaceDescriptor {
private:
TargetAddressStorageMode storage_mode_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 49802ba734..456bfd5629 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -11,6 +11,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/arm/macro-assembler-arm.h"
@@ -23,8 +24,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
generating_stub_(false),
has_frame_(false) {
if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -759,7 +760,9 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -1474,7 +1477,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -2503,7 +2506,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
// Load the builtins object into target register.
ldr(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
+ ldr(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(target, ContextOperand(target, native_context_index));
}
@@ -2650,7 +2653,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
ldr(dst, GlobalObjectOperand());
- ldr(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+ ldr(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -2663,7 +2666,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Load the global or builtins object from the current context.
ldr(scratch,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
ldr(scratch,
@@ -2687,8 +2690,8 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
ldr(function,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
+ ldr(function,
+ FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
ldr(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -3578,8 +3581,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
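
// An assumed usage sketch: pick a scratch register guaranteed to differ
// from the inputs (candidates come from the allocatable set):
//
//   Register scratch = GetRegisterThatIsNotOneOf(r0, r1, r2);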
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index d78bf8f49a..8ab676f39b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -14,17 +14,18 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_r0_Code};
-const Register kReturnRegister1 = {kRegister_r1_Code};
-const Register kJSFunctionRegister = {kRegister_r1_Code};
-const Register kContextRegister = {kRegister_r7_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
+const Register kReturnRegister0 = {Register::kCode_r0};
+const Register kReturnRegister1 = {Register::kCode_r1};
+const Register kJSFunctionRegister = {Register::kCode_r1};
+const Register kContextRegister = {Register::kCode_r7};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};
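
// A sketch of why brace initialization works here: Register is an aggregate
// holding just the int register code (reg_code), so {Register::kCode_r0}
// writes that field directly without running a constructor:
//
//   const Register r = {Register::kCode_r0};
//   DCHECK(r.is(r0));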
// ----------------------------------------------------------------------------
// Static helper functions
@@ -36,9 +37,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
-const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
-const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
-const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
+const Register cp = {Register::kCode_r7}; // JavaScript context pointer.
+const Register pp = {Register::kCode_r8}; // Constant pool pointer.
+const Register kRootRegister = {Register::kCode_r10}; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -1564,6 +1565,7 @@ inline MemOperand GlobalObjectOperand() {
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 5da6204050..716e804e3a 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -298,7 +298,8 @@ void ArmDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
+ PrintF("%3s: 0x%08x %10d", Register::from_code(i).ToString(),
+ value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
(i % 2) == 0) {
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index a972a77d41..0c6aaf8c24 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -55,7 +55,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
@@ -459,7 +460,8 @@ class SimulatorStack : public v8::internal::AllStatic {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_ARM_SIMULATOR_ARM_H_
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index f02207f549..6de7fb1b2a 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -41,7 +41,7 @@ void RelocInfo::set_target_address(Address target,
}
-inline unsigned CPURegister::code() const {
+inline int CPURegister::code() const {
DCHECK(IsValid());
return reg_code;
}
@@ -54,12 +54,12 @@ inline CPURegister::RegisterType CPURegister::type() const {
inline RegList CPURegister::Bit() const {
- DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte));
+ DCHECK(static_cast<size_t>(reg_code) < (sizeof(RegList) * kBitsPerByte));
return IsValid() ? 1UL << reg_code : 0;
}
-inline unsigned CPURegister::SizeInBits() const {
+inline int CPURegister::SizeInBits() const {
DCHECK(IsValid());
return reg_size;
}
@@ -1259,6 +1259,7 @@ void Assembler::ClearRecordedAstId() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 37a2f5a29d..d981f635ba 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -35,6 +35,7 @@
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
@@ -109,17 +110,17 @@ void CPURegList::RemoveCalleeSaved() {
}
-CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+CPURegList CPURegList::GetCalleeSaved(int size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
}
-CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+CPURegList CPURegList::GetCalleeSavedFP(int size) {
return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}
-CPURegList CPURegList::GetCallerSaved(unsigned size) {
+CPURegList CPURegList::GetCallerSaved(int size) {
// Registers x0-x18 and lr (x30) are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
list.Combine(lr);
@@ -127,7 +128,7 @@ CPURegList CPURegList::GetCallerSaved(unsigned size) {
}
-CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+CPURegList CPURegList::GetCallerSavedFP(int size) {
// Registers d0-d7 and d16-d31 are caller-saved.
CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
@@ -192,8 +193,11 @@ bool RelocInfo::IsInConstantPool() {
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs.IncludesAliasOf(candidate)) continue;
return candidate;
}
@@ -1275,10 +1279,8 @@ void Assembler::rorv(const Register& rd,
// Bitfield operations.
-void Assembler::bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::bfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | BFM | N |
@@ -1288,10 +1290,8 @@ void Assembler::bfm(const Register& rd,
}
-void Assembler::sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.Is64Bits() || rn.Is32Bits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | SBFM | N |
@@ -1301,10 +1301,8 @@ void Assembler::sbfm(const Register& rd,
}
-void Assembler::ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | UBFM | N |
@@ -1314,10 +1312,8 @@ void Assembler::ubfm(const Register& rd,
}
-void Assembler::extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb) {
+void Assembler::extr(const Register& rd, const Register& rn, const Register& rm,
+ int lsb) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index f20be8315e..41060122d8 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -12,7 +12,6 @@
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/globals.h"
#include "src/utils.h"
@@ -23,12 +22,36 @@ namespace internal {
// -----------------------------------------------------------------------------
// Registers.
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
+// clang-format off
+#define GENERAL_REGISTER_CODE_LIST(R) \
+ R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+ R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+ R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
+ R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
+
+#define DOUBLE_REGISTERS(R) \
+ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
+ R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
+ R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
+ R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
+ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
+ R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
+ R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
+ R(d25) R(d26) R(d27) R(d28)
+// clang-format on
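
// These X-macro lists are consumed by passing in a one-argument macro; e.g.
// the Code enums below expand them into contiguous enumerator codes:
//
//   #define REGISTER_CODE(R) kCode_##R,
//   enum Code { GENERAL_REGISTERS(REGISTER_CODE) kAfterLast };
//   #undef REGISTER_CODE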
static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
@@ -40,6 +63,14 @@ struct FPRegister;
struct CPURegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
@@ -49,15 +80,15 @@ struct CPURegister {
kNoRegister
};
- static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ static CPURegister Create(int code, int size, RegisterType type) {
CPURegister r = {code, size, type};
return r;
}
- unsigned code() const;
+ int code() const;
RegisterType type() const;
RegList Bit() const;
- unsigned SizeInBits() const;
+ int SizeInBits() const;
int SizeInBytes() const;
bool Is32Bits() const;
bool Is64Bits() const;
@@ -86,14 +117,14 @@ struct CPURegister {
bool is(const CPURegister& other) const { return Is(other); }
bool is_valid() const { return IsValid(); }
- unsigned reg_code;
- unsigned reg_size;
+ int reg_code;
+ int reg_size;
RegisterType reg_type;
};
struct Register : public CPURegister {
- static Register Create(unsigned code, unsigned size) {
+ static Register Create(int code, int size) {
return Register(CPURegister::Create(code, size, CPURegister::kRegister));
}
@@ -117,6 +148,8 @@ struct Register : public CPURegister {
DCHECK(IsValidOrNone());
}
+ const char* ToString();
+ bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
return IsValidRegister();
@@ -130,6 +163,7 @@ struct Register : public CPURegister {
// A few of them may be unused for now.
static const int kNumRegisters = kNumberOfRegisters;
+ STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
static int NumRegisters() { return kNumRegisters; }
// We allow crankshaft to use the following registers:
@@ -146,70 +180,6 @@ struct Register : public CPURegister {
// - "low range"
// - "high range"
// - "context"
- static const unsigned kAllocatableLowRangeBegin = 0;
- static const unsigned kAllocatableLowRangeEnd = 15;
- static const unsigned kAllocatableHighRangeBegin = 18;
- static const unsigned kAllocatableHighRangeEnd = 24;
- static const unsigned kAllocatableContext = 27;
-
- // Gap between low and high ranges.
- static const int kAllocatableRangeGapSize =
- (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
-
- static const int kMaxNumAllocatableRegisters =
- (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
- (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // Return true if the register is one that crankshaft can allocate.
- bool IsAllocatable() const {
- return ((reg_code == kAllocatableContext) ||
- (reg_code <= kAllocatableLowRangeEnd) ||
- ((reg_code >= kAllocatableHighRangeBegin) &&
- (reg_code <= kAllocatableHighRangeEnd)));
- }
-
- static Register FromAllocationIndex(unsigned index) {
- DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
- // cp is the last allocatable register.
- if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
- return from_code(kAllocatableContext);
- }
-
- // Handle low and high ranges.
- return (index <= kAllocatableLowRangeEnd)
- ? from_code(index)
- : from_code(index + kAllocatableRangeGapSize);
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
- DCHECK((kAllocatableLowRangeBegin == 0) &&
- (kAllocatableLowRangeEnd == 15) &&
- (kAllocatableHighRangeBegin == 18) &&
- (kAllocatableHighRangeEnd == 24) &&
- (kAllocatableContext == 27));
- const char* const names[] = {
- "x0", "x1", "x2", "x3", "x4",
- "x5", "x6", "x7", "x8", "x9",
- "x10", "x11", "x12", "x13", "x14",
- "x15", "x18", "x19", "x20", "x21",
- "x22", "x23", "x24", "x27",
- };
- return names[index];
- }
-
- static int ToAllocationIndex(Register reg) {
- DCHECK(reg.IsAllocatable());
- unsigned code = reg.code();
- if (code == kAllocatableContext) {
- return NumAllocatableRegisters() - 1;
- }
-
- return (code <= kAllocatableLowRangeEnd)
- ? code
- : code - kAllocatableRangeGapSize;
- }
static Register from_code(int code) {
// Always return an X register.
@@ -221,7 +191,15 @@ struct Register : public CPURegister {
struct FPRegister : public CPURegister {
- static FPRegister Create(unsigned code, unsigned size) {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static FPRegister Create(int code, int size) {
return FPRegister(
CPURegister::Create(code, size, CPURegister::kFPRegister));
}
@@ -246,6 +224,8 @@ struct FPRegister : public CPURegister {
DCHECK(IsValidOrNone());
}
+ const char* ToString();
+ bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
@@ -256,69 +236,12 @@ struct FPRegister : public CPURegister {
// Start of V8 compatibility section ---------------------
static const int kMaxNumRegisters = kNumberOfFPRegisters;
+ STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
// Crankshaft can use all the FP registers except:
// - d15 which is used to keep the 0 double value
// - d30 which is used in crankshaft as a double scratch register
// - d31 which is used in the MacroAssembler as a double scratch register
- static const unsigned kAllocatableLowRangeBegin = 0;
- static const unsigned kAllocatableLowRangeEnd = 14;
- static const unsigned kAllocatableHighRangeBegin = 16;
- static const unsigned kAllocatableHighRangeEnd = 28;
-
- static const RegList kAllocatableFPRegisters = 0x1fff7fff;
-
- // Gap between low and high ranges.
- static const int kAllocatableRangeGapSize =
- (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
-
- static const int kMaxNumAllocatableRegisters =
- (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
- (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // TODO(turbofan): Proper float32 support.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
- // Return true if the register is one that crankshaft can allocate.
- bool IsAllocatable() const {
- return (Bit() & kAllocatableFPRegisters) != 0;
- }
-
- static FPRegister FromAllocationIndex(unsigned int index) {
- DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
-
- return (index <= kAllocatableLowRangeEnd)
- ? from_code(index)
- : from_code(index + kAllocatableRangeGapSize);
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
- DCHECK((kAllocatableLowRangeBegin == 0) &&
- (kAllocatableLowRangeEnd == 14) &&
- (kAllocatableHighRangeBegin == 16) &&
- (kAllocatableHighRangeEnd == 28));
- const char* const names[] = {
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28"
- };
- return names[index];
- }
-
- static int ToAllocationIndex(FPRegister reg) {
- DCHECK(reg.IsAllocatable());
- unsigned code = reg.code();
-
- return (code <= kAllocatableLowRangeEnd)
- ? code
- : code - kAllocatableRangeGapSize;
- }
-
static FPRegister from_code(int code) {
// Always return a D register.
return FPRegister::Create(code, kDRegSizeInBits);
@@ -361,7 +284,7 @@ INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
kWRegSizeInBits, CPURegister::kRegister); \
INITIALIZE_REGISTER(Register, x##N, N, \
kXRegSizeInBits, CPURegister::kRegister);
-REGISTER_CODE_LIST(DEFINE_REGISTERS)
+GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
@@ -374,7 +297,7 @@ INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
kSRegSizeInBits, CPURegister::kFPRegister); \
INITIALIZE_REGISTER(FPRegister, d##N, N, \
kDRegSizeInBits, CPURegister::kFPRegister);
-REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS
#undef INITIALIZE_REGISTER
@@ -461,13 +384,13 @@ class CPURegList {
DCHECK(IsValid());
}
- CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ CPURegList(CPURegister::RegisterType type, int size, RegList list)
: list_(list), size_(size), type_(type) {
DCHECK(IsValid());
}
- CPURegList(CPURegister::RegisterType type, unsigned size,
- unsigned first_reg, unsigned last_reg)
+ CPURegList(CPURegister::RegisterType type, int size, int first_reg,
+ int last_reg)
: size_(size), type_(type) {
DCHECK(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
@@ -524,12 +447,12 @@ class CPURegList {
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
- static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
- static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
+ static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedFP(int size = kDRegSizeInBits);
// AAPCS64 caller-saved registers. Note that this includes lr.
- static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
- static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
+ static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedFP(int size = kDRegSizeInBits);
// Registers saved as safepoints.
static CPURegList GetSafepointSavedRegisters();
@@ -557,25 +480,25 @@ class CPURegList {
return CountSetBits(list_, kRegListSizeInBits);
}
- unsigned RegisterSizeInBits() const {
+ int RegisterSizeInBits() const {
DCHECK(IsValid());
return size_;
}
- unsigned RegisterSizeInBytes() const {
+ int RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
DCHECK((size_in_bits % kBitsPerByte) == 0);
return size_in_bits / kBitsPerByte;
}
- unsigned TotalSizeInBytes() const {
+ int TotalSizeInBytes() const {
DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
private:
RegList list_;
- unsigned size_;
+ int size_;
CPURegister::RegisterType type_;
bool IsValid() const {
@@ -1197,39 +1120,24 @@ class Assembler : public AssemblerBase {
// Bitfield instructions.
// Bitfield move.
- void bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
+ void bfm(const Register& rd, const Register& rn, int immr, int imms);
// Signed bitfield move.
- void sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
+ void sbfm(const Register& rd, const Register& rn, int immr, int imms);
// Unsigned bitfield move.
- void ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
+ void ubfm(const Register& rd, const Register& rn, int immr, int imms);
// Bfm aliases.
// Bitfield insert.
- void bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void bfi(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Bitfield extract and insert low.
- void bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void bfxil(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
@@ -1237,26 +1145,20 @@ class Assembler : public AssemblerBase {
// Sbfm aliases.
// Arithmetic shift right.
- void asr(const Register& rd, const Register& rn, unsigned shift) {
+ void asr(const Register& rd, const Register& rn, int shift) {
DCHECK(shift < rd.SizeInBits());
sbfm(rd, rn, shift, rd.SizeInBits() - 1);
}
// Signed bitfield insert in zero.
- void sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void sbfiz(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Signed bitfield extract.
- void sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void sbfx(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
@@ -1279,33 +1181,27 @@ class Assembler : public AssemblerBase {
// Ubfm aliases.
// Logical shift left.
- void lsl(const Register& rd, const Register& rn, unsigned shift) {
- unsigned reg_size = rd.SizeInBits();
+ void lsl(const Register& rd, const Register& rn, int shift) {
+ int reg_size = rd.SizeInBits();
DCHECK(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
- void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ void lsr(const Register& rd, const Register& rn, int shift) {
DCHECK(shift < rd.SizeInBits());
ubfm(rd, rn, shift, rd.SizeInBits() - 1);
}
// Unsigned bitfield insert in zero.
- void ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void ubfiz(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Unsigned bitfield extract.
- void ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void ubfx(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
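
These aliases all reduce to bfm/sbfm/ubfm with computed immr/imms fields. A standalone check of the lsl mapping above, assuming 64-bit registers and a shift of 8 (values chosen purely for illustration):

#include <cstdio>

int main() {
  const int reg_size = 64;
  const int shift = 8;
  int immr = (reg_size - shift) % reg_size;  // rotate-right amount: 56
  int imms = reg_size - shift - 1;           // top source bit kept: 55
  // lsl(rd, rn, 8) therefore encodes as ubfm(rd, rn, 56, 55): rotating
  // right by 56 is a left shift by 8 once bits above imms are cleared.
  std::printf("ubfm immr=%d imms=%d\n", immr, imms);
  return 0;
}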
@@ -1327,10 +1223,8 @@ class Assembler : public AssemblerBase {
}
// Extract.
- void extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb);
+ void extr(const Register& rd, const Register& rn, const Register& rm,
+ int lsb);
// Conditional select: rd = cond ? rn : rm.
void csel(const Register& rd,
@@ -2296,6 +2190,7 @@ class EnsureSpace BASE_EMBEDDED {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 4331198017..f7ea89d807 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -22,8 +22,7 @@ namespace internal {
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the native context.
__ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ Ldr(result,
MemOperand(result,
@@ -36,8 +35,7 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
// Load the native context.
__ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ Ldr(result, ContextMemOperand(result,
Context::INTERNAL_ARRAY_FUNCTION_INDEX));
@@ -49,11 +47,12 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- x0 : number of arguments excluding receiver
- // -- x1 : called function (only guaranteed when
- // extra_args requires it)
+ // (only guaranteed when the called function
+ // is not marked as DontAdaptArguments)
+ // -- x1 : called function
// -- sp[0] : last argument
// -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == x0)
+ // -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(x1);
@@ -75,8 +74,16 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects x0 to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But x0 is only valid
+ // if the called function is marked as DontAdaptArguments; otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(
+ x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ Csel(x0, x0, x2, eq);
__ Add(x0, x0, num_extra_args + 1);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
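
A standalone model of the argument-count fixup above (a sketch: the types are simplified and the sentinel value here is a placeholder; the real one lives on SharedFunctionInfo):

const int kDontAdaptArgumentsSentinel = -1;  // placeholder for the sketch

int EffectiveArgc(int dynamic_argc, int formal_parameter_count,
                  int num_extra_args) {
  // The Csel above keeps the dynamic count (x0) only when the function is
  // marked DontAdaptArguments; otherwise the declared count (x2) wins.
  int argc = (formal_parameter_count == kDontAdaptArgumentsSentinel)
                 ? dynamic_argc
                 : formal_parameter_count;
  return argc + num_extra_args + 1;  // +1 for the receiver
}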
@@ -200,6 +207,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
+ // -- x3 : original constructor
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -225,16 +233,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
Label convert, done_convert;
__ JumpIfSmi(x2, &convert);
- __ JumpIfObjectType(x2, x3, x3, FIRST_NONSTRING_TYPE, &done_convert, lo);
+ __ JumpIfObjectType(x2, x4, x4, FIRST_NONSTRING_TYPE, &done_convert, lo);
__ Bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(x1);
+ __ Push(x1, x3);
__ Move(x0, x2);
__ CallStub(&stub);
__ Move(x2, x0);
- __ Pop(x1);
+ __ Pop(x1, x3);
}
__ Bind(&done_convert);
}
@@ -242,12 +250,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 3. Allocate a JSValue wrapper for the string.
{
// ----------- S t a t e -------------
- // -- x1 : constructor function
// -- x2 : the first argument
+ // -- x1 : constructor function
+ // -- x3 : original constructor
// -- lr : return address
// -----------------------------------
- Label allocate, done_allocate;
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(x1, x3);
+ __ B(ne, &rt_call);
+
__ Allocate(JSValue::kSize, x0, x3, x4, &allocate, TAG_OBJECT);
__ Bind(&done_allocate);
@@ -271,6 +285,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(x2, x1);
}
__ B(&done_allocate);
+
+ // Fall back to the runtime to create the new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x2, x1, x3); // constructor function, original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(x2, x1);
+ }
+ __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+ __ Ret();
}
}
@@ -327,7 +352,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x2 : allocation site or undefined
- // -- x3 : original constructor
+ // -- x3 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -365,18 +390,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ldr(x2, MemOperand(x2));
__ Cbnz(x2, &rt_call);
- // Fall back to runtime if the original constructor and function differ.
- __ Cmp(constructor, original_constructor);
- __ B(ne, &rt_call);
+ // Verify that the original constructor is a JSFunction.
+ __ JumpIfNotObjectType(original_constructor, x10, x11, JS_FUNCTION_TYPE,
+ &rt_call);
// Load the initial map and verify that it is in fact a map.
Register init_map = x2;
__ Ldr(init_map,
- FieldMemOperand(constructor,
+ FieldMemOperand(original_constructor,
JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(init_map, &rt_call);
__ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ Ldr(x10,
+ FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset));
+ __ Cmp(constructor, x10);
+ __ B(ne, &rt_call);
+
// Check that the constructor is not constructing a JSFunction (see
 // comments in Runtime_NewObject in runtime.cc), in which case the initial
// map's instance type would be JS_FUNCTION_TYPE.
@@ -399,9 +431,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
__ B(ne, &allocate);
- // Push the constructor and map to the stack, and the constructor again
+ // Push the constructor and map to the stack, and the map again
// as argument to the runtime call.
- __ Push(constructor, init_map, constructor);
+ __ Push(constructor, init_map, init_map);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(init_map, constructor);
__ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
@@ -699,7 +731,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
ParameterCount actual(x0);
__ InvokeFunction(x1, actual, CALL_FUNCTION, NullCallWrapper());
-
// Restore the context from the frame.
// x0: result
// jssp[0]: number of arguments (smi-tagged)
@@ -924,28 +955,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
+ __ Push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ Pop(kInterpreterBytecodeArrayRegister);
__ Bind(&ok);
}
@@ -1542,69 +1561,83 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(x1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(
+ w3, (1 << SharedFunctionInfo::kIsDefaultConstructor) |
+ (1 << SharedFunctionInfo::kIsSubclassConstructor) |
+ (1 << SharedFunctionInfo::kIsBaseConstructor),
+ &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
- __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ Label done_convert;
__ TestAndBranchIfAnySet(w3,
(1 << SharedFunctionInfo::kNative) |
(1 << SharedFunctionInfo::kStrictModeFunction),
&done_convert);
{
- __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
-
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
- // -- x3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(x3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
- __ B(hs, &done_convert);
- __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
- __ Bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(x3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(x3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
+ __ B(hs, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ Bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(x3);
+ }
+ __ B(&convert_receiver);
+ }
+ __ Bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(x0);
+ __ Push(x0, x1);
+ __ Mov(x0, x3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Mov(x3, x0);
+ __ Pop(x1, x0);
+ __ SmiUntag(x0);
+ }
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Bind(&convert_receiver);
}
- __ B(&convert_receiver);
- __ Bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(x0);
- __ Push(x0, x1);
- __ Mov(x0, x3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Mov(x3, x0);
- __ Pop(x1, x0);
- __ SmiUntag(x0);
- }
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Bind(&convert_receiver);
__ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
}
__ Bind(&done_convert);
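
The mode-dependent branches above implement a single receiver-conversion policy; a standalone model, with a toy Value type and placeholder GlobalProxy/ToObject declarations standing in for the real tagged values and operations:

enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

struct Value { bool is_js_receiver; bool is_null_or_undefined; };

Value GlobalProxy();      // placeholder: global proxy of the function context
Value ToObject(Value v);  // placeholder: wrap a primitive in an object

Value ConvertReceiver(ConvertReceiverMode mode, Value receiver) {
  if (mode == ConvertReceiverMode::kNullOrUndefined) return GlobalProxy();
  if (receiver.is_js_receiver) return receiver;  // already an object
  if (mode != ConvertReceiverMode::kNotNullOrUndefined &&
      receiver.is_null_or_undefined) {
    return GlobalProxy();  // patch null/undefined to the global proxy
  }
  return ToObject(receiver);  // remaining primitives go through ToObject
}

The kNullOrUndefined specialization is what lets call sites that statically know the receiver skip the type checks entirely.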
@@ -1622,11 +1655,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount actual(x0);
ParameterCount expected(x2);
__ InvokeCode(x3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the target to call (can be any Object).
@@ -1636,8 +1676,8 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq);
__ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
__ B(ne, &non_function);
@@ -1657,7 +1697,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1753,13 +1795,14 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -- x1 : the target to call (can be any Object).
+ // -----------------------------------
// Find the address of the last argument.
__ add(x3, x0, Operand(1)); // Add one for receiver.
@@ -1784,6 +1827,43 @@ void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (not including receiver)
+ // -- x3 : original constructor
+ // -- x1 : constructor to call
+ // -- x2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(x5, x0, Operand(1)); // Add one for receiver (to be constructed).
+ __ lsl(x5, x5, kPointerSizeLog2);
+
+ // Set stack pointer and where to stop.
+ __ Mov(x6, jssp);
+ __ Claim(x5, 1);
+ __ sub(x4, x6, x5);
+
+ // Push a slot for the receiver.
+ __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+
+ Label loop_header, loop_check;
+ // Push the arguments.
+ __ B(&loop_check);
+ __ Bind(&loop_header);
+ // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+ __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
+ __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
+ __ Bind(&loop_check);
+ __ Cmp(x6, x4);
+ __ B(gt, &loop_header);
+
+ // Call the constructor with x0, x1, and x3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
+
+
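+
A standalone model of the push loop, with a growable array standing in for the downward-growing jssp (a sketch; the slot order matches the generated code):

#include <cstdint>
#include <vector>

std::vector<intptr_t> PushArgsAndReceiver(const intptr_t* first_arg, int argc) {
  std::vector<intptr_t> stack;
  stack.push_back(0);  // receiver slot (xzr) is pushed first
  // x2 walks downward from the first argument (post-indexed by -kPointerSize).
  for (int i = 0; i < argc; ++i) {
    stack.push_back(first_arg[-i]);
  }
  return stack;  // x0, x1 and x3 are left untouched for the Construct builtin
}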
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index e39e08831a..751d8aebde 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -1067,6 +1067,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Register parameters:
// x0: argc (including receiver, untagged)
// x1: target
+ // If argv_in_register():
+ // x11: argv (pointer to first argument)
//
// The stack on entry holds the arguments and the receiver, with the receiver
// at the highest address:
@@ -1098,9 +1100,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// (arg[argc-2]), or just below the receiver in case there are no arguments.
// - Adjust for the arg[] array.
Register temp_argv = x11;
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
- // - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ if (!argv_in_register()) {
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ }
// Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
// registers.
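
A standalone model of the two argv conventions (a sketch with simplified types; 'stack' plays the role of jssp, and argc includes the receiver as in the stub):

#include <cstdint>

intptr_t* ComputeArgv(bool argv_in_register, intptr_t* argv_register,
                      intptr_t* stack, int argc) {
  if (argv_in_register) return argv_register;  // caller already supplied x11
  // Otherwise argv points one slot below the receiver, at arg[argc-2].
  return stack + argc - 1;
}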
@@ -1204,12 +1208,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ LeaveExitFrame(save_doubles(), x10, true);
DCHECK(jssp.Is(__ StackPointer()));
- // Pop or drop the remaining stack slots and return from the stub.
- // jssp[24]: Arguments array (of size argc), including receiver.
- // jssp[16]: Preserved x23 (used for target).
- // jssp[8]: Preserved x22 (used for argc).
- // jssp[0]: Preserved x21 (used for argv).
- __ Drop(x11);
+ if (!argv_in_register()) {
+ // Drop the remaining stack slots and return from the stub.
+ __ Drop(x11);
+ }
__ AssertFPCRState();
__ Ret();
@@ -1804,8 +1806,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Register sloppy_args_map = x11;
Register aliased_args_map = x10;
__ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
+ __ Ldr(global_ctx,
+ FieldMemOperand(global_object, JSGlobalObject::kNativeContextOffset));
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
@@ -2049,8 +2051,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Register global_ctx = x10;
Register strict_args_map = x4;
__ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
+ __ Ldr(global_ctx,
+ FieldMemOperand(global_object, JSGlobalObject::kNativeContextOffset));
__ Ldr(strict_args_map,
ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
@@ -2745,101 +2747,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
-
- // Do not transform the receiver for native (Compilerhints already in x3).
- __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ Mov(x0, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
- __ Mov(x0, x3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Pop(x1);
- }
- __ Poke(x0, argc * kPointerSize);
- __ B(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // x1 function the function to call
- Register function = x1;
- Register type = x4;
- Label slow, wrap, cont;
-
- // TODO(jbramley): This function has a lot of unnamed registers. Name them,
- // and tidy things up a bit.
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &slow);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
- }
-
- // Fast-case: Invoke the function now.
- // x1 function pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ Peek(x3, argc * kPointerSize);
-
- if (needs_checks) {
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
- } else {
- __ B(&wrap);
- }
-
- __ Bind(&cont);
- }
-
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ Bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ Bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallFunctionStub::Generate");
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
@@ -2939,16 +2846,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
Register function = x1;
Register feedback_vector = x2;
Register index = x3;
- Register type = x4;
// The checks. First, does x1 match the recorded monomorphic target?
__ Add(x4, feedback_vector,
@@ -2986,36 +2890,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Compute the receiver in sloppy mode.
- __ Peek(x3, argc * kPointerSize);
-
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
-
- __ Bind(&cont);
- }
-
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
- __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
+ __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
__ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
__ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
@@ -3047,7 +2929,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ B(&slow_start);
+ __ B(&call);
__ bind(&uninitialized);
@@ -3086,22 +2968,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(function);
}
- __ B(&have_js_function);
+ __ B(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &slow);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
- __ B(&have_js_function);
+ __ B(&call);
}
@@ -3235,7 +3109,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3912,6 +3786,21 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in x0.
+ Label not_smi;
+ __ JumpIfNotSmi(x0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(x0, x0);
+ __ Csel(x0, x0, Operand(0), ge);
+ __ Ret();
+ __ Bind(&not_smi);
+
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
+
+
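+
The new stub implements the ES6 ToLength clamp. A standalone model of the numeric case (the Smi fast path above is the negative clamp; non-numbers fall through to the runtime, which this sketch does not cover):

#include <cmath>

double ToLengthNumber(double number) {
  if (std::isnan(number) || number <= 0) return 0;  // NaN and negatives -> +0
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  if (number >= kMaxSafeInteger) return kMaxSafeInteger;
  return std::floor(number);  // integral result
}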
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in x0.
Label is_number;
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 1b64a625f9..341153380d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -384,6 +384,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
index 2f01c510de..7100ef1134 100644
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -43,6 +43,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 1529c647ff..43a375d953 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -32,8 +32,8 @@ const unsigned kInstructionSizeLog2 = 2;
const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
-const unsigned kNumberOfRegisters = 32;
-const unsigned kNumberOfFPRegisters = 32;
+const int kNumberOfRegisters = 32;
+const int kNumberOfFPRegisters = 32;
// Callee saved registers are x19-x30(lr).
const int kNumberOfCalleeSavedRegisters = 11;
const int kFirstCalleeSavedRegisterIndex = 19;
@@ -42,23 +42,22 @@ const int kNumberOfCalleeSavedFPRegisters = 8;
const int kFirstCalleeSavedFPRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
const unsigned kJSCalleeSavedRegList = 0x03f80000;
-// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
-const unsigned kWRegSizeInBits = 32;
-const unsigned kWRegSizeInBitsLog2 = 5;
-const unsigned kWRegSize = kWRegSizeInBits >> 3;
-const unsigned kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
-const unsigned kXRegSizeInBits = 64;
-const unsigned kXRegSizeInBitsLog2 = 6;
-const unsigned kXRegSize = kXRegSizeInBits >> 3;
-const unsigned kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
-const unsigned kSRegSizeInBits = 32;
-const unsigned kSRegSizeInBitsLog2 = 5;
-const unsigned kSRegSize = kSRegSizeInBits >> 3;
-const unsigned kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
-const unsigned kDRegSizeInBits = 64;
-const unsigned kDRegSizeInBitsLog2 = 6;
-const unsigned kDRegSize = kDRegSizeInBits >> 3;
-const unsigned kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
+const int kWRegSizeInBits = 32;
+const int kWRegSizeInBitsLog2 = 5;
+const int kWRegSize = kWRegSizeInBits >> 3;
+const int kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
+const int kXRegSizeInBits = 64;
+const int kXRegSizeInBitsLog2 = 6;
+const int kXRegSize = kXRegSizeInBits >> 3;
+const int kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
+const int kSRegSizeInBits = 32;
+const int kSRegSizeInBitsLog2 = 5;
+const int kSRegSize = kSRegSizeInBits >> 3;
+const int kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
+const int kDRegSizeInBits = 64;
+const int kDRegSizeInBitsLog2 = 6;
+const int kDRegSize = kDRegSizeInBits >> 3;
+const int kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
const int64_t kWRegMask = 0x00000000ffffffffL;
const int64_t kXRegMask = 0xffffffffffffffffL;
const int64_t kSRegMask = 0x00000000ffffffffL;
@@ -86,13 +85,13 @@ const int64_t kXMaxInt = 0x7fffffffffffffffL;
const int64_t kXMinInt = 0x8000000000000000L;
const int32_t kWMaxInt = 0x7fffffff;
const int32_t kWMinInt = 0x80000000;
-const unsigned kIp0Code = 16;
-const unsigned kIp1Code = 17;
-const unsigned kFramePointerRegCode = 29;
-const unsigned kLinkRegCode = 30;
-const unsigned kZeroRegCode = 31;
-const unsigned kJSSPCode = 28;
-const unsigned kSPRegInternalCode = 63;
+const int kIp0Code = 16;
+const int kIp1Code = 17;
+const int kFramePointerRegCode = 29;
+const int kLinkRegCode = 30;
+const int kZeroRegCode = 31;
+const int kJSSPCode = 28;
+const int kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kShiftAmountWRegMask = 0x1f;
const unsigned kShiftAmountXRegMask = 0x3f;
@@ -118,12 +117,6 @@ const unsigned kDoubleExponentBias = 1023;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
V_(Rd, 4, 0, Bits) /* Destination register. */ \
@@ -1237,6 +1230,7 @@ enum UnallocatedOp {
UnallocatedFMask = 0x00000000
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index c29f2d3c5e..e00105e7bc 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -644,6 +644,7 @@ void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_DECODER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
index 6140bc2818..b1ef41f1a2 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -181,6 +181,7 @@ class Decoder : public V {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_DECODER_ARM64_H_
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 65fb93e53c..19ee123b36 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -6,6 +6,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -75,7 +76,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -122,8 +123,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// in the input frame.
// Save all allocatable floating point registers.
- CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
- FPRegister::kAllocatableFPRegisters);
+ CPURegList saved_fp_registers(
+ CPURegister::kFPRegister, kDRegSizeInBits,
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask());
__ PushCPURegList(saved_fp_registers);
 // We save all the registers except jssp, sp and lr.
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index fb3b692d08..00c3ec25d6 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -19,7 +19,7 @@ namespace v8 {
namespace internal {
-Disassembler::Disassembler() {
+DisassemblingDecoder::DisassemblingDecoder() {
buffer_size_ = 256;
buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
buffer_pos_ = 0;
@@ -27,7 +27,7 @@ Disassembler::Disassembler() {
}
-Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+DisassemblingDecoder::DisassemblingDecoder(char* text_buffer, int buffer_size) {
buffer_size_ = buffer_size;
buffer_ = text_buffer;
buffer_pos_ = 0;
@@ -35,19 +35,17 @@ Disassembler::Disassembler(char* text_buffer, int buffer_size) {
}
-Disassembler::~Disassembler() {
+DisassemblingDecoder::~DisassemblingDecoder() {
if (own_buffer_) {
free(buffer_);
}
}
-char* Disassembler::GetOutput() {
- return buffer_;
-}
+char* DisassemblingDecoder::GetOutput() { return buffer_; }
-void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubImmediate(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
(instr->ImmAddSub() == 0) ? true : false;
@@ -92,7 +90,7 @@ void Disassembler::VisitAddSubImmediate(Instruction* instr) {
}
-void Disassembler::VisitAddSubShifted(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
@@ -139,7 +137,7 @@ void Disassembler::VisitAddSubShifted(Instruction* instr) {
}
-void Disassembler::VisitAddSubExtended(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubExtended(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
const char *mnemonic = "";
Extend mode = static_cast<Extend>(instr->ExtendMode());
@@ -177,7 +175,7 @@ void Disassembler::VisitAddSubExtended(Instruction* instr) {
}
-void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubWithCarry(Instruction* instr) {
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
const char *form = "'Rd, 'Rn, 'Rm";
@@ -212,7 +210,7 @@ void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
}
-void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
@@ -255,7 +253,7 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
}
-bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
DCHECK((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
@@ -284,7 +282,7 @@ bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
}
-void Disassembler::VisitLogicalShifted(Instruction* instr) {
+void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
@@ -335,7 +333,7 @@ void Disassembler::VisitLogicalShifted(Instruction* instr) {
}
-void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalCompareRegister(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
@@ -350,7 +348,8 @@ void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
}
-void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalCompareImmediate(
+ Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
@@ -365,7 +364,7 @@ void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
}
-void Disassembler::VisitConditionalSelect(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalSelect(Instruction* instr) {
bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
bool rn_is_rm = (instr->Rn() == instr->Rm());
const char *mnemonic = "";
@@ -418,7 +417,7 @@ void Disassembler::VisitConditionalSelect(Instruction* instr) {
}
-void Disassembler::VisitBitfield(Instruction* instr) {
+void DisassemblingDecoder::VisitBitfield(Instruction* instr) {
unsigned s = instr->ImmS();
unsigned r = instr->ImmR();
unsigned rd_size_minus_1 =
@@ -496,7 +495,7 @@ void Disassembler::VisitBitfield(Instruction* instr) {
}
-void Disassembler::VisitExtract(Instruction* instr) {
+void DisassemblingDecoder::VisitExtract(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
@@ -517,7 +516,7 @@ void Disassembler::VisitExtract(Instruction* instr) {
}
-void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+void DisassemblingDecoder::VisitPCRelAddressing(Instruction* instr) {
switch (instr->Mask(PCRelAddressingMask)) {
case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
// ADRP is not implemented.
@@ -526,7 +525,7 @@ void Disassembler::VisitPCRelAddressing(Instruction* instr) {
}
-void Disassembler::VisitConditionalBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalBranch(Instruction* instr) {
switch (instr->Mask(ConditionalBranchMask)) {
case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
default: UNREACHABLE();
@@ -534,7 +533,8 @@ void Disassembler::VisitConditionalBranch(Instruction* instr) {
}
-void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
+ Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Xn";
@@ -554,7 +554,7 @@ void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
}
-void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitUnconditionalBranch(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'BImmUncn";
@@ -567,7 +567,7 @@ void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
}
-void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+void DisassemblingDecoder::VisitDataProcessing1Source(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'Rn";
@@ -588,7 +588,7 @@ void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
}
-void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+void DisassemblingDecoder::VisitDataProcessing2Source(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Rd, 'Rn, 'Rm";
@@ -609,7 +609,7 @@ void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
}
-void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+void DisassemblingDecoder::VisitDataProcessing3Source(Instruction* instr) {
bool ra_is_zr = RaIsZROrSP(instr);
const char *mnemonic = "";
const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
@@ -687,7 +687,7 @@ void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
}
-void Disassembler::VisitCompareBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitCompareBranch(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rt, 'BImmCmpa";
@@ -702,7 +702,7 @@ void Disassembler::VisitCompareBranch(Instruction* instr) {
}
-void Disassembler::VisitTestBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitTestBranch(Instruction* instr) {
const char *mnemonic = "";
// If the top bit of the immediate is clear, the tested register is
// disassembled as Wt, otherwise Xt. As the top bit of the immediate is
@@ -719,7 +719,7 @@ void Disassembler::VisitTestBranch(Instruction* instr) {
}
-void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitMoveWideImmediate(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'IMoveImm";
@@ -758,7 +758,7 @@ void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
V(LDR_s, "ldr", "'St") \
V(LDR_d, "ldr", "'Dt")
-void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePreIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePreIndex)";
@@ -772,7 +772,7 @@ void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePostIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePostIndex)";
@@ -786,7 +786,7 @@ void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStoreUnsignedOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStoreUnsignedOffset)";
@@ -801,7 +801,7 @@ void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStoreRegisterOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStoreRegisterOffset)";
@@ -816,7 +816,7 @@ void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStoreUnscaledOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Wt, ['Xns'ILS]";
const char *form_x = "'Xt, ['Xns'ILS]";
@@ -847,7 +847,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadLiteral(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) {
const char *mnemonic = "ldr";
const char *form = "(LoadLiteral)";
@@ -873,7 +873,7 @@ void Disassembler::VisitLoadLiteral(Instruction* instr) {
V(STP_d, "stp", "'Dt, 'Dt2", "8") \
V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
-void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePairPostIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePairPostIndex)";
@@ -887,7 +887,7 @@ void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePairPreIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePairPreIndex)";
@@ -901,7 +901,7 @@ void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePairOffset)";
@@ -915,7 +915,7 @@ void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
}
-void Disassembler::VisitFPCompare(Instruction* instr) {
+void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm";
const char *form_zero = "'Fn, #0.0";
@@ -931,7 +931,7 @@ void Disassembler::VisitFPCompare(Instruction* instr) {
}
-void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+void DisassemblingDecoder::VisitFPConditionalCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
@@ -946,7 +946,7 @@ void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
}
-void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+void DisassemblingDecoder::VisitFPConditionalSelect(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
@@ -959,7 +959,7 @@ void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
}
-void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fd, 'Fn";
@@ -987,7 +987,7 @@ void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
}
-void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+void DisassemblingDecoder::VisitFPDataProcessing2Source(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Fd, 'Fn, 'Fm";
@@ -1011,7 +1011,7 @@ void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
}
-void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+void DisassemblingDecoder::VisitFPDataProcessing3Source(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
@@ -1030,7 +1030,7 @@ void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
}
-void Disassembler::VisitFPImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitFPImmediate(Instruction* instr) {
const char *mnemonic = "";
const char *form = "(FPImmediate)";
@@ -1043,7 +1043,7 @@ void Disassembler::VisitFPImmediate(Instruction* instr) {
}
-void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(FPIntegerConvert)";
const char *form_rf = "'Rd, 'Fn";
@@ -1099,7 +1099,7 @@ void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
}
-void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+void DisassemblingDecoder::VisitFPFixedPointConvert(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'Fn, 'IFPFBits";
const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
@@ -1126,7 +1126,7 @@ void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
}
-void Disassembler::VisitSystem(Instruction* instr) {
+void DisassemblingDecoder::VisitSystem(Instruction* instr) {
// Some system instructions hijack their Op and Cp fields to represent a
// range of immediates instead of indicating a different instruction. This
// makes the decoding tricky.
@@ -1187,7 +1187,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
-void Disassembler::VisitException(Instruction* instr) {
+void DisassemblingDecoder::VisitException(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'IDebug";
@@ -1206,23 +1206,23 @@ void Disassembler::VisitException(Instruction* instr) {
}
-void Disassembler::VisitUnimplemented(Instruction* instr) {
+void DisassemblingDecoder::VisitUnimplemented(Instruction* instr) {
Format(instr, "unimplemented", "(Unimplemented)");
}
-void Disassembler::VisitUnallocated(Instruction* instr) {
+void DisassemblingDecoder::VisitUnallocated(Instruction* instr) {
Format(instr, "unallocated", "(Unallocated)");
}
-void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+void DisassemblingDecoder::ProcessOutput(Instruction* /*instr*/) {
// The base disasm does nothing more than disassembling into a buffer.
}
-void Disassembler::Format(Instruction* instr, const char* mnemonic,
- const char* format) {
+void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
DCHECK(mnemonic != NULL);
@@ -1237,7 +1237,7 @@ void Disassembler::Format(Instruction* instr, const char* mnemonic,
}
-void Disassembler::Substitute(Instruction* instr, const char* string) {
+void DisassemblingDecoder::Substitute(Instruction* instr, const char* string) {
char chr = *string++;
while (chr != '\0') {
if (chr == '\'') {
@@ -1250,7 +1250,8 @@ void Disassembler::Substitute(Instruction* instr, const char* string) {
}
-int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+int DisassemblingDecoder::SubstituteField(Instruction* instr,
+ const char* format) {
switch (format[0]) {
case 'R': // Register. X or W, selected by sf bit.
case 'F': // FP Register. S or D, selected by type field.
@@ -1276,8 +1277,8 @@ int Disassembler::SubstituteField(Instruction* instr, const char* format) {
}
-int Disassembler::SubstituteRegisterField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
unsigned reg_num = 0;
unsigned field_len = 2;
switch (format[1]) {
@@ -1341,8 +1342,8 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
}
-int Disassembler::SubstituteImmediateField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'I');
switch (format[1]) {
@@ -1452,8 +1453,8 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
}
-int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
DCHECK((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();
@@ -1488,8 +1489,8 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
}
-int Disassembler::SubstituteLiteralField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "LValue", 6) == 0);
USE(format);
@@ -1507,7 +1508,8 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
}
-int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'H');
DCHECK(instr->ShiftDP() <= 0x3);
@@ -1530,8 +1532,8 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
}
-int Disassembler::SubstituteConditionField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
@@ -1551,8 +1553,8 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
}
-int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
USE(format);
DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
@@ -1572,8 +1574,8 @@ int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
}
-int Disassembler::SubstituteBranchTargetField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "BImm", 4) == 0);
int64_t offset = 0;
@@ -1599,8 +1601,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
}
-int Disassembler::SubstituteExtendField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "Ext", 3) == 0);
DCHECK(instr->ExtendMode() <= 7);
USE(format);
@@ -1626,8 +1628,8 @@ int Disassembler::SubstituteExtendField(Instruction* instr,
}
-int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
@@ -1655,8 +1657,8 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
}
-int Disassembler::SubstitutePrefetchField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'P');
USE(format);
@@ -1670,8 +1672,8 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,
return 6;
}
-int Disassembler::SubstituteBarrierField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'M');
USE(format);
@@ -1689,13 +1691,13 @@ int Disassembler::SubstituteBarrierField(Instruction* instr,
}
-void Disassembler::ResetOutput() {
+void DisassemblingDecoder::ResetOutput() {
buffer_pos_ = 0;
buffer_[buffer_pos_] = 0;
}
-void Disassembler::AppendToOutput(const char* format, ...) {
+void DisassemblingDecoder::AppendToOutput(const char* format, ...) {
va_list args;
va_start(args, format);
buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_, format, args);
@@ -1761,7 +1763,7 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-class BufferDisassembler : public v8::internal::Disassembler {
+class BufferDisassembler : public v8::internal::DisassemblingDecoder {
public:
explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
: out_buffer_(out_buffer) { }
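Note: the rename from Disassembler to DisassemblingDecoder (it is a DecoderVisitor, not a standalone disassembler) leaves the visitor API intact, so clients only swap the base class, as BufferDisassembler does above. A minimal sketch of such a client, assuming ProcessOutput remains the virtual per-instruction hook and GetOutput still exposes the formatted text:

    // Hypothetical client: print each decoded instruction to stdout.
    class StdoutDisassembler : public v8::internal::DisassemblingDecoder {
     public:
      virtual void ProcessOutput(v8::internal::Instruction* instr) {
        // GetOutput() returns the text formatted for the current instruction.
        printf("%p  %s\n", reinterpret_cast<void*>(instr), GetOutput());
      }
    };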
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index c6b189bf97..4b477bc438 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
-class Disassembler: public DecoderVisitor {
+class DisassemblingDecoder : public DecoderVisitor {
public:
- Disassembler();
- Disassembler(char* text_buffer, int buffer_size);
- virtual ~Disassembler();
+ DisassemblingDecoder();
+ DisassemblingDecoder(char* text_buffer, int buffer_size);
+ virtual ~DisassemblingDecoder();
char* GetOutput();
// Declare all Visitor functions.
@@ -73,7 +73,7 @@ class Disassembler: public DecoderVisitor {
};
-class PrintDisassembler: public Disassembler {
+class PrintDisassembler : public DisassemblingDecoder {
public:
explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
~PrintDisassembler() { }
@@ -85,6 +85,7 @@ class PrintDisassembler: public Disassembler {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_DISASM_ARM64_H
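Note: the closing-brace change repeated throughout this patch is purely stylistic. The collapsed `} } // namespace v8::internal` closer is split so that each namespace gets its own closing brace and comment:

    namespace v8 {
    namespace internal {
    // ... declarations ...
    }  // namespace internal
    }  // namespace v8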
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 9e6551783d..783514437f 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -63,6 +63,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 145a7c9053..5c652e3ec8 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -532,7 +532,8 @@ enum DebugParameters {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
diff --git a/deps/v8/src/arm64/instrument-arm64.h b/deps/v8/src/arm64/instrument-arm64.h
index 86ddfcbbc1..02816e943e 100644
--- a/deps/v8/src/arm64/instrument-arm64.h
+++ b/deps/v8/src/arm64/instrument-arm64.h
@@ -80,6 +80,7 @@ class Instrument: public DecoderVisitor {
uint64_t sample_period_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_INSTRUMENT_ARM64_H_
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 3dac70e784..4e1b818065 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -78,14 +78,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
@@ -111,6 +103,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return x0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return x0; }
@@ -250,6 +246,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
@@ -446,16 +449,40 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- x0, // argument count (including receiver)
+ x0, // argument count (not including receiver)
x2, // address of first argument
x1 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x0, // argument count (not including receiver)
+ x3, // original constructor
+ x1, // constructor to call
+ x2 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x0, // argument count (argc)
+ x11, // address of first argument (argv)
+ x1 // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
} // namespace internal
} // namespace v8
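Note: taken together, the hunks above pin down the arm64 register conventions for the new interpreter entry points:

    // x0 always carries the argument count; the other operands vary:
    //   InterpreterPushArgsAndCall:      x2 = first-arg address, x1 = callable
    //   InterpreterPushArgsAndConstruct: x3 = original constructor,
    //                                    x1 = constructor, x2 = first-arg address
    //   InterpreterCEntry:               x11 = argv, x1 = runtime function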
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.h b/deps/v8/src/arm64/interface-descriptors-arm64.h
index 76def88326..20ab8cb612 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.h
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.h
@@ -20,7 +20,7 @@ class PlatformInterfaceDescriptor {
private:
TargetAddressStorageMode storage_mode_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 445513bf5a..9b4abe5514 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1683,6 +1683,7 @@ void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 5e8abe7215..5b941a2a5a 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -9,6 +9,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/arm64/frames-arm64.h"
@@ -35,8 +36,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate,
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) {
if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -208,7 +209,7 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
// halfword, and movk for subsequent halfwords.
DCHECK((reg_size % 16) == 0);
bool first_mov_done = false;
- for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
if (imm16 != ignored_halfword) {
if (!first_mov_done) {
@@ -1704,7 +1705,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the builtins object into target register.
Ldr(target, GlobalObjectMemOperand());
- Ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
+ Ldr(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
Ldr(target, ContextMemOperand(target, native_context_index));
}
@@ -2423,9 +2424,10 @@ void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Label* failure) {
DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2));
- static const int kFlatOneByteStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch1, first, kFlatOneByteStringMask);
And(scratch2, second, kFlatOneByteStringMask);
Cmp(scratch1, kFlatOneByteStringTag);
@@ -3000,7 +3002,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
Ldr(dst, GlobalObjectMemOperand());
- Ldr(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+ Ldr(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -3570,6 +3572,14 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
}
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadRoot(temp, index);
+ Push(temp);
+}
+
+
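Note: PushRoot materializes a root-list constant through a scratch register and pushes it, folding a common LoadRoot-then-Push pair into one call. A typical call site might look like this (the root index is real; the call site itself is invented for illustration):

    // Push 'undefined' from the root list onto the stack.
    __ PushRoot(Heap::kUndefinedValueRootIndex);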
void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
@@ -3772,7 +3782,8 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
Ldr(scratch1, FieldMemOperand(scratch1, offset));
- Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -3984,14 +3995,18 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
- FPRegister::kAllocatableFPRegisters));
+ PushCPURegList(CPURegList(
+ CPURegister::kFPRegister, kDRegSizeInBits,
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask()));
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
- FPRegister::kAllocatableFPRegisters));
+ PopCPURegList(CPURegList(
+ CPURegister::kFPRegister, kDRegSizeInBits,
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask()));
PopSafepointRegisters();
}
@@ -4602,7 +4617,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Label* no_map_match) {
// Load the global or builtins object from the current context.
Ldr(scratch1, GlobalObjectMemOperand());
- Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
@@ -4621,8 +4637,8 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
Ldr(function, GlobalObjectMemOperand());
// Load the native context from the global or builtins object.
- Ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
+ Ldr(function,
+ FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
Ldr(function, ContextMemOperand(function, index));
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 769140d917..2747397993 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -44,6 +44,7 @@ namespace internal {
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
+#define kJavaScriptCallArgCountRegister x0
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@@ -1461,6 +1462,9 @@ class MacroAssembler : public Assembler {
// register.
void LoadElementsKindFromMap(Register result, Register map);
+ // Load the value from the root list and push it onto the stack.
+ void PushRoot(Heap::RootListIndex index);
+
// Compare the object in a register to a value from the root list.
void CompareRoot(const Register& obj, Heap::RootListIndex index);
@@ -2278,7 +2282,8 @@ class InlineSmiCheckInfo {
class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index e4d9a81ffd..3d7c15cfd0 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -17,12 +17,6 @@
#include "src/globals.h"
#include "src/utils.h"
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
namespace v8 {
namespace internal {
@@ -911,6 +905,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#endif // !defined(USE_SIMULATOR)
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_SIMULATOR_ARM64_H_
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index da91fd5d60..1e1c0a33c2 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -9,12 +9,6 @@
#include "src/arm64/constants-arm64.h"
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
namespace v8 {
namespace internal {
@@ -151,6 +145,7 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_UTILS_ARM64_H_
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index dd05a07750..8571f33176 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -46,13 +46,16 @@
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
+#include "src/disassembler.h"
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/ostreams.h"
#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serialize.h"
@@ -105,6 +108,39 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
+// Common register code.
+
+const char* Register::ToString() {
+ // Map the register code to its printable name.
+ DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
+ return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->GetGeneralRegisterName(reg_code);
+}
+
+
+bool Register::IsAllocatable() const {
+ return ((1 << reg_code) &
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_general_codes_mask()) != 0;
+}
+
+
+const char* DoubleRegister::ToString() {
+ // Map the register code to its printable name.
+ DCHECK(reg_code >= 0 && reg_code < kMaxNumRegisters);
+ return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->GetDoubleRegisterName(reg_code);
+}
+
+
+bool DoubleRegister::IsAllocatable() const {
+ return ((1 << reg_code) &
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask()) != 0;
+}
+
+
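Note: IsAllocatable is a plain bitmask membership test against the active RegisterConfiguration. A worked example with a made-up mask value:

    const int mask = 0xB;                     // binary 1011: codes 0, 1, 3 allocatable
    bool r2_ok = ((1 << 2) & mask) != 0;      // false: bit 2 is clear
    bool r3_ok = ((1 << 3) & mask) != 0;      // true:  bit 3 is set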
+// -----------------------------------------------------------------------------
// Common double constants.
struct DoubleConstant BASE_EMBEDDED {
@@ -181,6 +217,12 @@ void AssemblerBase::FlushICacheWithoutIsolate(void* start, size_t size) {
}
+void AssemblerBase::Print() {
+ OFStream os(stdout);
+ v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
+}
+
+
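Note: AssemblerBase::Print is a debugging aid: it decodes whatever has been emitted into the buffer so far (buffer_ up to pc_) and streams the disassembly to stdout, which makes it convenient to invoke from a debugger. A hypothetical gdb session, with masm standing in for any live assembler:

    // (gdb) call masm.Print()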
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
@@ -1407,9 +1449,21 @@ ExternalReference
}
-ExternalReference ExternalReference::vector_store_virtual_register(
+ExternalReference ExternalReference::virtual_handler_register(
+ Isolate* isolate) {
+ return ExternalReference(isolate->virtual_handler_register_address());
+}
+
+
+ExternalReference ExternalReference::virtual_slot_register(Isolate* isolate) {
+ return ExternalReference(isolate->virtual_slot_register_address());
+}
+
+
+ExternalReference ExternalReference::runtime_function_table_address(
Isolate* isolate) {
- return ExternalReference(isolate->vector_store_virtual_register_address());
+ return ExternalReference(
+ const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 433b9b8456..1243adf468 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -49,6 +49,7 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class SourcePosition;
class StatsCounter;
// -----------------------------------------------------------------------------
@@ -99,6 +100,9 @@ class AssemblerBase: public Malloced {
// the assembler could clean up internal data structures.
virtual void AbortedCodeGeneration() { }
+ // Debugging
+ void Print();
+
static const int kMinimalBufferSize = 4*KB;
static void FlushICache(Isolate* isolate, void* start, size_t size);
@@ -319,6 +323,8 @@ class Label {
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+enum ArgvMode { kArgvOnStack, kArgvInRegister };
+
// Specifies whether to perform icache flush operations on RelocInfo updates.
// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be
@@ -659,11 +665,6 @@ class RelocInfo {
Mode rmode_;
intptr_t data_;
Code* host_;
- // External-reference pointers are also split across instruction-pairs
- // on some platforms, but are accessed via indirect pointers. This location
- // provides a place for that pointer to exist naturally. Its address
- // is returned by RelocInfo::target_reference_address().
- Address reconstructed_adr_ptr_;
friend class RelocIterator;
};
@@ -990,7 +991,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
- static ExternalReference vector_store_virtual_register(Isolate* isolate);
+ static ExternalReference virtual_handler_register(Isolate* isolate);
+ static ExternalReference virtual_slot_register(Isolate* isolate);
+
+ static ExternalReference runtime_function_table_address(Isolate* isolate);
Address address() const { return reinterpret_cast<Address>(address_); }
@@ -1276,7 +1280,6 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
-
-} } // namespace v8::internal
-
+} // namespace internal
+} // namespace v8
#endif // V8_ASSEMBLER_H_
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 8757a32910..84e6990b04 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -170,6 +170,7 @@ typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
// Scope to introduce an exception to DisallowDeoptimization.
typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
AllowCompilation;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ASSERT_SCOPE_H_
diff --git a/deps/v8/src/ast-expression-visitor.cc b/deps/v8/src/ast-expression-visitor.cc
index 782d4bbca6..e38b444699 100644
--- a/deps/v8/src/ast-expression-visitor.cc
+++ b/deps/v8/src/ast-expression-visitor.cc
@@ -32,14 +32,20 @@ namespace internal {
} while (false)
-AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Zone* zone,
- FunctionLiteral* root)
+AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Expression* root)
: root_(root), depth_(0) {
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
-void AstExpressionVisitor::Run() { RECURSE(VisitFunctionLiteral(root_)); }
+AstExpressionVisitor::AstExpressionVisitor(uintptr_t stack_limit,
+ Expression* root)
+ : root_(root), depth_(0) {
+ InitializeAstVisitor(stack_limit);
+}
+
+
+void AstExpressionVisitor::Run() { RECURSE(Visit(root_)); }
void AstExpressionVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {
@@ -196,6 +202,12 @@ void AstExpressionVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {}
+void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
+ RECURSE(VisitBlock(expr->block()));
+ RECURSE(VisitVariableProxy(expr->result()));
+}
+
+
void AstExpressionVisitor::VisitConditional(Conditional* expr) {
RECURSE(Visit(expr->condition()));
RECURSE(Visit(expr->then_expression()));
@@ -223,6 +235,9 @@ void AstExpressionVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
ZoneList<ObjectLiteralProperty*>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ RECURSE_EXPRESSION(Visit(prop->key()));
+ }
RECURSE_EXPRESSION(Visit(prop->value()));
}
}
@@ -336,21 +351,47 @@ void AstExpressionVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
}
-void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {}
+void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {
+ VisitExpression(expr);
+ if (expr->extends() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->extends()));
+ }
+ RECURSE_EXPRESSION(Visit(expr->constructor()));
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ RECURSE_EXPRESSION(Visit(prop->key()));
+ }
+ RECURSE_EXPRESSION(Visit(prop->value()));
+ }
+}
-void AstExpressionVisitor::VisitSpread(Spread* expr) {}
+void AstExpressionVisitor::VisitSpread(Spread* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
void AstExpressionVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
void AstExpressionVisitor::VisitSuperPropertyReference(
- SuperPropertyReference* expr) {}
+ SuperPropertyReference* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+ RECURSE_EXPRESSION(Visit(expr->home_object()));
+}
-void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {}
+void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ast-expression-visitor.h b/deps/v8/src/ast-expression-visitor.h
index 43b34bac79..a4bf34d63f 100644
--- a/deps/v8/src/ast-expression-visitor.h
+++ b/deps/v8/src/ast-expression-visitor.h
@@ -21,7 +21,8 @@ namespace internal {
class AstExpressionVisitor : public AstVisitor {
public:
- AstExpressionVisitor(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ AstExpressionVisitor(Isolate* isolate, Expression* root);
+ AstExpressionVisitor(uintptr_t stack_limit, Expression* root);
void Run();
protected:
@@ -34,16 +35,16 @@ class AstExpressionVisitor : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- FunctionLiteral* root_;
+ Expression* root_;
int depth_;
DISALLOW_COPY_AND_ASSIGN(AstExpressionVisitor);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_AST_EXPRESSION_VISITOR_H_
diff --git a/deps/v8/src/ast-literal-reindexer.cc b/deps/v8/src/ast-literal-reindexer.cc
index e5729c7818..5987399f97 100644
--- a/deps/v8/src/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast-literal-reindexer.cc
@@ -43,6 +43,11 @@ void AstLiteralReindexer::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {}
+void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
+ // TODO(caitp): literals in do expressions need re-indexing too.
+}
+
+
void AstLiteralReindexer::VisitLiteral(Literal* node) {}
@@ -316,5 +321,5 @@ void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
void AstLiteralReindexer::Reindex(Expression* pattern) {
pattern->Accept(this);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ast-literal-reindexer.h b/deps/v8/src/ast-literal-reindexer.h
index 2fe920b7c4..14f64f6ef1 100644
--- a/deps/v8/src/ast-literal-reindexer.h
+++ b/deps/v8/src/ast-literal-reindexer.h
@@ -20,7 +20,7 @@ class AstLiteralReindexer final : public AstVisitor {
int NextIndex() { return next_index_++; }
private:
-#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DEFINE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
@@ -39,7 +39,7 @@ class AstLiteralReindexer final : public AstVisitor {
DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_AST_LITERAL_REINDEXER
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast-numbering.cc
index 55eaacd1f5..8479191b5e 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast-numbering.cc
@@ -14,18 +14,20 @@ class AstNumberingVisitor final : public AstVisitor {
public:
AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
+ isolate_(isolate),
+ zone_(zone),
next_id_(BailoutId::FirstUsable().ToInt()),
properties_(zone),
- ic_slot_cache_(zone),
+ slot_cache_(zone),
dont_optimize_reason_(kNoReason) {
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
bool Renumber(FunctionLiteral* node);
private:
// AST node visitor interface.
-#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DEFINE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
@@ -65,16 +67,18 @@ class AstNumberingVisitor final : public AstVisitor {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
- node->AssignFeedbackVectorSlots(isolate(), properties_.get_spec(),
- &ic_slot_cache_);
+ node->AssignFeedbackVectorSlots(isolate_, properties_.get_spec(),
+ &slot_cache_);
}
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
+ Isolate* isolate_;
+ Zone* zone_;
int next_id_;
AstProperties properties_;
- // The slot cache allows us to reuse certain vector IC slots.
- ICSlotCache ic_slot_cache_;
+ // The slot cache allows us to reuse certain feedback vector slots.
+ FeedbackVectorSlotCache slot_cache_;
BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -132,6 +136,15 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
}
+void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
+ IncrementNodeCount();
+ DisableCrankshaft(kDoExpression);
+ node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
+ Visit(node->block());
+ Visit(node->result());
+}
+
+
void AstNumberingVisitor::VisitLiteral(Literal* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Literal::num_ids()));
@@ -466,11 +479,11 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
}
- node->BuildConstantProperties(isolate());
+ node->BuildConstantProperties(isolate_);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code will be emitted.
- node->CalculateEmitStore(zone());
+ node->CalculateEmitStore(zone_);
ReserveFeedbackSlots(node);
}
@@ -489,6 +502,8 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
+ node->BuildConstantElements(isolate_);
+ ReserveFeedbackSlots(node);
}
diff --git a/deps/v8/src/ast-value-factory.cc b/deps/v8/src/ast-value-factory.cc
index fbcde8b457..8a4a4daf0c 100644
--- a/deps/v8/src/ast-value-factory.cc
+++ b/deps/v8/src/ast-value-factory.cc
@@ -50,7 +50,7 @@ class OneByteStringStream {
int pos_;
};
-}
+} // namespace
class AstRawStringInternalizationKey : public HashTableKey {
public:
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index 69fc6cc2f4..645b8b6631 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -255,6 +255,7 @@ class AstValue : public ZoneObject {
F(dot_module, ".module") \
F(dot_result, ".result") \
F(dot_switch_tag, ".switch_tag") \
+ F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
F(let, "let") \
@@ -366,7 +367,8 @@ class AstValueFactory {
OTHER_CONSTANTS(F)
#undef F
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#undef STRING_CONSTANTS
#undef OTHER_CONSTANTS
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 3292b1d50b..b5c6cf57ea 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -71,7 +71,6 @@ VariableProxy::VariableProxy(Zone* zone, Variable* var, int start_position,
bit_field_(IsThisField::encode(var->is_this()) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
- variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(var->raw_name()),
end_position_(end_position) {
BindTo(var);
@@ -85,7 +84,6 @@ VariableProxy::VariableProxy(Zone* zone, const AstRawString* name,
bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
- variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(name),
end_position_(end_position) {}
@@ -100,14 +98,14 @@ void VariableProxy::BindTo(Variable* var) {
void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
if (var()->IsUnallocated()) {
ZoneHashMap::Entry* entry = cache->Get(var());
if (entry != NULL) {
- variable_feedback_slot_ = FeedbackVectorICSlot(
+ variable_feedback_slot_ = FeedbackVectorSlot(
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
return;
}
@@ -121,7 +119,7 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
- FeedbackVectorICSlot* out_slot) {
+ FeedbackVectorSlot* out_slot) {
if (FLAG_vector_stores) {
Property* property = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -138,9 +136,9 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
}
-void ForEachStatement::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+void ForEachStatement::AssignFeedbackVectorSlots(
+ Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
AssignVectorSlots(each(), spec, &each_slot_);
}
@@ -153,20 +151,19 @@ Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
target_(target),
value_(value),
- binary_operation_(NULL),
- slot_(FeedbackVectorICSlot::Invalid()) {}
+ binary_operation_(NULL) {}
void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
AssignVectorSlots(target(), spec, &slot_);
}
void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
AssignVectorSlots(expression(), spec, &slot_);
}
@@ -227,7 +224,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
bool is_computed_name)
: key_(key),
value_(value),
- slot_(FeedbackVectorICSlot::Invalid()),
kind_(kind),
emit_store_(true),
is_static_(is_static),
@@ -240,7 +236,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
bool is_computed_name)
: key_(key),
value_(value),
- slot_(FeedbackVectorICSlot::Invalid()),
emit_store_(true),
is_static_(is_static),
is_computed_name_(is_computed_name) {
@@ -260,7 +255,7 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
if (!FLAG_vector_stores) return;
// This logic that computes the number of slots needed for vector store
@@ -273,7 +268,7 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
ObjectLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
}
}
@@ -298,7 +293,7 @@ bool ObjectLiteral::Property::emit_store() {
void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
if (!FLAG_vector_stores) return;
// This logic that computes the number of slots needed for vector store
@@ -321,27 +316,27 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
if (FunctionLiteral::NeedsHomeObject(value)) {
- spec->AddStoreICSlot();
+ property->SetSlot(spec->AddStoreICSlot(), 1);
}
}
break;
}
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
break;
case ObjectLiteral::Property::PROTOTYPE:
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
break;
}
@@ -353,7 +348,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
Expression* value = property->value();
if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
}
}
@@ -552,6 +547,27 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
}
+void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ if (!FLAG_vector_stores) return;
+
+ // This logic that computes the number of slots needed for vector store
+ // ics must mirror FullCodeGenerator::VisitArrayLiteral.
+ int array_index = 0;
+ for (; array_index < values()->length(); array_index++) {
+ Expression* subexpr = values()->at(array_index);
+ if (subexpr->IsSpread()) break;
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ // We'll reuse the same literal slot for all of the non-constant
+ // subexpressions that use a keyed store IC.
+ literal_slot_ = spec->AddKeyedStoreICSlot();
+ return;
+ }
+}
+
+
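Note: the early return above gives the whole array literal at most one keyed-store slot:

    // Example: [1, x, y]
    //   1 -> CompileTimeValue, skipped via continue
    //   x -> allocates the single keyed-store IC slot, then return
    //   y -> shares literal_slot_ with x at codegen time
    // A spread element stops the scan entirely (break before any allocation).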
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
Isolate* isolate) {
if (expression->IsLiteral()) {
@@ -720,12 +736,12 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
if (IsUsingCallFeedbackICSlot(isolate)) {
ic_slot_ = spec->AddCallICSlot();
}
if (IsUsingCallFeedbackSlot(isolate)) {
- slot_ = spec->AddStubSlot();
+ stub_slot_ = spec->AddGeneralSlot();
}
}
@@ -745,7 +761,16 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
if (expression()->IsSuperCallReference()) return SUPER_CALL;
Property* property = expression()->AsProperty();
- return property != NULL ? PROPERTY_CALL : OTHER_CALL;
+ if (property != nullptr) {
+ bool is_super = property->IsSuperAccess();
+ if (property->key()->IsPropertyName()) {
+ return is_super ? NAMED_SUPER_PROPERTY_CALL : NAMED_PROPERTY_CALL;
+ } else {
+ return is_super ? KEYED_SUPER_PROPERTY_CALL : KEYED_PROPERTY_CALL;
+ }
+ }
+
+ return OTHER_CALL;
}
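Note: this refinement replaces the single PROPERTY_CALL bucket with four, keyed on named-versus-keyed access and on super access:

    // o.f(x)      -> NAMED_PROPERTY_CALL
    // o[k](x)     -> KEYED_PROPERTY_CALL
    // super.f(x)  -> NAMED_SUPER_PROPERTY_CALL
    // super[k](x) -> KEYED_SUPER_PROPERTY_CALL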
@@ -917,8 +942,7 @@ class RegExpUnparser final : public RegExpVisitor {
public:
RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
void VisitCharacterRange(CharacterRange that);
-#define MAKE_CASE(Name) \
- virtual void* Visit##Name(RegExp##Name*, void* data) override;
+#define MAKE_CASE(Name) void* Visit##Name(RegExp##Name*, void* data) override;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 4764918849..14f71a6cc2 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -90,7 +90,8 @@ namespace internal {
V(SuperPropertyReference) \
V(SuperCallReference) \
V(CaseClause) \
- V(EmptyParentheses)
+ V(EmptyParentheses) \
+ V(DoExpression)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@@ -138,14 +139,14 @@ typedef ZoneList<Handle<Object>> ZoneObjectList;
friend class AstNodeFactory;
-class ICSlotCache {
+class FeedbackVectorSlotCache {
public:
- explicit ICSlotCache(Zone* zone)
+ explicit FeedbackVectorSlotCache(Zone* zone)
: zone_(zone),
hash_map_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)) {}
- void Put(Variable* variable, FeedbackVectorICSlot slot) {
+ void Put(Variable* variable, FeedbackVectorSlot slot) {
ZoneHashMap::Entry* entry = hash_map_.LookupOrInsert(
variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
entry->value = reinterpret_cast<void*>(slot.ToInt());
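Note: the cache smuggles slot indices through the ZoneHashMap's void* value field, so it stores plain ints disguised as pointers, never real heap references. The read side, visible in the VariableProxy hunk of ast.cc above, reverses the cast:

    // Store:  entry->value = reinterpret_cast<void*>(slot.ToInt());
    // Load:   FeedbackVectorSlot(
    //             static_cast<int>(reinterpret_cast<intptr_t>(entry->value)))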
@@ -230,7 +231,7 @@ class AstNode: public ZoneObject {
// vtable entry per node, something we don't want for space reasons.
virtual void AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {}
+ FeedbackVectorSlotCache* cache) {}
private:
// Hidden to prevent accidental usage. It would have to load the
@@ -458,10 +459,6 @@ class Block final : public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
- void AddStatement(Statement* statement, Zone* zone) {
- statements_.Add(statement, zone);
- }
-
ZoneList<Statement*>* statements() { return &statements_; }
bool ignore_completion_value() const { return ignore_completion_value_; }
@@ -494,6 +491,29 @@ class Block final : public BreakableStatement {
};
+class DoExpression final : public Expression {
+ public:
+ DECLARE_NODE_TYPE(DoExpression)
+
+ Block* block() { return block_; }
+ VariableProxy* result() { return result_; }
+
+ protected:
+ DoExpression(Zone* zone, Block* block, VariableProxy* result, int pos)
+ : Expression(zone, pos), block_(block), result_(result) {
+ DCHECK_NOT_NULL(block_);
+ DCHECK_NOT_NULL(result_);
+ }
+ static int parent_num_ids() { return Expression::num_ids(); }
+
+ private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ Block* block_;
+ VariableProxy* result_;
+};
+
+
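Note: DoExpression is the AST node behind the do-expression desugaring: the parser lowers the body into a Block whose completion value is written to a synthesized variable (presumably the existing `.result` name from ast-value-factory.h), and result() is the proxy that reads it back. A sketch of the shape, where the lowering details are assumptions:

    //   do { stmts; e }  ==>  block_  = { stmts; .result = e; }
    //                         result_ = VariableProxy(".result")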
class Declaration : public AstNode {
public:
VariableProxy* proxy() const { return proxy_; }
@@ -640,6 +660,7 @@ class IterationStatement : public BreakableStatement {
IterationStatement* AsIterationStatement() final { return this; }
Statement* body() const { return body_; }
+ void set_body(Statement* s) { body_ = s; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId OsrEntryId() const { return BailoutId(local_id(0)); }
@@ -777,20 +798,17 @@ class ForEachStatement : public IterationStatement {
Expression* subject() const { return subject_; }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
- FeedbackVectorICSlot EachFeedbackSlot() const { return each_slot_; }
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
protected:
ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos),
- each_(NULL),
- subject_(NULL),
- each_slot_(FeedbackVectorICSlot::Invalid()) {}
+ : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
private:
Expression* each_;
Expression* subject_;
- FeedbackVectorICSlot each_slot_;
+ FeedbackVectorSlot each_slot_;
};
@@ -804,9 +822,9 @@ class ForInStatement final : public ForEachStatement {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) override {
ForEachStatement::AssignFeedbackVectorSlots(isolate, spec, cache);
- for_in_feedback_slot_ = spec->AddStubSlot();
+ for_in_feedback_slot_ = spec->AddGeneralSlot();
}
FeedbackVectorSlot ForInFeedbackSlot() {
@@ -830,9 +848,7 @@ class ForInStatement final : public ForEachStatement {
protected:
ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : ForEachStatement(zone, labels, pos),
- for_in_type_(SLOW_FOR_IN),
- for_in_feedback_slot_(FeedbackVectorSlot::Invalid()) {}
+ : ForEachStatement(zone, labels, pos), for_in_type_(SLOW_FOR_IN) {}
static int parent_num_ids() { return ForEachStatement::num_ids(); }
private:
@@ -988,6 +1004,7 @@ class WithStatement final : public Statement {
Scope* scope() { return scope_; }
Expression* expression() const { return expression_; }
Statement* statement() const { return statement_; }
+ void set_statement(Statement* s) { statement_ = s; }
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 1; }
@@ -1092,6 +1109,9 @@ class IfStatement final : public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ void set_then_statement(Statement* s) { then_statement_ = s; }
+ void set_else_statement(Statement* s) { else_statement_ = s; }
+
bool IsJump() const override {
return HasThenStatement() && then_statement()->IsJump()
&& HasElseStatement() && else_statement()->IsJump();
@@ -1131,6 +1151,7 @@ class IfStatement final : public Statement {
class TryStatement : public Statement {
public:
Block* try_block() const { return try_block_; }
+ void set_try_block(Block* b) { try_block_ = b; }
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 1; }
@@ -1163,6 +1184,7 @@ class TryCatchStatement final : public TryStatement {
Scope* scope() { return scope_; }
Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
+ void set_catch_block(Block* b) { catch_block_ = b; }
protected:
TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
@@ -1184,6 +1206,7 @@ class TryFinallyStatement final : public TryStatement {
DECLARE_NODE_TYPE(TryFinallyStatement)
Block* finally_block() const { return finally_block_; }
+ void set_finally_block(Block* b) { finally_block_ = b; }
protected:
TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
@@ -1301,7 +1324,7 @@ class AstLiteralReindexer;
// Base class for literals that needs space in the corresponding JSFunction.
class MaterializedLiteral : public Expression {
public:
- virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
+ MaterializedLiteral* AsMaterializedLiteral() final { return this; }
int literal_index() { return literal_index_; }
@@ -1383,13 +1406,14 @@ class ObjectLiteralProperty final : public ZoneObject {
bool is_static() const { return is_static_; }
bool is_computed_name() const { return is_computed_name_; }
- FeedbackVectorICSlot GetSlot(int offset = 0) const {
- if (slot_.IsInvalid()) return slot_;
- int slot = slot_.ToInt();
- return FeedbackVectorICSlot(slot + offset);
+ FeedbackVectorSlot GetSlot(int offset = 0) const {
+ DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+ return slots_[offset];
+ }
+ void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
+ DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+ slots_[offset] = slot;
}
- FeedbackVectorICSlot slot() const { return slot_; }
- void set_slot(FeedbackVectorICSlot slot) { slot_ = slot; }
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
@@ -1405,7 +1429,7 @@ class ObjectLiteralProperty final : public ZoneObject {
private:
Expression* key_;
Expression* value_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slots_[2];
Kind kind_;
bool emit_store_;
bool is_static_;
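Note: the single slot_ member becomes a two-entry array so a property can carry a second store-IC slot. As the ast.cc hunk above shows with SetSlot(spec->AddStoreICSlot(), 1), the layout is:

    // slots_[0]: store IC slot for the property value itself
    // slots_[1]: store IC slot for the home object, when the value needs one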
@@ -1486,7 +1510,7 @@ class ObjectLiteral final : public MaterializedLiteral {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
@@ -1498,9 +1522,7 @@ class ObjectLiteral final : public MaterializedLiteral {
fast_elements_(false),
has_elements_(false),
may_store_doubles_(false),
- has_function_(has_function),
- slot_(FeedbackVectorICSlot::Invalid()) {
- }
+ has_function_(has_function) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@@ -1512,7 +1534,28 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_elements_;
bool may_store_doubles_;
bool has_function_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
+};
+
+
+// A map from property names to getter/setter pairs allocated in the zone.
+class AccessorTable : public TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy> {
+ public:
+ explicit AccessorTable(Zone* zone)
+ : TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy>(Literal::Match,
+ ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
+
+ Iterator lookup(Literal* literal) {
+ Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
+ if (it->second == NULL) it->second = new (zone_) ObjectLiteral::Accessors();
+ return it;
+ }
+
+ private:
+ Zone* zone_;
};
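Note: AccessorTable::lookup allocates the Accessors pair on first use, so a getter and a setter that share a key land in one entry. A sketch of the intended use, assuming the getter/setter members of ObjectLiteral::Accessors and hypothetical locals key, getter_fn and setter_fn:

    AccessorTable accessor_table(zone);
    accessor_table.lookup(key)->second->getter = getter_fn;
    accessor_table.lookup(key)->second->setter = setter_fn;
    // Both lookups return the same zone-allocated Accessors entry.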
@@ -1586,6 +1629,10 @@ class ArrayLiteral final : public MaterializedLiteral {
kIsStrong = 1 << 2
};
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
+
protected:
ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
int first_spread_index, int literal_index, bool is_strong,
@@ -1601,6 +1648,7 @@ class ArrayLiteral final : public MaterializedLiteral {
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
int first_spread_index_;
+ FeedbackVectorSlot literal_slot_;
};
@@ -1656,11 +1704,9 @@ class VariableProxy final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
- FeedbackVectorICSlot VariableFeedbackSlot() {
- return variable_feedback_slot_;
- }
+ FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BeforeId() const { return BailoutId(local_id(0)); }
@@ -1683,7 +1729,7 @@ class VariableProxy final : public Expression {
// Start with 16-bit (or smaller) field, which should get packed together
// with Expression's trailing 16-bit field.
uint8_t bit_field_;
- FeedbackVectorICSlot variable_feedback_slot_;
+ FeedbackVectorSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
@@ -1755,14 +1801,14 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) override {
FeedbackVectorSlotKind kind = key()->IsPropertyName()
? FeedbackVectorSlotKind::LOAD_IC
: FeedbackVectorSlotKind::KEYED_LOAD_IC;
property_feedback_slot_ = spec->AddSlot(kind);
}
- FeedbackVectorICSlot PropertyFeedbackSlot() const {
+ FeedbackVectorSlot PropertyFeedbackSlot() const {
return property_feedback_slot_;
}
@@ -1780,7 +1826,6 @@ class Property final : public Expression {
bit_field_(IsForCallField::encode(false) |
IsStringAccessField::encode(false) |
InlineCacheStateField::encode(UNINITIALIZED)),
- property_feedback_slot_(FeedbackVectorICSlot::Invalid()),
obj_(obj),
key_(key) {}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -1793,7 +1838,7 @@ class Property final : public Expression {
class KeyTypeField : public BitField8<IcCheckType, 2, 1> {};
class InlineCacheStateField : public BitField8<InlineCacheState, 3, 4> {};
uint8_t bit_field_;
- FeedbackVectorICSlot property_feedback_slot_;
+ FeedbackVectorSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
SmallMapList receiver_types_;
@@ -1809,11 +1854,11 @@ class Call final : public Expression {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
- FeedbackVectorSlot CallFeedbackSlot() const { return slot_; }
+ FeedbackVectorSlot CallFeedbackSlot() const { return stub_slot_; }
- FeedbackVectorICSlot CallFeedbackICSlot() const { return ic_slot_; }
+ FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
SmallMapList* GetReceiverTypes() override {
if (expression()->IsProperty()) {
@@ -1851,10 +1896,11 @@ class Call final : public Expression {
allocation_site_ = site;
}
- static int num_ids() { return parent_num_ids() + 3; }
+ static int num_ids() { return parent_num_ids() + 4; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
BailoutId EvalId() const { return BailoutId(local_id(1)); }
BailoutId LookupId() const { return BailoutId(local_id(2)); }
+ BailoutId CallId() const { return BailoutId(local_id(3)); }
bool is_uninitialized() const {
return IsUninitializedField::decode(bit_field_);
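
The hunk above grows Call's id block from 3 to 4 so the new CallId() gets a bailout id of its own. A toy model of the base_id / parent_num_ids / local_id scheme (class names and the base's id count are illustrative, not V8's):

#include <cassert>

struct ExpressionIds {
  static int num_ids() { return 2; }  // ids owned by the base class (illustrative)
};

struct CallIds : ExpressionIds {
  explicit CallIds(int base) : base_id_(base) {}
  static int parent_num_ids() { return ExpressionIds::num_ids(); }
  static int num_ids() { return parent_num_ids() + 4; }  // was + 3 before the patch
  int ReturnId() const { return local_id(0); }
  int CallId() const { return local_id(3); }  // the newly reserved id

 private:
  int local_id(int n) const { return base_id_ + parent_num_ids() + n; }
  int base_id_;
};

int main() {
  CallIds c(100);
  assert(c.ReturnId() == 102);  // base + parent ids + 0
  assert(c.CallId() == 105);    // base + parent ids + 3
  return 0;
}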
@@ -1867,7 +1913,10 @@ class Call final : public Expression {
POSSIBLY_EVAL_CALL,
GLOBAL_CALL,
LOOKUP_SLOT_CALL,
- PROPERTY_CALL,
+ NAMED_PROPERTY_CALL,
+ KEYED_PROPERTY_CALL,
+ NAMED_SUPER_PROPERTY_CALL,
+ KEYED_SUPER_PROPERTY_CALL,
SUPER_CALL,
OTHER_CALL
};
@@ -1886,8 +1935,6 @@ class Call final : public Expression {
Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
int pos)
: Expression(zone, pos),
- ic_slot_(FeedbackVectorICSlot::Invalid()),
- slot_(FeedbackVectorSlot::Invalid()),
expression_(expression),
arguments_(arguments),
bit_field_(IsUninitializedField::encode(false)) {
@@ -1900,8 +1947,8 @@ class Call final : public Expression {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- FeedbackVectorICSlot ic_slot_;
- FeedbackVectorSlot slot_;
+ FeedbackVectorSlot ic_slot_;
+ FeedbackVectorSlot stub_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@@ -1920,8 +1967,8 @@ class CallNew final : public Expression {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
- callnew_feedback_slot_ = spec->AddStubSlot();
+ FeedbackVectorSlotCache* cache) override {
+ callnew_feedback_slot_ = spec->AddGeneralSlot();
}
FeedbackVectorSlot CallNewFeedbackSlot() {
@@ -1955,8 +2002,7 @@ class CallNew final : public Expression {
: Expression(zone, pos),
expression_(expression),
arguments_(arguments),
- is_monomorphic_(false),
- callnew_feedback_slot_(FeedbackVectorSlot::Invalid()) {}
+ is_monomorphic_(false) {}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -2035,7 +2081,7 @@ class UnaryOperation final : public Expression {
BailoutId MaterializeTrueId() const { return BailoutId(local_id(0)); }
BailoutId MaterializeFalseId() const { return BailoutId(local_id(1)); }
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
protected:
UnaryOperation(Zone* zone, Token::Value op, Expression* expression, int pos)
@@ -2080,7 +2126,7 @@ class BinaryOperation final : public Expression {
if (arg.IsJust()) fixed_right_arg_value_ = arg.FromJust();
}
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
protected:
BinaryOperation(Zone* zone, Token::Value op, Expression* left,
@@ -2151,8 +2197,8 @@ class CountOperation final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
- FeedbackVectorICSlot CountSlot() const { return slot_; }
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot CountSlot() const { return slot_; }
protected:
CountOperation(Zone* zone, Token::Value op, bool is_prefix, Expression* expr,
@@ -2162,8 +2208,7 @@ class CountOperation final : public Expression {
IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
type_(NULL),
- expression_(expr),
- slot_(FeedbackVectorICSlot::Invalid()) {}
+ expression_(expr) {}
static int parent_num_ids() { return Expression::num_ids(); }
private:
@@ -2180,7 +2225,7 @@ class CountOperation final : public Expression {
Type* type_;
Expression* expression_;
SmallMapList receiver_types_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
@@ -2324,8 +2369,8 @@ class Assignment final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
- FeedbackVectorICSlot AssignmentSlot() const { return slot_; }
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot AssignmentSlot() const { return slot_; }
protected:
Assignment(Zone* zone, Token::Value op, Expression* target, Expression* value,
@@ -2347,7 +2392,7 @@ class Assignment final : public Expression {
Expression* value_;
BinaryOperation* binary_operation_;
SmallMapList receiver_types_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
@@ -2369,23 +2414,22 @@ class Yield final : public Expression {
// Type feedback information.
bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) override {
if (HasFeedbackSlots()) {
yield_first_feedback_slot_ = spec->AddKeyedLoadICSlot();
- spec->AddLoadICSlots(2);
+ keyed_load_feedback_slot_ = spec->AddLoadICSlot();
+ done_feedback_slot_ = spec->AddLoadICSlot();
}
}
- FeedbackVectorICSlot KeyedLoadFeedbackSlot() {
+ FeedbackVectorSlot KeyedLoadFeedbackSlot() {
DCHECK(!HasFeedbackSlots() || !yield_first_feedback_slot_.IsInvalid());
return yield_first_feedback_slot_;
}
- FeedbackVectorICSlot DoneFeedbackSlot() {
- return KeyedLoadFeedbackSlot().next();
- }
+ FeedbackVectorSlot DoneFeedbackSlot() { return keyed_load_feedback_slot_; }
- FeedbackVectorICSlot ValueFeedbackSlot() { return DoneFeedbackSlot().next(); }
+ FeedbackVectorSlot ValueFeedbackSlot() { return done_feedback_slot_; }
protected:
Yield(Zone* zone, Expression* generator_object, Expression* expression,
@@ -2393,14 +2437,15 @@ class Yield final : public Expression {
: Expression(zone, pos),
generator_object_(generator_object),
expression_(expression),
- yield_kind_(yield_kind),
- yield_first_feedback_slot_(FeedbackVectorICSlot::Invalid()) {}
+ yield_kind_(yield_kind) {}
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
- FeedbackVectorICSlot yield_first_feedback_slot_;
+ FeedbackVectorSlot yield_first_feedback_slot_;
+ FeedbackVectorSlot keyed_load_feedback_slot_;
+ FeedbackVectorSlot done_feedback_slot_;
};
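
Rather than deriving the done and value slots by calling .next() on a base slot, the Yield node now stores one FeedbackVectorSlot member per use site. A minimal stand-alone sketch of that pattern (Spec and YieldLike are illustrative stand-ins, not V8 types):

#include <cassert>

struct FeedbackSlot { int id; };

struct Spec {
  int next_ = 0;
  FeedbackSlot AddSlot() { return FeedbackSlot{next_++}; }
};

struct YieldLike {
  FeedbackSlot keyed_load_, done_, value_;  // one stored slot per use site
  void AssignSlots(Spec* spec) {
    keyed_load_ = spec->AddSlot();
    done_ = spec->AddSlot();
    value_ = spec->AddSlot();
  }
};

int main() {
  Spec spec;
  YieldLike y;
  y.AssignSlots(&spec);
  // Slots are handed out in order, but each consumer remembers its own
  // instead of recomputing a neighbour's by arithmetic.
  assert(y.keyed_load_.id == 0 && y.done_.id == 1 && y.value_.id == 2);
  return 0;
}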
@@ -2642,14 +2687,14 @@ class ClassLiteral final : public Expression {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
bool NeedsProxySlot() const {
- return FLAG_vector_stores && scope() != NULL &&
+ return FLAG_vector_stores && class_variable_proxy() != nullptr &&
class_variable_proxy()->var()->IsUnallocated();
}
- FeedbackVectorICSlot ProxySlot() const { return slot_; }
+ FeedbackVectorSlot ProxySlot() const { return slot_; }
protected:
ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
@@ -2663,9 +2708,7 @@ class ClassLiteral final : public Expression {
extends_(extends),
constructor_(constructor),
properties_(properties),
- end_position_(end_position),
- slot_(FeedbackVectorICSlot::Invalid()) {
- }
+ end_position_(end_position) {}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -2679,7 +2722,7 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
int end_position_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
@@ -2816,8 +2859,7 @@ class RegExpDisjunction final : public RegExpTree {
public:
explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpDisjunction* AsDisjunction() override;
Interval CaptureRegisters() override;
bool IsDisjunction() override;
@@ -2840,8 +2882,7 @@ class RegExpAlternative final : public RegExpTree {
public:
explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAlternative* AsAlternative() override;
Interval CaptureRegisters() override;
bool IsAlternative() override;
@@ -2869,8 +2910,7 @@ class RegExpAssertion final : public RegExpTree {
};
explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAssertion* AsAssertion() override;
bool IsAssertion() override;
bool IsAnchoredAtStart() override;
@@ -2915,8 +2955,7 @@ class RegExpCharacterClass final : public RegExpTree {
: set_(type),
is_negated_(false) { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpCharacterClass* AsCharacterClass() override;
bool IsCharacterClass() override;
bool IsTextElement() override { return true; }
@@ -2952,8 +2991,7 @@ class RegExpAtom final : public RegExpTree {
public:
explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpAtom* AsAtom() override;
bool IsAtom() override;
bool IsTextElement() override { return true; }
@@ -2971,8 +3009,7 @@ class RegExpText final : public RegExpTree {
public:
explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpText* AsText() override;
bool IsText() override;
bool IsTextElement() override { return true; }
@@ -3006,8 +3043,7 @@ class RegExpQuantifier final : public RegExpTree {
}
}
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
static RegExpNode* ToNode(int min,
int max,
bool is_greedy,
@@ -3042,8 +3078,7 @@ class RegExpCapture final : public RegExpTree {
explicit RegExpCapture(RegExpTree* body, int index)
: body_(body), index_(index) { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
static RegExpNode* ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
@@ -3078,8 +3113,7 @@ class RegExpLookahead final : public RegExpTree {
capture_from_(capture_from) { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpLookahead* AsLookahead() override;
Interval CaptureRegisters() override;
bool IsLookahead() override;
@@ -3104,8 +3138,7 @@ class RegExpBackReference final : public RegExpTree {
explicit RegExpBackReference(RegExpCapture* capture)
: capture_(capture) { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpBackReference* AsBackReference() override;
bool IsBackReference() override;
int min_match() override { return 0; }
@@ -3121,8 +3154,7 @@ class RegExpEmpty final : public RegExpTree {
public:
RegExpEmpty() { }
void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpEmpty* AsEmpty() override;
bool IsEmpty() override;
int min_match() override { return 0; }
@@ -3167,23 +3199,25 @@ class AstVisitor BASE_EMBEDDED {
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
- StackLimitCheck check(isolate_); \
- if (!check.HasOverflowed()) return false; \
- stack_overflow_ = true; \
- return true; \
+ if (GetCurrentStackPosition() < stack_limit_) { \
+ stack_overflow_ = true; \
+ return true; \
+ } \
+ return false; \
} \
\
private: \
- void InitializeAstVisitor(Isolate* isolate, Zone* zone) { \
- isolate_ = isolate; \
- zone_ = zone; \
+ void InitializeAstVisitor(Isolate* isolate) { \
+ stack_limit_ = isolate->stack_guard()->real_climit(); \
+ stack_overflow_ = false; \
+ } \
+ \
+ void InitializeAstVisitor(uintptr_t stack_limit) { \
+ stack_limit_ = stack_limit; \
stack_overflow_ = false; \
} \
- Zone* zone() { return zone_; } \
- Isolate* isolate() { return isolate_; } \
\
- Isolate* isolate_; \
- Zone* zone_; \
+ uintptr_t stack_limit_; \
bool stack_overflow_
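
The rewritten CheckStackOverflow drops the StackLimitCheck object and compares GetCurrentStackPosition() directly against a cached stack_limit_, which is also what lets the visitor be initialized from a raw limit instead of an Isolate. A rough sketch of that comparison, assuming a downward-growing stack (the 64 KB margin is an illustrative value, not V8's):

#include <cstdint>
#include <iostream>

// Approximates the current stack position with the address of a local,
// which is roughly what a GetCurrentStackPosition() helper can do.
static uintptr_t CurrentStackPosition() {
  char marker;
  return reinterpret_cast<uintptr_t>(&marker);
}

int main() {
  // Hypothetical limit: 64 KB below the position observed at entry.
  const uintptr_t stack_limit = CurrentStackPosition() - 64 * 1024;
  // Stacks grow downward on common platforms, so falling below the limit
  // means overflow -- the same comparison the macro above performs.
  bool stack_overflow = CurrentStackPosition() < stack_limit;
  std::cout << (stack_overflow ? "overflow" : "ok") << std::endl;
  return 0;
}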
@@ -3197,6 +3231,8 @@ class AstNodeFactory final BASE_EMBEDDED {
parser_zone_(ast_value_factory->zone()),
ast_value_factory_(ast_value_factory) {}
+ AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
+
VariableDeclaration* NewVariableDeclaration(
VariableProxy* proxy, VariableMode mode, Scope* scope, int pos,
bool is_class_declaration = false, int declaration_group_start = -1) {
@@ -3567,6 +3603,11 @@ class AstNodeFactory final BASE_EMBEDDED {
NativeFunctionLiteral(parser_zone_, name, extension, pos);
}
+ DoExpression* NewDoExpression(Block* block, Variable* result_var, int pos) {
+ VariableProxy* result = NewVariableProxy(result_var, pos);
+ return new (parser_zone_) DoExpression(parser_zone_, block, result, pos);
+ }
+
ThisFunction* NewThisFunction(int pos) {
return new (local_zone_) ThisFunction(local_zone_, pos);
}
@@ -3622,6 +3663,7 @@ class AstNodeFactory final BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_AST_H_
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index e99916169c..c224303e05 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -54,7 +54,7 @@ class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
StreamedSource* source_; // Not owned.
int stack_size_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BACKGROUND_PARSING_TASK_H_
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index b63c5fbfba..c09e429cd8 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -29,7 +29,6 @@ namespace internal {
"Assignment to parameter, function uses arguments object") \
V(kAssignmentToParameterInArgumentsObject, \
"Assignment to parameter in arguments object") \
- V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
V(kBadValueContextForArgumentsObjectValue, \
"Bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
@@ -58,6 +57,7 @@ namespace internal {
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
+ V(kDoExpression, "Do expression encountered") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
@@ -105,14 +105,6 @@ namespace internal {
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
- V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
- "InstanceofStub unexpected call site cache (check)") \
- V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
- "InstanceofStub unexpected call site cache (cmp 1)") \
- V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
- "InstanceofStub unexpected call site cache (cmp 2)") \
- V(kInstanceofStubUnexpectedCallSiteCacheMov, \
- "InstanceofStub unexpected call site cache (mov)") \
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
@@ -183,7 +175,8 @@ namespace internal {
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kScriptContext, "Allocation of script context") \
+ V(kSloppyFunctionExpectsJSReceiverReceiver, \
+ "Sloppy function expects JSReceiver as receiver.") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kSpread, "Spread in array literal") \
@@ -192,20 +185,6 @@ namespace internal {
V(kSuperReference, "Super reference") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
- V(kTheInstructionShouldBeALis, "The instruction should be a lis") \
- V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
- V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
- V(kTheInstructionShouldBeAnOris, "The instruction should be an oris") \
- V(kTheInstructionShouldBeALi, "The instruction should be a li") \
- V(kTheInstructionShouldBeASldi, "The instruction should be a sldi") \
- V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
- "The instruction to patch should be a ldr literal") \
- V(kTheInstructionToPatchShouldBeALis, \
- "The instruction to patch should be a lis") \
- V(kTheInstructionToPatchShouldBeALui, \
- "The instruction to patch should be a lui") \
- V(kTheInstructionToPatchShouldBeAnOri, \
- "The instruction to patch should be an ori") \
V(kTheSourceAndDestinationAreTheSame, \
"The source and destination are the same") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
@@ -220,7 +199,6 @@ namespace internal {
"ToOperand Unsupported double immediate") \
V(kTryCatchStatement, "TryCatchStatement") \
V(kTryFinallyStatement, "TryFinallyStatement") \
- V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
@@ -254,15 +232,11 @@ namespace internal {
"Unexpected number of pre-allocated property fields") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
- V(kUnexpectedStringFunction, "Unexpected String function") \
+ V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedStringType, "Unexpected string type") \
- V(kUnexpectedStringWrapperInstanceSize, \
- "Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
V(kUnexpectedValue, "Unexpected value") \
- V(kUnexpectedUnusedPropertiesOfStringWrapper, \
- "Unexpected unused properties of string wrapper") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
@@ -275,6 +249,7 @@ namespace internal {
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
+ V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWeShouldNotHaveAnEmptyLexicalContext, \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index dceb413339..097b914399 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -2,8 +2,19 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'includes': [
+ '../third_party/icu/icu.isolate',
+ '../build/config/win/msvs_dependencies.isolate',
+ ],
'conditions': [
- ['v8_use_external_startup_data==1', {
+ ['use_custom_libcxx==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/lib/libc++.so',
+ ],
+ },
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
'variables': {
'files': [
'<(PRODUCT_DIR)/natives_blob.bin',
@@ -11,5 +22,38 @@
],
},
}],
+ ['OS=="linux" and component=="shared_library" and target_arch=="ia32"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/lib/',
+ ],
+ },
+ }],
+ ['tsan==1', {
+ 'variables': {
+ 'files': [
+ '../tools/sanitizers/tsan_suppressions.txt',
+ ],
+ },
+ }],
+ ['OS=="linux" and (asan==1 or cfi_vptr==1 or msan==1 or tsan==1)', {
+ 'variables': {
+ 'files': [
+ # For llvm-symbolizer.
+ '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
+ ],
+ },
+ }],
+ ['asan==1 or cfi_vptr==1 or msan==1 or tsan==1', {
+ 'variables': {
+ 'files': [
+ '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
+ ['asan==0 or cfi_vptr==0 or msan==0 or tsan==0', {
+ 'variables': {},
+ }],
],
}
\ No newline at end of file
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index e76b3d02d2..3e628fead9 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -133,7 +133,8 @@ Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
diff --git a/deps/v8/src/base/atomicops_internals_arm64_gcc.h b/deps/v8/src/base/atomicops_internals_arm64_gcc.h
index b01783e6a7..f24050a3e6 100644
--- a/deps/v8/src/base/atomicops_internals_arm64_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_arm64_gcc.h
@@ -311,6 +311,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return *ptr;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_arm_gcc.h b/deps/v8/src/base/atomicops_internals_arm_gcc.h
index e399657e13..6c8b27ea24 100644
--- a/deps/v8/src/base/atomicops_internals_arm_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_arm_gcc.h
@@ -59,7 +59,8 @@ inline void MemoryBarrier() {
// variant of the target architecture is being used. This tests against
// any known ARMv6 or ARMv7 variant, where it is possible to directly
// use ldrex/strex instructions to implement fast atomic operations.
-#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+#if defined(__ARM_ARCH_8A__) || \
+ defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
@@ -296,6 +297,7 @@ inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_atomicword_compat.h b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
index 0530ced2a4..4f758a7299 100644
--- a/deps/v8/src/base/atomicops_internals_atomicword_compat.h
+++ b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -92,7 +92,8 @@ inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
reinterpret_cast<volatile const Atomic32*>(ptr));
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // !defined(V8_HOST_ARCH_64_BIT)
diff --git a/deps/v8/src/base/atomicops_internals_mac.h b/deps/v8/src/base/atomicops_internals_mac.h
index 84f9dbcd75..c112506238 100644
--- a/deps/v8/src/base/atomicops_internals_mac.h
+++ b/deps/v8/src/base/atomicops_internals_mac.h
@@ -210,6 +210,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
#undef ATOMICOPS_COMPILER_BARRIER
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips64_gcc.h b/deps/v8/src/base/atomicops_internals_mips64_gcc.h
index ccb973c039..85b4e462b9 100644
--- a/deps/v8/src/base/atomicops_internals_mips64_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_mips64_gcc.h
@@ -302,6 +302,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return *ptr;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips_gcc.h b/deps/v8/src/base/atomicops_internals_mips_gcc.h
index 442fdd0f96..8d65db2127 100644
--- a/deps/v8/src/base/atomicops_internals_mips_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_mips_gcc.h
@@ -155,6 +155,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
index a3a6e74c72..bb99973786 100644
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -132,7 +132,7 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
-}
-} // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/deps/v8/src/base/atomicops_internals_ppc_gcc.h b/deps/v8/src/base/atomicops_internals_ppc_gcc.h
index daa27b4693..0d16500d1b 100644
--- a/deps/v8/src/base/atomicops_internals_ppc_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_ppc_gcc.h
@@ -162,7 +162,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
}
#endif
-}
-} // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.cc b/deps/v8/src/base/atomicops_internals_x86_gcc.cc
index ab7dd8d091..c0310300a1 100644
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.cc
+++ b/deps/v8/src/base/atomicops_internals_x86_gcc.cc
@@ -47,7 +47,8 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
#endif
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
namespace {
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.h b/deps/v8/src/base/atomicops_internals_x86_gcc.h
index ec87c42121..55bc44cd8b 100644
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_x86_gcc.h
@@ -267,7 +267,8 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(__x86_64__)
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
diff --git a/deps/v8/src/base/atomicops_internals_x86_msvc.h b/deps/v8/src/base/atomicops_internals_x86_msvc.h
index adc40318e9..c37bc78df6 100644
--- a/deps/v8/src/base/atomicops_internals_x86_msvc.h
+++ b/deps/v8/src/base/atomicops_internals_x86_msvc.h
@@ -197,6 +197,7 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(_WIN64)
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 8016218e5c..5f66d21fcf 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -68,6 +68,10 @@
# endif
#endif
+#if defined(__ARM_ARCH_8A__)
+# define CAN_USE_ARMV8_INSTRUCTIONS 1
+#endif
+
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
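
Both this hunk and the atomicops change above key off __ARM_ARCH_8A__, which GCC and Clang define when targeting 32-bit ARMv8-A; the ldrex/strex fast path and CAN_USE_ARMV8_INSTRUCTIONS now cover it. A trivial probe for the gate (HAS_ARMV8_INSTRUCTIONS is a stand-in name):

#include <cstdio>

#if defined(__ARM_ARCH_8A__)
#define HAS_ARMV8_INSTRUCTIONS 1
#else
#define HAS_ARMV8_INSTRUCTIONS 0
#endif

int main() {
  std::printf("ARMv8 instructions available: %d\n", HAS_ARMV8_INSTRUCTIONS);
  return 0;
}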
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 4f587201fa..692494afcb 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -691,4 +691,5 @@ CPU::CPU()
#endif // V8_HOST_ARCH_PPC
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 1dc0a91f65..ca108fa2bf 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -145,6 +145,7 @@ class CPU final {
bool is_fp64_mode_;
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_CPU_H_
diff --git a/deps/v8/src/base/lazy-instance.h b/deps/v8/src/base/lazy-instance.h
index a20689a16c..4c0a3f897b 100644
--- a/deps/v8/src/base/lazy-instance.h
+++ b/deps/v8/src/base/lazy-instance.h
@@ -232,6 +232,7 @@ struct LazyDynamicInstance {
CreateTrait, InitOnceTrait, DestroyTrait> type;
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_LAZY_INSTANCE_H_
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 511ebf1e9c..e4e3f49bfa 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -11,7 +11,8 @@
#include "src/base/build_config.h"
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
+ const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -28,7 +29,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
V8_Fatal("", 0, "%s", (msg))
#define UNIMPLEMENTED() \
V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() ((void) 0)
+#define UNREACHABLE() V8_Fatal("", 0, "unreachable code")
#endif
@@ -153,6 +154,7 @@ void DumpBacktrace();
#define DCHECK(condition) CHECK(condition)
#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
+#define DCHECK_GT(v1, v2) CHECK_GT(v1, v2)
#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
@@ -163,6 +165,7 @@ void DumpBacktrace();
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(v1, v2) ((void) 0)
#define DCHECK_NE(v1, v2) ((void) 0)
+#define DCHECK_GT(v1, v2) ((void) 0)
#define DCHECK_GE(v1, v2) ((void) 0)
#define DCHECK_LT(v1, v2) ((void) 0)
#define DCHECK_LE(v1, v2) ((void) 0)
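
DCHECK_GT joins the family of debug-only checks: active when DEBUG is defined, compiled down to ((void) 0) otherwise. A self-contained sketch of the same two-sided macro pattern (MY_DCHECK_GT is a hypothetical name, not V8's):

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define MY_DCHECK_GT(a, b)                                     \
  do {                                                         \
    if (!((a) > (b))) {                                        \
      std::fprintf(stderr, "Check failed: %s > %s\n", #a, #b); \
      std::abort();                                            \
    }                                                          \
  } while (0)
#else
#define MY_DCHECK_GT(a, b) ((void)0)
#endif

int main() {
  MY_DCHECK_GT(2, 1);  // aborts in debug builds if it fails; no-op otherwise
  return 0;
}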
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 6dc96f4f3b..f47b0b9d55 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -417,7 +417,8 @@ template <>
inline bool is_fundamental<uint8_t>() {
return true;
}
-}
-} // namespace v8::base
+
+} // namespace base
+} // namespace v8
#endif // V8_BASE_MACROS_H_
diff --git a/deps/v8/src/base/once.cc b/deps/v8/src/base/once.cc
index eaabf40d9a..818a9f2e84 100644
--- a/deps/v8/src/base/once.cc
+++ b/deps/v8/src/base/once.cc
@@ -50,4 +50,5 @@ void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
}
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index 6bf741d38a..790a8866e0 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -97,6 +97,7 @@ inline void CallOnce(OnceType* once,
}
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ONCE_H_
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index e5b9bd0810..fcd6cf7974 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -313,4 +313,5 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
#endif // V8_OS_POSIX
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 9cb706460f..72d6f28507 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -113,6 +113,7 @@ typedef LazyStaticInstance<
#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h
index 3fe7e8f720..f9a9ef4361 100644
--- a/deps/v8/src/base/platform/elapsed-timer.h
+++ b/deps/v8/src/base/platform/elapsed-timer.h
@@ -92,6 +92,7 @@ class ElapsedTimer final {
#endif
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_ELAPSED_TIMER_H_
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index 8b1e305701..14016058ae 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -188,4 +188,5 @@ bool RecursiveMutex::TryLock() {
return true;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 18e85de7bc..61df19d66a 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -210,6 +210,7 @@ class LockGuard final {
DISALLOW_COPY_AND_ASSIGN(LockGuard);
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_MUTEX_H_
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 03e9aa3717..ea2824d8c3 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -239,5 +239,5 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
bool VirtualMemory::HasLazyCommits() { return true; }
-}
-} // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 18f151ac29..a49e28723d 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -252,4 +252,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index b279e0c926..8b3398039f 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -255,4 +255,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 2a2abfeb25..a4b742adc7 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -388,4 +388,5 @@ bool VirtualMemory::HasLazyCommits() {
return true;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index f16f329fc3..419281f669 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -249,4 +249,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index 369dd8e1a6..af145e2fca 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -286,4 +286,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index b16652886e..3c90467627 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -312,4 +312,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 7e07f1a1e2..a2ce2c13f6 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -208,4 +208,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index a73dc523c4..6afa6f9c37 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -751,9 +751,19 @@ void* OS::GetRandomMmapAddr() {
static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
LPVOID base = NULL;
+ static BOOL use_aslr = -1;
+#ifdef V8_HOST_ARCH_32_BIT
+ // Don't bother randomizing on 32-bit hosts, because they lack the room and
+ // don't have viable ASLR anyway.
+ if (use_aslr == -1 && !IsWow64Process(GetCurrentProcess(), &use_aslr))
+ use_aslr = FALSE;
+#else
+ use_aslr = TRUE;
+#endif
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
+ if (use_aslr &&
+ (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
+ // For executable pages try and randomize the allocation address
for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
}
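
The loop above retries VirtualAlloc a few times with randomized hint addresses before a fallback path takes whatever the OS returns. A hedged POSIX stand-in for the same retry pattern (mmap in place of VirtualAlloc; the hint mask and retry count are illustrative):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Try a few randomized base addresses, then let the kernel choose.
void* AllocWithRandomHint(size_t size) {
  for (int attempts = 0; attempts < 3; ++attempts) {
    uintptr_t hint = (static_cast<uintptr_t>(std::rand()) << 12) & 0x3FFFF000u;
    void* base = mmap(reinterpret_cast<void*>(hint), size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == reinterpret_cast<void*>(hint)) return base;  // hint honored
    if (base != MAP_FAILED) munmap(base, size);  // different spot; retry
  }
  // Fall back: any address the kernel picks.
  void* base =
      mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return base == MAP_FAILED ? nullptr : base;
}

int main() {
  void* p = AllocWithRandomHint(1 << 20);
  if (p) munmap(p, 1 << 20);
  return 0;
}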
@@ -823,6 +833,9 @@ void OS::Abort() {
}
// Make the MSVCRT do a silent abort.
raise(SIGABRT);
+
+ // Make sure function doesn't return.
+ abort();
}
@@ -1133,9 +1146,9 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
lib_name_length, NULL, NULL);
result.push_back(OS::SharedLibraryAddress(
- lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
- module_entry.modBaseSize)));
+ lib_name, reinterpret_cast<uintptr_t>(module_entry.modBaseAddr),
+ reinterpret_cast<uintptr_t>(module_entry.modBaseAddr +
+ module_entry.modBaseSize)));
cont = _Module32NextW(snapshot, &module_entry);
}
CloseHandle(snapshot);
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 2d08ecbd7f..89d6225ede 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -194,7 +194,7 @@ class OS {
static void Sleep(TimeDelta interval);
// Abort the current process.
- static void Abort();
+ V8_NORETURN static void Abort();
// Debug break.
static void DebugBreak();
@@ -479,6 +479,7 @@ class Thread {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_PLATFORM_H_
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 0679c00d95..9e7b59a1d2 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -75,6 +75,10 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
Semaphore::Semaphore(int count) {
DCHECK(count >= 0);
+#if V8_LIBC_GLIBC
+ // sem_init in glibc prior to 2.1 does not zero out semaphores.
+ memset(&native_handle_, 0, sizeof(native_handle_));
+#endif
int result = sem_init(&native_handle_, 0, count);
DCHECK_EQ(0, result);
USE(result);
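
A minimal round-trip showing the guarded initialization in use; the memset mirrors the workaround above for glibc releases older than 2.1 and is harmless elsewhere:

#include <semaphore.h>
#include <cassert>
#include <cstring>

int main() {
  sem_t sem;
  std::memset(&sem, 0, sizeof(sem));  // defensive zeroing, as in the patch
  int result = sem_init(&sem, /*pshared=*/0, /*value=*/1);
  assert(result == 0);
  (void)result;     // silence unused warning in NDEBUG builds
  sem_wait(&sem);   // count 1 -> 0
  sem_post(&sem);   // count 0 -> 1
  sem_destroy(&sem);
  return 0;
}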
@@ -201,4 +205,5 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#endif // V8_OS_MACOSX
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index fa131018b3..18700d1ba0 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -96,6 +96,7 @@ struct LazySemaphore {
#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_SEMAPHORE_H_
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 5162182b7a..e847d54de8 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -644,4 +644,5 @@ bool TimeTicks::KernelTimestampAvailable() {
#endif // V8_OS_WIN
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 5fc01e88fd..29300e5404 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -398,6 +398,7 @@ inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
return ticks + delta;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_TIME_H_
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 29a48ffb05..4cf06a9047 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -136,4 +136,5 @@ void RandomNumberGenerator::SetSeed(int64_t seed) {
seed_ = (seed ^ kMultiplier) & kMask;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 62c6b27b1b..10f2789c7d 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -94,6 +94,7 @@ class RandomNumberGenerator final {
int64_t seed_;
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
diff --git a/deps/v8/src/bignum-dtoa.h b/deps/v8/src/bignum-dtoa.h
index d42801bd69..dab27badba 100644
--- a/deps/v8/src/bignum-dtoa.h
+++ b/deps/v8/src/bignum-dtoa.h
@@ -55,6 +55,7 @@ enum BignumDtoaMode {
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BIGNUM_DTOA_H_
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 9baf77e7f2..e7c6747665 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -68,7 +68,9 @@ static uint64_t ReadUInt64(Vector<const char> buffer,
int from,
int digits_to_read) {
uint64_t result = 0;
- for (int i = from; i < from + digits_to_read; ++i) {
+ int to = from + digits_to_read;
+
+ for (int i = from; i < to; ++i) {
int digit = buffer[i] - '0';
DCHECK(0 <= digit && digit <= 9);
result = result * 10 + digit;
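
Hoisting the loop bound into a local is a small cleanup; the digit accumulation itself is unchanged. A stand-alone version of the loop, with assert standing in for DCHECK:

#include <cassert>
#include <cstdint>

static uint64_t ReadDigits(const char* buffer, int from, int digits_to_read) {
  uint64_t result = 0;
  int to = from + digits_to_read;  // bound hoisted out of the loop condition
  for (int i = from; i < to; ++i) {
    int digit = buffer[i] - '0';
    assert(0 <= digit && digit <= 9);
    result = result * 10 + digit;
  }
  return result;
}

int main() {
  assert(ReadDigits("12345", 1, 3) == 234);  // digits '2', '3', '4'
  return 0;
}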
diff --git a/deps/v8/src/bignum.h b/deps/v8/src/bignum.h
index 7ebdae47bc..167c1842da 100644
--- a/deps/v8/src/bignum.h
+++ b/deps/v8/src/bignum.h
@@ -115,6 +115,7 @@ class Bignum {
DISALLOW_COPY_AND_ASSIGN(Bignum);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BIGNUM_H_
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index a8a5f97680..c609e578d4 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -6,7 +6,6 @@
#include "src/accessors.h"
#include "src/api-natives.h"
-#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
@@ -164,6 +163,7 @@ class Genesis BASE_EMBEDDED {
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
+ void CreateIteratorMaps();
// Make the "arguments" and "caller" properties throw a TypeError on access.
void AddRestrictedFunctionProperties(Handle<Map> map);
@@ -174,18 +174,18 @@ class Genesis BASE_EMBEDDED {
// but in the latter case we don't use the objects it produces directly, as
we have to use the deserialized ones that are linked together with the
// rest of the context snapshot.
- Handle<GlobalObject> CreateNewGlobals(
+ Handle<JSGlobalObject> CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy);
// Hooks the given global proxy into the context. If the context was created
// by deserialization then this will unhook the global proxy that was
// deserialized, leaving the GC to pick it up.
- void HookUpGlobalProxy(Handle<GlobalObject> global_object,
+ void HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
Handle<JSGlobalProxy> global_proxy);
// Similarly, we want to use the global that has been created by the templates
// passed through the API. The global from the snapshot is detached from the
// other objects in the snapshot.
- void HookUpGlobalObject(Handle<GlobalObject> global_object,
+ void HookUpGlobalObject(Handle<JSGlobalObject> global_object,
Handle<FixedArray> outdated_contexts);
// The native context has a ScriptContextTable that stores declarative bindings
// made in script scopes. Add a "this" binding to that table pointing to the
@@ -193,12 +193,12 @@ class Genesis BASE_EMBEDDED {
void InstallGlobalThisBinding();
void HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts);
// New context initialization. Used for creating a context from scratch.
- void InitializeGlobal(Handle<GlobalObject> global_object,
+ void InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
ContextType context_type);
void InitializeExperimentalGlobal();
- // Typed arrays are not serializable and have to initialized afterwards.
- void InitializeBuiltinTypedArrays();
+ // Depending on the situation, expose and/or get rid of the utils object.
+ void ConfigureUtilsObject(ContextType context_type);
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
void InitializeGlobal_##id();
@@ -370,6 +370,9 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
void Bootstrapper::DetachGlobal(Handle<Context> env) {
+ env->GetIsolate()->counters()->errors_thrown_per_context()->AddSample(
+ env->GetErrorsThrown());
+
Factory* factory = env->GetIsolate()->factory();
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
@@ -418,13 +421,7 @@ Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
Builtins::Name call,
bool strict_function_map = false) {
Factory* const factory = target->GetIsolate()->factory();
- PropertyAttributes attributes;
- if (target->IsJSBuiltinsObject()) {
- attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- } else {
- attributes = DONT_ENUM;
- }
+ PropertyAttributes attributes = DONT_ENUM;
return InstallFunction(target, factory->InternalizeUtf8String(name), type,
instance_size, maybe_prototype, call, attributes,
strict_function_map);
@@ -698,7 +695,8 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
.Assert();
- JSObject::PreventExtensions(function).Assert();
+ if (JSObject::PreventExtensions(function, Object::THROW_ON_ERROR).IsNothing())
+ DCHECK(false);
return function;
}
@@ -793,6 +791,57 @@ void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
}
+void Genesis::CreateIteratorMaps() {
+ // Create iterator-related meta-objects.
+ Handle<JSObject> iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSObject> generator_object_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSObject> generator_function_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SetObjectPrototype(generator_object_prototype, iterator_prototype);
+
+ JSObject::AddProperty(generator_function_prototype,
+ factory()->InternalizeUtf8String("prototype"),
+ generator_object_prototype,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Create maps for generator functions and their prototypes. Store those
+ // maps in the native context. The "prototype" property descriptor is
+ // writable, non-enumerable, and non-configurable (as per ES6 draft
+ // 04-14-15, section 25.2.4.3).
+ Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
+ // Generator functions do not have "caller" or "arguments" accessors.
+ Handle<Map> sloppy_generator_function_map =
+ Map::Copy(strict_function_map, "SloppyGeneratorFunction");
+ Map::SetPrototype(sloppy_generator_function_map,
+ generator_function_prototype);
+ native_context()->set_sloppy_generator_function_map(
+ *sloppy_generator_function_map);
+
+ Handle<Map> strict_generator_function_map =
+ Map::Copy(strict_function_map, "StrictGeneratorFunction");
+ Map::SetPrototype(strict_generator_function_map,
+ generator_function_prototype);
+ native_context()->set_strict_generator_function_map(
+ *strict_generator_function_map);
+
+ Handle<Map> strong_function_map(native_context()->strong_function_map());
+ Handle<Map> strong_generator_function_map =
+ Map::Copy(strong_function_map, "StrongGeneratorFunction");
+ Map::SetPrototype(strong_generator_function_map,
+ generator_function_prototype);
+ native_context()->set_strong_generator_function_map(
+ *strong_generator_function_map);
+
+ Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
+ Map::SetPrototype(generator_object_prototype_map, generator_object_prototype);
+ native_context()->set_generator_object_prototype_map(
+ *generator_object_prototype_map);
+}
+
+
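
CreateIteratorMaps wires the generator object prototype beneath a shared iterator prototype, so every generator object inherits the iterator protocol. A toy model of the resulting chain (the intrinsic names follow ES6 conventions; the structs are illustrative):

#include <cassert>

struct Obj {
  const char* name;
  const Obj* proto;
};

static bool InPrototypeChain(const Obj* o, const Obj* target) {
  for (; o != nullptr; o = o->proto) {
    if (o == target) return true;
  }
  return false;
}

int main() {
  Obj object_proto = {"Object.prototype", nullptr};
  Obj iterator_proto = {"%IteratorPrototype%", &object_proto};
  Obj generator_proto = {"%GeneratorObjectPrototype%", &iterator_proto};
  // As set up in Genesis: generator objects see the iterator prototype.
  assert(InPrototypeChain(&generator_proto, &iterator_proto));
  assert(InPrototypeChain(&generator_proto, &object_proto));
  return 0;
}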
static void ReplaceAccessors(Handle<Map> map,
Handle<String> name,
PropertyAttributes attributes,
@@ -888,7 +937,7 @@ void Genesis::HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts) {
}
-Handle<GlobalObject> Genesis::CreateNewGlobals(
+Handle<JSGlobalObject> Genesis::CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy) {
// The argument global_proxy_template aka data is an ObjectTemplateInfo.
@@ -948,8 +997,8 @@ Handle<GlobalObject> Genesis::CreateNewGlobals(
js_global_object_function->initial_map()->set_is_prototype_map(true);
js_global_object_function->initial_map()->set_is_hidden_prototype();
js_global_object_function->initial_map()->set_dictionary_map(true);
- Handle<GlobalObject> global_object =
- factory()->NewGlobalObject(js_global_object_function);
+ Handle<JSGlobalObject> global_object =
+ factory()->NewJSGlobalObject(js_global_object_function);
// Step 2: (re)initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
@@ -981,7 +1030,7 @@ Handle<GlobalObject> Genesis::CreateNewGlobals(
}
-void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
+void Genesis::HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
Handle<JSGlobalProxy> global_proxy) {
// Set the native context for the global object.
global_object->set_native_context(*native_context());
@@ -995,11 +1044,10 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
}
-void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
+void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object,
Handle<FixedArray> outdated_contexts) {
- Handle<GlobalObject> global_object_from_snapshot(
- GlobalObject::cast(native_context()->extension()));
- Handle<JSBuiltinsObject> builtins_global(native_context()->builtins());
+ Handle<JSGlobalObject> global_object_from_snapshot(
+ JSGlobalObject::cast(native_context()->extension()));
native_context()->set_extension(*global_object);
native_context()->set_security_token(*global_object);
@@ -1012,13 +1060,6 @@ void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
context->set_global_object(*global_object);
}
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- JSObject::SetOwnPropertyIgnoreAttributes(builtins_global,
- factory()->global_string(),
- global_object, attributes).Assert();
- // Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(*global_object)->set_builtins(*builtins_global);
TransferNamedProperties(global_object_from_snapshot, global_object);
TransferIndexedProperties(global_object_from_snapshot, global_object);
}
@@ -1026,7 +1067,7 @@ void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
-void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
+void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
ContextType context_type) {
// --- N a t i v e C o n t e x t ---
@@ -1043,7 +1084,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Isolate* isolate = global_object->GetIsolate();
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
@@ -1061,6 +1101,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
empty_function, Builtins::kIllegal);
function_function->initial_map()->set_is_callable();
+ function_function->initial_map()->set_is_constructor(true);
{ // --- A r r a y ---
Handle<JSFunction> array_function =
@@ -1185,74 +1226,21 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
DCHECK_EQ(0, initial_map->GetInObjectProperties());
- PropertyAttributes final =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Map::EnsureDescriptorSlack(initial_map, 5);
+ Map::EnsureDescriptorSlack(initial_map, 1);
- {
- // ECMA-262, section 15.10.7.1.
- DataDescriptor field(factory->source_string(),
- JSRegExp::kSourceFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.2.
- DataDescriptor field(factory->global_string(),
- JSRegExp::kGlobalFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.3.
- DataDescriptor field(factory->ignore_case_string(),
- JSRegExp::kIgnoreCaseFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.4.
- DataDescriptor field(factory->multiline_string(),
- JSRegExp::kMultilineFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.5.
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- DataDescriptor field(factory->last_index_string(),
- JSRegExp::kLastIndexFieldIndex, writable,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
+ // ECMA-262, section 15.10.7.5.
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ DataDescriptor field(factory->last_index_string(),
+ JSRegExp::kLastIndexFieldIndex, writable,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(&field);
static const int num_fields = JSRegExp::kInObjectFieldCount;
initial_map->SetInObjectProperties(num_fields);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(initial_map->instance_size() +
num_fields * kPointerSize);
-
- // RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = Map::Copy(initial_map, "RegExpPrototype");
- DCHECK(proto_map->prototype() == *isolate->initial_object_prototype());
- Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
- proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->query_colon_string());
- proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
- proto_map->set_is_prototype_map(true);
- Map::SetPrototype(initial_map, proto);
- factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
- JSRegExp::IRREGEXP, factory->empty_string(),
- JSRegExp::Flags(0), 0);
}
// Initialize the embedder data slot.
@@ -1266,7 +1254,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->SetInstanceClassName(*name);
+ cons->shared()->set_instance_class_name(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
@@ -1278,7 +1266,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
JSFunction::SetInstancePrototype(
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->SetInstanceClassName(*name);
+ cons->shared()->set_instance_class_name(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
@@ -1542,9 +1530,8 @@ bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, utils, extras_utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1556,9 +1543,8 @@ bool Bootstrapper::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<Object> global = isolate->global_object();
Handle<Object> utils = isolate->natives_utils_object();
Handle<Object> args[] = {global, utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1571,9 +1557,8 @@ bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
Handle<Object> binding = isolate->extras_binding_object();
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, binding, extras_utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1587,9 +1572,8 @@ bool Bootstrapper::CompileExperimentalExtraBuiltin(Isolate* isolate,
Handle<Object> binding = isolate->extras_binding_object();
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, binding, extras_utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1602,13 +1586,12 @@ bool Bootstrapper::CompileCodeStubBuiltin(Isolate* isolate, int index) {
Handle<JSObject> exports(isolate->heap()->code_stub_exports_object());
Handle<Object> args[] = {global, exports};
bool result =
- CompileNative(isolate, name, global, source_code, arraysize(args), args);
+ CompileNative(isolate, name, source_code, arraysize(args), args);
return result;
}
bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
- Handle<JSObject> receiver,
Handle<String> source, int argc,
Handle<Object> argv[]) {
SuppressDebug compiling_natives(isolate->debug());
@@ -1637,6 +1620,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(function_info,
runtime_context);
+ Handle<Object> receiver = isolate->factory()->undefined_value();
// For non-extension scripts, run script to get the function wrapper.
Handle<Object> wrapper;
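// With the receiver pinned to undefined here, the parameter can be dropped
// from CompileNative() itself; the five caller hunks above and the
// bootstrapper.h hunk below change accordingly.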
@@ -1707,7 +1691,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
const char* holder_expr) {
Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
- Handle<GlobalObject> global(native_context->global_object());
+ Handle<JSGlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
@@ -1736,81 +1720,75 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
template <typename Data>
-Data* SetBuiltinTypedArray(Isolate* isolate, Handle<JSBuiltinsObject> builtins,
- ExternalArrayType type, Data* data,
- size_t num_elements, const char* name,
- const SharedFlag shared = SharedFlag::kNotShared,
- const PretenureFlag pretenure = TENURED) {
- size_t byte_length = num_elements * sizeof(*data);
+Handle<JSTypedArray> CreateTypedArray(Isolate* isolate, ExternalArrayType type,
+ size_t num_elements, Data** data) {
+ size_t byte_length = num_elements * sizeof(**data);
Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(shared, pretenure);
- bool is_external = data != nullptr;
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ bool is_external = (*data != nullptr);
if (!is_external) {
- data = reinterpret_cast<Data*>(
+ *data = reinterpret_cast<Data*>(
isolate->array_buffer_allocator()->Allocate(byte_length));
}
- JSArrayBuffer::Setup(buffer, isolate, is_external, data, byte_length, shared);
-
- Handle<JSTypedArray> typed_array = isolate->factory()->NewJSTypedArray(
- type, buffer, 0, num_elements, pretenure);
- Handle<String> name_string = isolate->factory()->InternalizeUtf8String(name);
- // Reset property cell type before (re)initializing.
- JSBuiltinsObject::InvalidatePropertyCell(builtins, name_string);
- JSObject::SetOwnPropertyIgnoreAttributes(builtins, name_string, typed_array,
- FROZEN)
- .Assert();
- return data;
-}
-
-
-void Genesis::InitializeBuiltinTypedArrays() {
- Handle<JSBuiltinsObject> builtins(native_context()->builtins());
- { // Initially seed the per-context random number generator using the
- // per-isolate random number generator.
- const size_t num_elements = 2;
- const size_t num_bytes = num_elements * sizeof(uint32_t);
- uint32_t* state = SetBuiltinTypedArray<uint32_t>(isolate(), builtins,
- kExternalUint32Array, NULL,
- num_elements, "rngstate");
- do {
- isolate()->random_number_generator()->NextBytes(state, num_bytes);
- } while (state[0] == 0 || state[1] == 0);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, *data, byte_length,
+ SharedFlag::kNotShared);
+ return isolate->factory()->NewJSTypedArray(type, buffer, 0, num_elements,
+ TENURED);
+}
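+// A minimal usage sketch for the helper above (hypothetical caller, modeled
+// on the two-element RNG state that InitializeBuiltinTypedArrays() owned
+// below; *data is an out-parameter, filled when no external buffer is given):
+//
+//   uint32_t* state = nullptr;
+//   Handle<JSTypedArray> rng_state = CreateTypedArray<uint32_t>(
+//       isolate, kExternalUint32Array, 2, &state);
+//   isolate->random_number_generator()->NextBytes(state, 2 * sizeof(uint32_t));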
+
+
+void Genesis::ConfigureUtilsObject(ContextType context_type) {
+ switch (context_type) {
+ // We still need the utils object to find debug functions.
+ case DEBUG_CONTEXT:
+ return;
+ // Expose the natives in global if a valid name for it is specified.
+ case FULL_CONTEXT: {
+ // We still need the utils object after deserialization.
+ if (isolate()->serializer_enabled()) return;
+ if (FLAG_expose_natives_as == NULL) break;
+ if (strlen(FLAG_expose_natives_as) == 0) break;
+ HandleScope scope(isolate());
+ Handle<String> natives_key =
+ factory()->InternalizeUtf8String(FLAG_expose_natives_as);
+ uint32_t dummy_index;
+ if (natives_key->AsArrayIndex(&dummy_index)) break;
+ Handle<Object> utils = isolate()->natives_utils_object();
+ Handle<JSObject> global = isolate()->global_object();
+ JSObject::AddProperty(global, natives_key, utils, DONT_ENUM);
+ break;
+ }
+ case THIN_CONTEXT:
+ break;
}
- { // Initialize trigonometric lookup tables and constants.
- const size_t num_elements = arraysize(fdlibm::MathConstants::constants);
- double* data = const_cast<double*>(fdlibm::MathConstants::constants);
- SetBuiltinTypedArray<double>(isolate(), builtins, kExternalFloat64Array,
- data, num_elements, "kMath");
- }
+ // The utils object can be removed for cases that reach this point.
+ native_context()->set_natives_utils_object(heap()->undefined_value());
- { // Initialize a result array for rempio2 calculation
- const size_t num_elements = 2;
- double* data =
- SetBuiltinTypedArray<double>(isolate(), builtins, kExternalFloat64Array,
- NULL, num_elements, "rempio2result");
- for (size_t i = 0; i < num_elements; i++) data[i] = 0;
- }
+#ifdef DEBUG
+ JSGlobalObject* dummy = native_context()->runtime_context()->global_object();
+ DCHECK_EQ(0, dummy->elements()->length());
+ DCHECK_EQ(0, GlobalDictionary::cast(dummy->properties())->NumberOfElements());
+#endif
}
void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> container) {
+ Factory* factory = isolate->factory();
HandleScope scope(isolate);
-#define EXPORT_PRIVATE_SYMBOL(NAME) \
- Handle<String> NAME##_name = \
- isolate->factory()->NewStringFromAsciiChecked(#NAME); \
- JSObject::AddProperty(container, NAME##_name, isolate->factory()->NAME(), \
- NONE);
+ Handle<Context> native_context = isolate->native_context();
+#define EXPORT_PRIVATE_SYMBOL(NAME) \
+ Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
+ JSObject::AddProperty(container, NAME##_name, factory->NAME(), NONE);
PRIVATE_SYMBOL_LIST(EXPORT_PRIVATE_SYMBOL)
#undef EXPORT_PRIVATE_SYMBOL
-#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
- Handle<String> NAME##_name = \
- isolate->factory()->NewStringFromAsciiChecked(#NAME); \
- JSObject::AddProperty(container, NAME##_name, isolate->factory()->NAME(), \
- NONE);
+#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
+ Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
+ JSObject::AddProperty(container, NAME##_name, factory->NAME(), NONE);
PUBLIC_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
+ WELL_KNOWN_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
#undef EXPORT_PUBLIC_SYMBOL
{
@@ -1822,7 +1800,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<TypeFeedbackVector> feedback_vector =
TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
apply->shared()->set_feedback_vector(*feedback_vector);
- isolate->native_context()->set_reflect_apply(*apply);
+ native_context->set_reflect_apply(*apply);
}
{
@@ -1834,7 +1812,204 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<TypeFeedbackVector> feedback_vector =
TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
construct->shared()->set_feedback_vector(*feedback_vector);
- isolate->native_context()->set_reflect_construct(*construct);
+ native_context->set_reflect_construct(*construct);
+ }
+
+ Handle<JSObject> iterator_prototype;
+
+ {
+ PrototypeIterator iter(native_context->generator_object_prototype_map());
+ iter.Advance(); // Advance to the prototype of generator_object_prototype.
+ iterator_prototype = Handle<JSObject>(iter.GetCurrent<JSObject>());
+
+ JSObject::AddProperty(container,
+ factory->InternalizeUtf8String("IteratorPrototype"),
+ iterator_prototype, NONE);
+ }
+
+ {
+ PrototypeIterator iter(native_context->sloppy_generator_function_map());
+ Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>());
+
+ JSObject::AddProperty(
+ container, factory->InternalizeUtf8String("GeneratorFunctionPrototype"),
+ generator_function_prototype, NONE);
+
+ static const bool kUseStrictFunctionMap = true;
+ Handle<JSFunction> generator_function_function =
+ InstallFunction(container, "GeneratorFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSize, generator_function_prototype,
+ Builtins::kIllegal, kUseStrictFunctionMap);
+ generator_function_function->initial_map()->set_is_callable();
+ generator_function_function->initial_map()->set_is_constructor(true);
+ }
+
+ { // -- S e t I t e r a t o r
+ Handle<JSObject> set_iterator_prototype =
+ isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
+ SetObjectPrototype(set_iterator_prototype, iterator_prototype);
+ Handle<JSFunction> set_iterator_function = InstallFunction(
+ container, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
+ set_iterator_prototype, Builtins::kIllegal);
+ native_context->set_set_iterator_map(set_iterator_function->initial_map());
+ }
+
+ { // -- M a p I t e r a t o r
+ Handle<JSObject> map_iterator_prototype =
+ isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
+ SetObjectPrototype(map_iterator_prototype, iterator_prototype);
+ Handle<JSFunction> map_iterator_function = InstallFunction(
+ container, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
+ map_iterator_prototype, Builtins::kIllegal);
+ native_context->set_map_iterator_map(map_iterator_function->initial_map());
+ }
+
+ { // -- S c r i p t
+ // Builtin functions for Script.
+ Handle<JSFunction> script_fun = InstallFunction(
+ container, "Script", JS_VALUE_TYPE, JSValue::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
+ native_context->set_script_function(*script_fun);
+
+ Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+ Map::EnsureDescriptorSlack(script_map, 15);
+
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ Handle<AccessorInfo> script_column =
+ Accessors::ScriptColumnOffsetInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_column->name())), script_column,
+ attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_id = Accessors::ScriptIdInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
+ script_id, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+
+ Handle<AccessorInfo> script_name =
+ Accessors::ScriptNameInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_line =
+ Accessors::ScriptLineOffsetInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_source =
+ Accessors::ScriptSourceInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source->name())), script_source,
+ attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_type =
+ Accessors::ScriptTypeInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_compilation_type =
+ Accessors::ScriptCompilationTypeInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_compilation_type->name())),
+ script_compilation_type, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_line_ends =
+ Accessors::ScriptLineEndsInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
+ attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_context_data =
+ Accessors::ScriptContextDataInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_context_data->name())),
+ script_context_data, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_eval_from_script =
+ Accessors::ScriptEvalFromScriptInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_eval_from_script->name())),
+ script_eval_from_script, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_eval_from_script_position =
+ Accessors::ScriptEvalFromScriptPositionInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_eval_from_script_position->name())),
+ script_eval_from_script_position, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_eval_from_function_name =
+ Accessors::ScriptEvalFromFunctionNameInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_eval_from_function_name->name())),
+ script_eval_from_function_name, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_source_url =
+ Accessors::ScriptSourceUrlInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source_url->name())),
+ script_source_url, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_source_mapping_url =
+ Accessors::ScriptSourceMappingUrlInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source_mapping_url->name())),
+ script_source_mapping_url, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_is_embedder_debug_script =
+ Accessors::ScriptIsEmbedderDebugScriptInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
+ script_is_embedder_debug_script, attribs);
+ script_map->AppendDescriptor(&d);
+ }
}
}
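// The fifteen accessor blocks above share one shape; a hypothetical local
// helper (not part of this change) capturing the pattern:
//
//   void AppendScriptAccessor(Handle<Map> script_map,
//                             Handle<AccessorInfo> info,
//                             PropertyAttributes attribs) {
//     AccessorConstantDescriptor d(Handle<Name>(Name::cast(info->name())),
//                                  info, attribs);
//     script_map->AppendDescriptor(&d);
//   }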
@@ -1851,9 +2026,8 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
isolate->factory()->ToBoolean(FLAG), NONE); \
}
- INITIALIZE_FLAG(FLAG_harmony_regexps)
- INITIALIZE_FLAG(FLAG_harmony_unicode_regexps)
INITIALIZE_FLAG(FLAG_harmony_tostring)
+ INITIALIZE_FLAG(FLAG_harmony_tolength)
#undef INITIALIZE_FLAG
}
@@ -1864,30 +2038,73 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_calls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_arrays)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_new_target)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_concat_spreadable)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_completion)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tolength)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
+
+
+static void SimpleInstallFunction(Handle<JSObject>& base, const char* name,
+ Builtins::Name call, int len, bool adapt) {
+ Handle<JSFunction> fun =
+ InstallFunction(base, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), call);
+ if (adapt) {
+ fun->shared()->set_internal_formal_parameter_count(len);
+ } else {
+ fun->shared()->DontAdaptArguments();
+ }
+ fun->shared()->set_length(len);
+}
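+// When adapt is true the declared parameter count matches len, so calls with
+// exactly len arguments can skip the arguments adaptor; builtins that inspect
+// args.length() themselves must pass false (see Reflect.get/set below).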
+
+
+void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
+ const char* name, Handle<Symbol> value) {
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context->global_object()));
+ Handle<String> symbol_string = factory->InternalizeUtf8String("Symbol");
+ Handle<JSObject> symbol = Handle<JSObject>::cast(
+ JSObject::GetProperty(global, symbol_string).ToHandleChecked());
+ Handle<String> name_string = factory->InternalizeUtf8String(name);
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ JSObject::AddProperty(symbol, name_string, value, attributes);
+}
+
+
+void Genesis::InitializeGlobal_harmony_tostring() {
+ if (!FLAG_harmony_tostring) return;
+ InstallPublicSymbol(factory(), native_context(), "toStringTag",
+ factory()->to_string_tag_symbol());
+}
+
+
+void Genesis::InitializeGlobal_harmony_concat_spreadable() {
+ if (!FLAG_harmony_concat_spreadable) return;
+ InstallPublicSymbol(factory(), native_context(), "isConcatSpreadable",
+ factory()->is_concat_spreadable_symbol());
+}
-void Genesis::InitializeGlobal_harmony_tolength() {
- Handle<JSObject> builtins(native_context()->builtins());
- Handle<Object> flag(factory()->ToBoolean(FLAG_harmony_tolength));
- Runtime::SetObjectProperty(isolate(), builtins,
- factory()->harmony_tolength_string(), flag,
- STRICT).Assert();
+void Genesis::InitializeGlobal_harmony_regexp_subclass() {
+ if (!FLAG_harmony_regexp_subclass) return;
+ InstallPublicSymbol(factory(), native_context(), "match",
+ factory()->match_symbol());
+ InstallPublicSymbol(factory(), native_context(), "replace",
+ factory()->replace_symbol());
+ InstallPublicSymbol(factory(), native_context(), "search",
+ factory()->search_symbol());
+ InstallPublicSymbol(factory(), native_context(), "split",
+ factory()->split_symbol());
}
@@ -1898,11 +2115,31 @@ void Genesis::InitializeGlobal_harmony_reflect() {
native_context()->global_object()));
Handle<String> reflect_string =
factory()->NewStringFromStaticChars("Reflect");
- Handle<Object> reflect =
+ Handle<JSObject> reflect =
factory()->NewJSObject(isolate()->object_function(), TENURED);
JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
-}
+ SimpleInstallFunction(reflect, "defineProperty",
+ Builtins::kReflectDefineProperty, 3, true);
+ SimpleInstallFunction(reflect, "deleteProperty",
+ Builtins::kReflectDeleteProperty, 2, true);
+ SimpleInstallFunction(reflect, "get",
+ Builtins::kReflectGet, 3, false);
+ SimpleInstallFunction(reflect, "getOwnPropertyDescriptor",
+ Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
+ SimpleInstallFunction(reflect, "getPrototypeOf",
+ Builtins::kReflectGetPrototypeOf, 1, true);
+ SimpleInstallFunction(reflect, "has",
+ Builtins::kReflectHas, 2, true);
+ SimpleInstallFunction(reflect, "isExtensible",
+ Builtins::kReflectIsExtensible, 1, true);
+ SimpleInstallFunction(reflect, "preventExtensions",
+ Builtins::kReflectPreventExtensions, 1, true);
+ SimpleInstallFunction(reflect, "set",
+ Builtins::kReflectSet, 3, false);
+ SimpleInstallFunction(reflect, "setPrototypeOf",
+ Builtins::kReflectSetPrototypeOf, 2, true);
+}
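+// Reflect.get and Reflect.set deliberately pass adapt == false: their
+// builtins default a missing receiver to target (not undefined), so letting
+// the arguments adaptor pad absent arguments with undefined would change
+// behavior.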
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
@@ -1932,19 +2169,19 @@ void Genesis::InitializeGlobal_harmony_simd() {
JSFunction::SetInstancePrototype(
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->SetInstanceClassName(*name);
+ cons->shared()->set_instance_class_name(*name);
Handle<JSObject> simd_object = factory->NewJSObject(cons, TENURED);
DCHECK(simd_object->IsJSObject());
JSObject::AddProperty(global, name, simd_object, DONT_ENUM);
// Install SIMD type functions. Set the instance class names since
-// InstallFunction only does this when we install on the GlobalObject.
+// InstallFunction only does this when we install on the JSGlobalObject.
#define SIMD128_INSTALL_FUNCTION(TYPE, Type, type, lane_count, lane_type) \
Handle<JSFunction> type##_function = InstallFunction( \
simd_object, #Type, JS_VALUE_TYPE, JSValue::kSize, \
isolate->initial_object_prototype(), Builtins::kIllegal); \
native_context()->set_##type##_function(*type##_function); \
- type##_function->SetInstanceClassName(*factory->Type##_string());
+ type##_function->shared()->set_instance_class_name(*factory->Type##_string());
SIMD128_TYPES(SIMD128_INSTALL_FUNCTION)
#undef SIMD128_INSTALL_FUNCTION
}
@@ -1996,50 +2233,30 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
bool Genesis::InstallNatives(ContextType context_type) {
HandleScope scope(isolate());
- // Create a function for the builtins object. Allocate space for the
- // JavaScript builtins, a reference to the builtins object
- // (itself) and a reference to the native_context directly in the object.
- Handle<Code> code = Handle<Code>(
- isolate()->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> builtins_fun = factory()->NewFunction(
- factory()->empty_string(), code, JS_BUILTINS_OBJECT_TYPE,
- JSBuiltinsObject::kSize);
-
- Handle<String> name =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
- builtins_fun->shared()->set_instance_class_name(*name);
- builtins_fun->initial_map()->set_dictionary_map(true);
- builtins_fun->initial_map()->set_prototype(heap()->null_value());
-
- // Allocate the builtins object.
- Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
- builtins->set_builtins(*builtins);
- builtins->set_native_context(*native_context());
- builtins->set_global_proxy(native_context()->global_proxy());
-
-
- // Set up the 'builtin' property, which refers to the js builtins object.
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> builtins_string =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
- JSObject::AddProperty(builtins, builtins_string, builtins, attributes);
-
- // Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(native_context()->global_object())->
- set_builtins(*builtins);
-
// Create a bridge function that has context in the native context.
Handle<JSFunction> bridge = factory()->NewFunction(factory()->empty_string());
DCHECK(bridge->context() == *isolate()->native_context());
- // Allocate the builtins context.
- Handle<Context> context =
- factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- context->set_global_object(*builtins); // override builtins global object
-
- native_context()->set_runtime_context(*context);
+ // Allocate the runtime context.
+ {
+ Handle<Context> context =
+ factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+ native_context()->set_runtime_context(*context);
+ Handle<Code> code = isolate()->builtins()->Illegal();
+ Handle<JSFunction> global_fun =
+ factory()->NewFunction(factory()->empty_string(), code,
+ JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
+ global_fun->initial_map()->set_dictionary_map(true);
+ global_fun->initial_map()->set_prototype(heap()->null_value());
+ Handle<JSGlobalObject> dummy_global =
+ Handle<JSGlobalObject>::cast(factory()->NewJSGlobalObject(global_fun));
+ dummy_global->set_native_context(*native_context());
+ dummy_global->set_global_proxy(native_context()->global_proxy());
+ context->set_global_object(*dummy_global);
+ // Something went wrong if we actually need to write into the dummy global.
+ dummy_global->set_properties(*GlobalDictionary::New(isolate(), 0));
+ dummy_global->set_elements(heap()->empty_fixed_array());
+ }
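+ // The dummy global exists only so the runtime context has a global object;
+ // the empty GlobalDictionary and empty_fixed_array set above back the
+ // DCHECKs in ConfigureUtilsObject().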
// Set up the utils object as shared container between native scripts.
Handle<JSObject> utils = factory()->NewJSObject(isolate()->object_function());
@@ -2065,166 +2282,13 @@ bool Genesis::InstallNatives(ContextType context_type) {
// A thin context is ready at this point.
if (context_type == THIN_CONTEXT) return true;
- if (FLAG_expose_natives_as != NULL) {
- Handle<String> utils_key = factory()->NewStringFromAsciiChecked("utils");
- JSObject::AddProperty(builtins, utils_key, utils, NONE);
- }
-
- { // -- S c r i p t
- // Builtin functions for Script.
- Handle<JSFunction> script_fun = InstallFunction(
- builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
- native_context()->set_script_function(*script_fun);
-
- Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
- Map::EnsureDescriptorSlack(script_map, 15);
-
- PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- Handle<AccessorInfo> script_column =
- Accessors::ScriptColumnOffsetInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_column->name())), script_column,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_id =
- Accessors::ScriptIdInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
- script_id, attribs);
- script_map->AppendDescriptor(&d);
- }
-
-
- Handle<AccessorInfo> script_name =
- Accessors::ScriptNameInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_line =
- Accessors::ScriptLineOffsetInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_source =
- Accessors::ScriptSourceInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_source->name())), script_source,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_type =
- Accessors::ScriptTypeInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_compilation_type =
- Accessors::ScriptCompilationTypeInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_compilation_type->name())),
- script_compilation_type, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_line_ends =
- Accessors::ScriptLineEndsInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_context_data =
- Accessors::ScriptContextDataInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_context_data->name())),
- script_context_data, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_eval_from_script =
- Accessors::ScriptEvalFromScriptInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_eval_from_script->name())),
- script_eval_from_script, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_eval_from_script_position =
- Accessors::ScriptEvalFromScriptPositionInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_eval_from_script_position->name())),
- script_eval_from_script_position, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_eval_from_function_name =
- Accessors::ScriptEvalFromFunctionNameInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_eval_from_function_name->name())),
- script_eval_from_function_name, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_source_url =
- Accessors::ScriptSourceUrlInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_source_url->name())),
- script_source_url, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_source_mapping_url =
- Accessors::ScriptSourceMappingUrlInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_source_mapping_url->name())),
- script_source_mapping_url, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_is_embedder_debug_script =
- Accessors::ScriptIsEmbedderDebugScriptInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
- script_is_embedder_debug_script, attribs);
- script_map->AppendDescriptor(&d);
- }
- }
{
// Builtin function for OpaqueReference -- a JSValue-based object that
// keeps its field isolated from JavaScript code. It may store objects
// that JavaScript code may not access.
- Handle<JSFunction> opaque_reference_fun = InstallFunction(
- builtins, "OpaqueReference", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSFunction> opaque_reference_fun = factory()->NewFunction(
+ factory()->empty_string(), isolate()->builtins()->Illegal(),
+ isolate()->initial_object_prototype(), JS_VALUE_TYPE, JSValue::kSize);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Accessors::FunctionSetPrototype(opaque_reference_fun, prototype).Assert();
@@ -2246,96 +2310,6 @@ bool Genesis::InstallNatives(ContextType context_type) {
InstallInternalArray(utils, "InternalPackedArray", FAST_ELEMENTS);
}
- { // -- S e t I t e r a t o r
- Handle<JSFunction> set_iterator_function = InstallFunction(
- builtins, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_set_iterator_map(
- set_iterator_function->initial_map());
- }
-
- { // -- M a p I t e r a t o r
- Handle<JSFunction> map_iterator_function = InstallFunction(
- builtins, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_map_iterator_map(
- map_iterator_function->initial_map());
- }
-
- {
- // Create generator meta-objects and install them on the builtins object.
- Handle<JSObject> builtins(native_context()->builtins());
- Handle<JSObject> iterator_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSObject> generator_object_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSObject> generator_function_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetObjectPrototype(generator_object_prototype, iterator_prototype);
- JSObject::AddProperty(
- builtins, factory()->InternalizeUtf8String("$iteratorPrototype"),
- iterator_prototype,
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
- JSObject::AddProperty(
- builtins,
- factory()->InternalizeUtf8String("GeneratorFunctionPrototype"),
- generator_function_prototype,
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
-
- JSObject::AddProperty(
- generator_function_prototype,
- factory()->InternalizeUtf8String("prototype"),
- generator_object_prototype,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- static const bool kUseStrictFunctionMap = true;
- Handle<JSFunction> generator_function_function =
- InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE,
- JSFunction::kSize, generator_function_prototype,
- Builtins::kIllegal, kUseStrictFunctionMap);
- generator_function_function->initial_map()->set_is_callable();
-
- // Create maps for generator functions and their prototypes. Store those
- // maps in the native context. The "prototype" property descriptor is
- // writable, non-enumerable, and non-configurable (as per ES6 draft
- // 04-14-15, section 25.2.4.3).
- Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
- // Generator functions do not have "caller" or "arguments" accessors.
- Handle<Map> sloppy_generator_function_map =
- Map::Copy(strict_function_map, "SloppyGeneratorFunction");
- Map::SetPrototype(sloppy_generator_function_map,
- generator_function_prototype);
- native_context()->set_sloppy_generator_function_map(
- *sloppy_generator_function_map);
-
- Handle<Map> strict_generator_function_map =
- Map::Copy(strict_function_map, "StrictGeneratorFunction");
- Map::SetPrototype(strict_generator_function_map,
- generator_function_prototype);
- native_context()->set_strict_generator_function_map(
- *strict_generator_function_map);
-
- Handle<Map> strong_function_map(native_context()->strong_function_map());
- Handle<Map> strong_generator_function_map =
- Map::Copy(strong_function_map, "StrongGeneratorFunction");
- Map::SetPrototype(strong_generator_function_map,
- generator_function_prototype);
- native_context()->set_strong_generator_function_map(
- *strong_generator_function_map);
-
- Handle<JSFunction> object_function(native_context()->object_function());
- Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
- Map::SetPrototype(generator_object_prototype_map,
- generator_object_prototype);
- native_context()->set_generator_object_prototype_map(
- *generator_object_prototype_map);
- }
-
- if (FLAG_disable_native_files) {
- PrintF("Warning: Running without installed natives!\n");
- return true;
- }
-
// Run the rest of the native scripts.
while (builtin_index < Natives::GetBuiltinsCount()) {
if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
@@ -2547,12 +2521,6 @@ bool Genesis::InstallNatives(ContextType context_type) {
}
}
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- builtins->ObjectVerify();
- }
-#endif
-
return true;
}
@@ -2564,9 +2532,7 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_modules_natives[] = {nullptr};
static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
nullptr};
- static const char* harmony_arrow_functions_natives[] = {nullptr};
- static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
- nullptr};
+ static const char* harmony_tostring_natives[] = {nullptr};
static const char* harmony_sloppy_natives[] = {nullptr};
static const char* harmony_sloppy_function_natives[] = {nullptr};
static const char* harmony_sloppy_let_natives[] = {nullptr};
@@ -2575,20 +2541,18 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_default_parameters_natives[] = {nullptr};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
nullptr};
- static const char* harmony_spread_calls_natives[] = {
- "native harmony-spread.js", nullptr};
static const char* harmony_destructuring_natives[] = {nullptr};
static const char* harmony_object_observe_natives[] = {
"native harmony-object-observe.js", nullptr};
- static const char* harmony_spread_arrays_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
- static const char* harmony_new_target_natives[] = {nullptr};
- static const char* harmony_concat_spreadable_natives[] = {
- "native harmony-concat-spreadable.js", nullptr};
+ static const char* harmony_concat_spreadable_natives[] = {nullptr};
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_tolength_natives[] = {nullptr};
+ static const char* harmony_completion_natives[] = {nullptr};
+ static const char* harmony_do_expressions_natives[] = {nullptr};
+ static const char* harmony_regexp_subclass_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2753,16 +2717,6 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
- // Expose the natives in global if a name for it is specified.
- if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives_key =
- factory->InternalizeUtf8String(FLAG_expose_natives_as);
- uint32_t dummy_index;
- if (natives_key->AsArrayIndex(&dummy_index)) return true;
- Handle<JSBuiltinsObject> natives(global->builtins());
- JSObject::AddProperty(global, natives_key, natives, DONT_ENUM);
- }
-
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
@@ -3023,7 +2977,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
}
- } else if (from->IsGlobalObject()) {
+ } else if (from->IsJSGlobalObject()) {
Handle<GlobalDictionary> properties =
Handle<GlobalDictionary>(from->global_dictionary());
int capacity = properties->Capacity();
@@ -3190,13 +3144,11 @@ Genesis::Genesis(Isolate* isolate,
Map::TraceAllTransitions(object_fun->initial_map());
}
#endif
- Handle<GlobalObject> global_object =
+ Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
HookUpGlobalObject(global_object, outdated_contexts);
- native_context()->builtins()->set_global_proxy(
- native_context()->global_proxy());
HookUpGlobalThisBinding(outdated_contexts);
if (!ConfigureGlobalObjects(global_proxy_template)) return;
@@ -3206,7 +3158,8 @@ Genesis::Genesis(Isolate* isolate,
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
CreateStrongModeFunctionMaps(empty_function);
- Handle<GlobalObject> global_object =
+ CreateIteratorMaps();
+ Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
InitializeGlobal(global_object, empty_function, context_type);
@@ -3221,6 +3174,9 @@ Genesis::Genesis(Isolate* isolate,
if (!ConfigureGlobalObjects(global_proxy_template)) return;
}
isolate->counters()->contexts_created_from_scratch()->Increment();
+ // Re-initialize the counter because it got incremented during snapshot
+ // creation.
+ isolate->native_context()->set_errors_thrown(Smi::FromInt(0));
}
// Install experimental natives. Do not include them into the
@@ -3234,20 +3190,17 @@ Genesis::Genesis(Isolate* isolate,
if (FLAG_experimental_extras) {
if (!InstallExperimentalExtraNatives()) return;
}
-
- // By now the utils object is useless and can be removed.
- native_context()->set_natives_utils_object(
- isolate->heap()->undefined_value());
}
// The serializer cannot serialize typed arrays. Reset those typed arrays
// for each new context.
- InitializeBuiltinTypedArrays();
} else if (context_type == DEBUG_CONTEXT) {
DCHECK(!isolate->serializer_enabled());
InitializeExperimentalGlobal();
if (!InstallDebuggerNatives()) return;
}
+ ConfigureUtilsObject(context_type);
+
// Check that the script context table is empty except for the 'this' binding.
// We do not need script contexts for native scripts.
if (!FLAG_global_var_shortcuts) {
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 659d74aad2..2baa8ff61a 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -110,8 +110,8 @@ class Bootstrapper final {
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
static bool CompileNative(Isolate* isolate, Vector<const char> name,
- Handle<JSObject> receiver, Handle<String> source,
- int argc, Handle<Object> argv[]);
+ Handle<String> source, int argc,
+ Handle<Object> argv[]);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
@@ -176,6 +176,7 @@ class NativesExternalStringResource final
size_t length_;
};
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BOOTSTRAPPER_H_
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 13225d2065..e4ceec99be 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -17,6 +17,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/vm-state-inl.h"
@@ -196,16 +197,12 @@ inline bool ClampedToInteger(Object* object, int* out) {
inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
int* out) {
- Map* arguments_map =
- isolate->context()->native_context()->sloppy_arguments_map();
- if (object->map() != arguments_map || !object->HasFastElements()) {
- return false;
- }
+ Map* arguments_map = isolate->native_context()->sloppy_arguments_map();
+ if (object->map() != arguments_map) return false;
+ DCHECK(object->HasFastElements());
Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
- if (!len_obj->IsSmi()) {
- return false;
- }
- *out = Smi::cast(len_obj)->value();
+ if (!len_obj->IsSmi()) return false;
+ *out = Max(0, Smi::cast(len_obj)->value());
return *out <= object->elements()->length();
}
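// The rewrite relies on the map check alone to guarantee fast elements
// (hence the DCHECK replacing the bailout), and clamps a negative stored
// length to zero instead of rejecting it.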
@@ -992,11 +989,11 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
uint32_t length = 0;
if (receiver->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(receiver));
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
length = static_cast<uint32_t>(array->length()->Number());
} else {
Handle<Object> val;
- Handle<Object> key(isolate->heap()->length_string(), isolate);
+ Handle<Object> key = isolate->factory()->length_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, val, Runtime::GetObjectProperty(isolate, receiver, key),
false);
@@ -1082,6 +1079,14 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
break;
}
case DICTIONARY_ELEMENTS: {
+ // CollectElementIndices() can't be called when there's a JSProxy
+ // on the prototype chain.
+ for (PrototypeIterator iter(isolate, receiver); !iter.IsAtEnd();
+ iter.Advance()) {
+ if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+ return IterateElementsSlow(isolate, receiver, length, visitor);
+ }
+ }
Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
// Collect all indices in the object and the prototypes less
@@ -1444,6 +1449,268 @@ BUILTIN(ArrayConcat) {
}
+// ES6 section 26.1.3 Reflect.defineProperty
+BUILTIN(ReflectDefineProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> attributes = args.at<Object>(3);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.defineProperty")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ PropertyDescriptor desc;
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+ return isolate->heap()->exception();
+ }
+
+ bool result =
+ JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
+ name, &desc, Object::DONT_THROW);
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ // TODO(neis): Make DefineOwnProperty return Maybe<bool>.
+ return *isolate->factory()->ToBoolean(result);
+}
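+// Argument slot 0 of BuiltinArguments is the receiver, so the
+// DCHECK_EQ(4, args.length()) above corresponds to the three JS parameters
+// of Reflect.defineProperty(target, propertyKey, attributes).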
+
+
+// ES6 section 26.1.4 Reflect.deleteProperty
+BUILTIN(ReflectDeleteProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.deleteProperty")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSReceiver::DeletePropertyOrElement(
+ Handle<JSReceiver>::cast(target), name));
+
+ return *result;
+}
+
+
+// ES6 section 26.1.6 Reflect.get
+BUILTIN(ReflectGet) {
+ HandleScope scope(isolate);
+ Handle<Object> undef = isolate->factory()->undefined_value();
+ Handle<Object> target = args.length() > 1 ? args.at<Object>(1) : undef;
+ Handle<Object> key = args.length() > 2 ? args.at<Object>(2) : undef;
+ Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.get")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::GetPropertyOrElement(
+ Handle<JSReceiver>::cast(target), name, receiver));
+
+ return *result;
+}
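+// No arity DCHECK here: the last two parameters of Reflect.get are optional,
+// and per ES6 26.1.6 a missing receiver defaults to target, so the builtin
+// inspects args.length() directly.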
+
+
+// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
+BUILTIN(ReflectGetOwnPropertyDescriptor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.getOwnPropertyDescriptor")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ PropertyDescriptor desc;
+ bool found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, Handle<JSReceiver>::cast(target), name, &desc);
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (!found) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
+}
+
+
+// ES6 section 26.1.8 Reflect.getPrototypeOf
+BUILTIN(ReflectGetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.getPrototypeOf")));
+ }
+
+ return *Object::GetPrototype(isolate, target);
+}
+
+
+// ES6 section 26.1.9 Reflect.has
+BUILTIN(ReflectHas) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.has")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Maybe<bool> result =
+ JSReceiver::HasProperty(Handle<JSReceiver>::cast(target), name);
+ return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
+ : isolate->heap()->exception();
+}
+
+
+// ES6 section 26.1.10 Reflect.isExtensible
+BUILTIN(ReflectIsExtensible) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.isExtensible")));
+ }
+
+ // TODO(neis): For now, we ignore proxies. Once proxies are fully
+ // implemented, do something like the following:
+ /*
+ Maybe<bool> maybe = JSReceiver::IsExtensible(
+ Handle<JSReceiver>::cast(target));
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ return *isolate->factory()->ToBoolean(maybe.FromJust());
+ */
+
+ if (target->IsJSObject()) {
+ return *isolate->factory()->ToBoolean(
+ JSObject::IsExtensible(Handle<JSObject>::cast(target)));
+ }
+ return *isolate->factory()->false_value();
+}
+
+
+// ES6 section 26.1.12 Reflect.preventExtensions
+BUILTIN(ReflectPreventExtensions) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.preventExtensions")));
+ }
+
+ Maybe<bool> result = JSReceiver::PreventExtensions(
+ Handle<JSReceiver>::cast(target), Object::DONT_THROW);
+ return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
+ : isolate->heap()->exception();
+}
+
+
+// ES6 section 26.1.13 Reflect.set
+BUILTIN(ReflectSet) {
+ HandleScope scope(isolate);
+ Handle<Object> undef = isolate->factory()->undefined_value();
+ Handle<Object> target = args.length() > 1 ? args.at<Object>(1) : undef;
+ Handle<Object> key = args.length() > 2 ? args.at<Object>(2) : undef;
+ Handle<Object> value = args.length() > 3 ? args.at<Object>(3) : undef;
+ Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.set")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, name, Handle<JSReceiver>::cast(target));
+ Maybe<bool> result = Object::SetSuperProperty(
+ &it, value, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.14 Reflect.setPrototypeOf
+BUILTIN(ReflectSetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> proto = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.setPrototypeOf")));
+ }
+
+ if (!proto->IsJSReceiver() && !proto->IsNull()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
+ }
+
+ Maybe<bool> result = JSReceiver::SetPrototype(
+ Handle<JSReceiver>::cast(target), proto, true, Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
BUILTIN(DateToPrimitive) {
HandleScope scope(isolate);
@@ -1536,7 +1803,7 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
Handle<Object> receiver(&args[0]);
if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
- if (!isolate->MayAccess(js_receiver)) {
+ if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
isolate->ReportFailedAccessCheck(js_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
@@ -1611,6 +1878,34 @@ BUILTIN(HandleApiCallConstruct) {
}
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return CallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return CallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return CallFunction_ReceiverIsAny();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Call_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Call_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return Call_ReceiverIsAny();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
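+// A minimal selection sketch using the wrappers above (declared in
+// builtins.h below):
+//
+//   Handle<Code> code =
+//       isolate->builtins()->Call(ConvertReceiverMode::kNullOrUndefined);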
+
+
namespace {
class RelocatableArguments : public BuiltinArguments<NEEDS_CALLED_FUNCTION>,
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index d9129608dc..c1159a8d52 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -59,6 +59,17 @@ enum BuiltinExtraArguments {
\
V(DateToPrimitive, NO_EXTRA_ARGUMENTS) \
\
+ V(ReflectDefineProperty, NO_EXTRA_ARGUMENTS) \
+ V(ReflectDeleteProperty, NO_EXTRA_ARGUMENTS) \
+ V(ReflectGet, NO_EXTRA_ARGUMENTS) \
+ V(ReflectGetOwnPropertyDescriptor, NO_EXTRA_ARGUMENTS) \
+ V(ReflectGetPrototypeOf, NO_EXTRA_ARGUMENTS) \
+ V(ReflectHas, NO_EXTRA_ARGUMENTS) \
+ V(ReflectIsExtensible, NO_EXTRA_ARGUMENTS) \
+ V(ReflectPreventExtensions, NO_EXTRA_ARGUMENTS) \
+ V(ReflectSet, NO_EXTRA_ARGUMENTS) \
+ V(ReflectSetPrototypeOf, NO_EXTRA_ARGUMENTS) \
+ \
V(SymbolConstructor, NO_EXTRA_ARGUMENTS) \
V(SymbolConstructor_ConstructStub, NO_EXTRA_ARGUMENTS) \
\
@@ -74,23 +85,26 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
- V(CallFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Call, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
- V(PushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -100,6 +114,11 @@ enum BuiltinExtraArguments {
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
+ V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -216,6 +235,10 @@ class Builtins {
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
+ // Convenience wrappers.
+ Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
@@ -266,8 +289,6 @@ class Builtins {
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
@@ -276,9 +297,30 @@ class Builtins {
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- static void Generate_CallFunction(MacroAssembler* masm);
+ static void Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode);
+ static void Generate_CallFunction_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
+ }
+ static void Generate_CallFunction_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
+ }
+ static void Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny);
+ }
// ES6 section 7.3.12 Call(F, V, [argumentsList])
- static void Generate_Call(MacroAssembler* masm);
+ static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
+ static void Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
+ }
+ static void Generate_Call_ReceiverIsNotNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
+ }
+ static void Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny);
+ }
// ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
static void Generate_ConstructFunction(MacroAssembler* masm);
@@ -287,8 +329,6 @@ class Builtins {
// ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget])
static void Generate_Construct(MacroAssembler* masm);
- static void Generate_PushArgsAndCall(MacroAssembler* masm);
-
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
static void Generate_ReflectApply(MacroAssembler* masm);
@@ -304,6 +344,11 @@ class Builtins {
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
+ static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm);
+ static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
+
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
@@ -326,6 +371,7 @@ class Builtins {
DISALLOW_COPY_AND_ASSIGN(Builtins);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BUILTINS_H_
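// The V(...) tables in builtins.h above are X-macros: each client defines V
// to expand one row, then instantiates the whole list. A minimal
// self-contained sketch of the same technique (names are illustrative, not
// V8's):

#include <cstdio>

#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

enum Color {
#define DECLARE_ENUM(name) k##name,
  COLOR_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
  kColorCount  // == 3
};

int main() {
#define PRINT_NAME(name) std::puts(#name);
  COLOR_LIST(PRINT_NAME)  // prints Red, Green, Blue
#undef PRINT_NAME
  return kColorCount == 3 ? 0 : 1;
}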
diff --git a/deps/v8/src/cached-powers.h b/deps/v8/src/cached-powers.h
index bfe36351ba..fade5c9fca 100644
--- a/deps/v8/src/cached-powers.h
+++ b/deps/v8/src/cached-powers.h
@@ -37,6 +37,7 @@ class PowersOfTenCache {
int* found_exponent);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CACHED_POWERS_H_
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index d90f919341..ab5caa7557 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -85,6 +85,7 @@ inline bool IsRegExpNewline(uc16 c) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CHAR_PREDICATES_INL_H_
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 89f417196e..3161ae4ae9 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -79,6 +79,7 @@ struct WhiteSpaceOrLineTerminator {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CHAR_PREDICATES_H_
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index bd798663ad..80404e8d89 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -23,7 +23,8 @@ extern bool FLAG_enable_slow_asserts;
const bool FLAG_enable_slow_asserts = false;
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#define DCHECK_TAG_ALIGNED(address) \
DCHECK((reinterpret_cast<intptr_t>(address) & \
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 9e776b40fc..ad6890bf22 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -59,17 +59,17 @@ Callable CodeFactory::KeyedLoadICInOptimizedCode(
// static
Callable CodeFactory::CallIC(Isolate* isolate, int argc,
- CallICState::CallType call_type) {
- return Callable(CallIC::initialize_stub(isolate, argc, call_type),
+ ConvertReceiverMode mode) {
+ return Callable(CallIC::initialize_stub(isolate, argc, mode),
CallFunctionWithFeedbackDescriptor(isolate));
}
// static
Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
- CallICState::CallType call_type) {
+ ConvertReceiverMode mode) {
return Callable(
- CallIC::initialize_stub_in_optimized_code(isolate, argc, call_type),
+ CallIC::initialize_stub_in_optimized_code(isolate, argc, mode),
CallFunctionWithFeedbackAndVectorDescriptor(isolate));
}
@@ -130,24 +130,16 @@ Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op,
// static
-Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
- Strength strength) {
- BinaryOpICStub stub(isolate, op, strength);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::CompareNilIC(Isolate* isolate, NilValue nil_value) {
+ Handle<Code> code = CompareNilICStub::GetUninitialized(isolate, nil_value);
+ return Callable(code, CompareNilDescriptor(isolate));
}
// static
-Callable CodeFactory::LoadGlobalViaContext(Isolate* isolate, int depth) {
- LoadGlobalViaContextStub stub(isolate, depth);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-}
-
-
-// static
-Callable CodeFactory::StoreGlobalViaContext(Isolate* isolate, int depth,
- LanguageMode language_mode) {
- StoreGlobalViaContextStub stub(isolate, depth, language_mode);
+Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
+ Strength strength) {
+ BinaryOpICStub stub(isolate, op, strength);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@@ -183,6 +175,13 @@ Callable CodeFactory::ToString(Isolate* isolate) {
// static
+Callable CodeFactory::ToLength(Isolate* isolate) {
+ ToLengthStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::ToObject(Isolate* isolate) {
ToObjectStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -190,6 +189,20 @@ Callable CodeFactory::ToObject(Isolate* isolate) {
// static
+Callable CodeFactory::NumberToString(Isolate* isolate) {
+ NumberToStringStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
+ RegExpConstructResultStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
@@ -227,6 +240,13 @@ Callable CodeFactory::FastCloneShallowObject(Isolate* isolate, int length) {
// static
+Callable CodeFactory::FastNewContext(Isolate* isolate, int slot_count) {
+ FastNewContextStub stub(isolate, slot_count);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::FastNewClosure(Isolate* isolate,
LanguageMode language_mode,
FunctionKind kind) {
@@ -254,17 +274,61 @@ Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
// static
-Callable CodeFactory::CallFunction(Isolate* isolate, int argc,
- CallFunctionFlags flags) {
- CallFunctionStub stub(isolate, argc, flags);
+Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
+ AllocateMutableHeapNumberStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
+ AllocateInNewSpaceStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::PushArgsAndCall(Isolate* isolate) {
- return Callable(isolate->builtins()->PushArgsAndCall(),
- PushArgsAndCallDescriptor(isolate));
+Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
+ return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
+ ArgumentAdaptorDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
+ return Callable(isolate->builtins()->Call(mode),
+ CallTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
+ return Callable(isolate->builtins()->CallFunction(mode),
+ CallTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndCall(),
+ InterpreterPushArgsAndCallDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::InterpreterPushArgsAndConstruct(Isolate* isolate) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(),
+ InterpreterPushArgsAndConstructDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::InterpreterCEntry(Isolate* isolate) {
+ // TODO(rmcilroy): Deal with runtime functions that return two values.
+ // Note: If we ever use fpregs in the interpreter then we will need to
+ // save fpregs too.
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs, kArgvInRegister);
+ return Callable(stub.GetCode(), InterpreterCEntryDescriptor(isolate));
}
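// This CEntry variant is built with kArgvInRegister rather than the default
// kArgvOnStack -- presumably because the interpreter hands the argument
// vector over in a register rather than on the stack. For comparison, with
// the constructor defaults as declared in code-stubs.h:
//
//   CEntryStub on_stack(isolate, 1);  // argv taken from the stack
//   CEntryStub in_reg(isolate, 1, kDontSaveFPRegs, kArgvInRegister);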
} // namespace internal
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 5a67b27fd5..4775efeb89 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -43,9 +43,10 @@ class CodeFactory final {
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state);
static Callable CallIC(Isolate* isolate, int argc,
- CallICState::CallType call_type);
- static Callable CallICInOptimizedCode(Isolate* isolate, int argc,
- CallICState::CallType call_type);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable CallICInOptimizedCode(
+ Isolate* isolate, int argc,
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
@@ -56,16 +57,13 @@ class CodeFactory final {
static Callable CompareIC(Isolate* isolate, Token::Value op,
Strength strength);
+ static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
Strength strength);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
- static Callable LoadGlobalViaContext(Isolate* isolate, int depth);
- static Callable StoreGlobalViaContext(Isolate* isolate, int depth,
- LanguageMode language_mode);
-
static Callable InstanceOf(Isolate* isolate);
static Callable ToBoolean(
@@ -74,7 +72,11 @@ class CodeFactory final {
static Callable ToNumber(Isolate* isolate);
static Callable ToString(Isolate* isolate);
+ static Callable ToLength(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
+ static Callable NumberToString(Isolate* isolate);
+
+ static Callable RegExpConstructResult(Isolate* isolate);
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
@@ -85,6 +87,7 @@ class CodeFactory final {
static Callable FastCloneShallowArray(Isolate* isolate);
static Callable FastCloneShallowObject(Isolate* isolate, int length);
+ static Callable FastNewContext(Isolate* isolate, int slot_count);
static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
FunctionKind kind);
@@ -92,11 +95,18 @@ class CodeFactory final {
bool has_duplicate_parameters);
static Callable AllocateHeapNumber(Isolate* isolate);
-
- static Callable CallFunction(Isolate* isolate, int argc,
- CallFunctionFlags flags);
-
- static Callable PushArgsAndCall(Isolate* isolate);
+ static Callable AllocateMutableHeapNumber(Isolate* isolate);
+ static Callable AllocateInNewSpace(Isolate* isolate);
+
+ static Callable ArgumentAdaptor(Isolate* isolate);
+ static Callable Call(Isolate* isolate,
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable CallFunction(
+ Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+
+ static Callable InterpreterPushArgsAndCall(Isolate* isolate);
+ static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
+ static Callable InterpreterCEntry(Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 801079bcd9..b2d07d9c9b 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -5,10 +5,10 @@
#include "src/code-stubs.h"
#include "src/bailout-reason.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
#include "src/field-index.h"
-#include "src/hydrogen.h"
#include "src/ic/ic.h"
-#include "src/lithium.h"
namespace v8 {
namespace internal {
@@ -442,7 +442,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
info()->MarkMustNotHaveEagerFrame();
HInstruction* allocation_site = Add<HLoadKeyed>(
- GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS,
+ GetParameter(0), GetParameter(1), nullptr, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
@@ -505,7 +505,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* undefined = graph()->GetConstantUndefined();
HInstruction* allocation_site = Add<HLoadKeyed>(
- GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS,
+ GetParameter(0), GetParameter(1), nullptr, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
@@ -635,7 +635,7 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
HInstruction* feedback_vector = GetParameter(0);
HInstruction* slot = GetParameter(1);
- Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ Add<HStoreKeyed>(feedback_vector, slot, object, nullptr, FAST_ELEMENTS,
INITIALIZING_STORE);
return feedback_vector;
}
@@ -667,7 +667,7 @@ HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
HInstruction* feedback_vector =
GetParameter(CreateWeakCellDescriptor::kVectorIndex);
HInstruction* slot = GetParameter(CreateWeakCellDescriptor::kSlotIndex);
- Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ Add<HStoreKeyed>(feedback_vector, slot, object, nullptr, FAST_ELEMENTS,
INITIALIZING_STORE);
return graph()->GetConstant0();
}
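// (A note on the extra `nullptr` threaded through every Add<HLoadKeyed> and
// Add<HStoreKeyed> call in this file: the instructions gained a fourth
// operand -- apparently a backing-store owner that keeps the underlying
// storage alive across the access. Plain FixedArray accesses pass nullptr;
// the KeyedLoadGeneric hunk near the end of this file passes `cache_keys`
// for an INT32_ELEMENTS load.)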
@@ -823,8 +823,8 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
HValue* value) {
HValue* result = NULL;
HInstruction* backing_store =
- Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, FAST_ELEMENTS,
- ALLOW_RETURN_HOLE);
+ Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
HValue* backing_store_length = Add<HLoadNamedField>(
backing_store, nullptr, HObjectAccess::ForFixedArrayLength());
@@ -834,10 +834,10 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
in_unmapped_range.Then();
{
if (value == NULL) {
- result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
- NEVER_RETURN_HOLE);
+ result = Add<HLoadKeyed>(backing_store, key, nullptr, nullptr,
+ FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
} else {
- Add<HStoreKeyed>(backing_store, key, value, FAST_HOLEY_ELEMENTS);
+ Add<HStoreKeyed>(backing_store, key, value, nullptr, FAST_HOLEY_ELEMENTS);
}
}
in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
@@ -894,8 +894,9 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
in_range.Then();
{
HValue* index = AddUncasted<HAdd>(key, constant_two);
- HInstruction* mapped_index = Add<HLoadKeyed>(
- elements, index, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+ HInstruction* mapped_index =
+ Add<HLoadKeyed>(elements, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS,
+ ALLOW_RETURN_HOLE);
IfBuilder is_valid(this);
is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
@@ -906,15 +907,17 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
      // TODO(mvstanton): I'd like to assert from this point that if the
      // mapped_index is not the hole, it is indeed a smi. An unnecessary
      // smi check is being emitted.
HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
- nullptr, FAST_ELEMENTS);
+ nullptr, nullptr, FAST_ELEMENTS);
STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
if (is_load) {
- HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ HValue* result =
+ Add<HLoadKeyed>(the_context, mapped_index, nullptr, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
environment()->Push(result);
} else {
DCHECK(value != NULL);
- Add<HStoreKeyed>(the_context, mapped_index, value, FAST_ELEMENTS);
+ Add<HStoreKeyed>(the_context, mapped_index, value, nullptr,
+ FAST_ELEMENTS);
environment()->Push(value);
}
}
@@ -1139,6 +1142,34 @@ Handle<Code> AllocateHeapNumberStub::GenerateCode() {
}
+template <>
+HValue* CodeStubGraphBuilder<AllocateMutableHeapNumberStub>::BuildCodeStub() {
+ HValue* result =
+ Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapObject(),
+ NOT_TENURED, MUTABLE_HEAP_NUMBER_TYPE);
+ AddStoreMapConstant(result, isolate()->factory()->mutable_heap_number_map());
+ return result;
+}
+
+
+Handle<Code> AllocateMutableHeapNumberStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
+ HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
+ JS_OBJECT_TYPE);
+ return result;
+}
+
+
+Handle<Code> AllocateInNewSpaceStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
AllocationSiteOverrideMode override_mode,
@@ -1214,7 +1245,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
// trigger it.
HValue* length = GetArgumentsLength();
HConstant* max_alloc_length =
- Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ Add<HConstant>(JSArray::kInitialMaxFastElementArray);
HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
// We need to fill with the hole if it's a smi array in the multi-argument
@@ -1245,7 +1276,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
HInstruction* argument = Add<HAccessArgumentsAt>(
argument_elements, checked_length, key);
- Add<HStoreKeyed>(elements, key, argument, kind);
+ Add<HStoreKeyed>(elements, key, argument, nullptr, kind);
builder.EndBody();
return new_object;
}
@@ -1342,12 +1373,11 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
if_nil.Then();
if (continuation.IsFalseReachable()) {
if_nil.Else();
- if_nil.Return(graph()->GetConstant0());
+ if_nil.Return(graph()->GetConstantFalse());
}
if_nil.End();
- return continuation.IsTrueReachable()
- ? graph()->GetConstant1()
- : graph()->GetConstantUndefined();
+ return continuation.IsTrueReachable() ? graph()->GetConstantTrue()
+ : graph()->GetConstantUndefined();
}
@@ -1877,8 +1907,8 @@ HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
HValue* field_offset_value = Add<HConstant>(field_offset);
field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
}
- HInstruction* field_entry =
- Add<HLoadKeyed>(optimized_map, field_slot, nullptr, FAST_ELEMENTS);
+ HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
+ nullptr, nullptr, FAST_ELEMENTS);
return field_entry;
}
@@ -2298,13 +2328,13 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
key_index->ClearFlag(HValue::kCanOverflow);
HValue* map_to_check =
- Add<HLoadKeyed>(cache_keys, map_index, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, 0);
+ Add<HLoadKeyed>(cache_keys, map_index, nullptr, nullptr,
+ FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
lookup_if->And();
HValue* key_to_check =
- Add<HLoadKeyed>(cache_keys, key_index, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, 0);
+ Add<HLoadKeyed>(cache_keys, key_index, nullptr, nullptr,
+ FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
lookup_if->Then();
{
@@ -2315,7 +2345,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
index->ClearFlag(HValue::kCanOverflow);
HValue* property_index =
- Add<HLoadKeyed>(cache_field_offsets, index, nullptr,
+ Add<HLoadKeyed>(cache_field_offsets, index, nullptr, cache_keys,
INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
Push(property_index);
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 5c8c763a3a..61df12781b 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -744,6 +744,18 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
+void AllocateMutableHeapNumberStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize();
+}
+
+
+void AllocateInNewSpaceStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize();
+}
+
+
void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_CompareNilIC_Miss));
descriptor->SetMissHandler(ExternalReference(
@@ -881,11 +893,6 @@ void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
}
-void CallFunctionStub::PrintName(std::ostream& os) const { // NOLINT
- os << "CallFunctionStub_Args" << argc();
-}
-
-
void CallConstructStub::PrintName(std::ostream& os) const { // NOLINT
os << "CallConstructStub";
if (RecordCallTarget()) os << "_Recording";
@@ -1049,7 +1056,7 @@ InternalArrayConstructorStub::InternalArrayConstructorStub(
Representation RepresentationFromType(Type* type) {
- if (type->Is(Type::UntaggedSigned()) || type->Is(Type::UntaggedUnsigned())) {
+ if (type->Is(Type::UntaggedIntegral())) {
return Representation::Integer32();
}
@@ -1064,5 +1071,6 @@ Representation RepresentationFromType(Type* type) {
DCHECK(!type->Is(Type::Untagged()));
return Representation::Tagged();
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 163fdd8808..d69e9263e1 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -27,7 +27,6 @@ namespace internal {
V(CallApiAccessor) \
V(CallApiGetter) \
V(CallConstruct) \
- V(CallFunction) \
V(CallIC) \
V(CEntry) \
V(CompareIC) \
@@ -52,6 +51,7 @@ namespace internal {
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
+ V(ToLength) \
V(ToString) \
V(ToObject) \
V(VectorStoreICTrampoline) \
@@ -60,6 +60,8 @@ namespace internal {
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(AllocateHeapNumber) \
+ V(AllocateMutableHeapNumber) \
+ V(AllocateInNewSpace) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
@@ -539,7 +541,7 @@ class TurboFanCodeStub : public CodeStub {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GenerateCode() override;
- virtual int GetStackParameterCount() const override {
+ int GetStackParameterCount() const override {
return GetCallInterfaceDescriptor().GetStackParameterCount();
}
@@ -597,7 +599,8 @@ class RuntimeCallHelper {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#if V8_TARGET_ARCH_IA32
#include "src/ia32/code-stubs-ia32.h"
@@ -629,9 +632,9 @@ class StubRuntimeCallHelper : public RuntimeCallHelper {
public:
StubRuntimeCallHelper() {}
- virtual void BeforeCall(MacroAssembler* masm) const;
+ void BeforeCall(MacroAssembler* masm) const override;
- virtual void AfterCall(MacroAssembler* masm) const;
+ void AfterCall(MacroAssembler* masm) const override;
};
@@ -640,9 +643,9 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
public:
NopRuntimeCallHelper() {}
- virtual void BeforeCall(MacroAssembler* masm) const {}
+ void BeforeCall(MacroAssembler* masm) const override {}
- virtual void AfterCall(MacroAssembler* masm) const {}
+ void AfterCall(MacroAssembler* masm) const override {}
};
@@ -972,18 +975,14 @@ class CallICStub: public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- InlineCacheState GetICState() const override { return DEFAULT; }
+ InlineCacheState GetICState() const override { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
protected:
- bool CallAsMethod() const {
- return state().call_type() == CallICState::METHOD;
- }
-
- int arg_count() const { return state().arg_count(); }
+ int arg_count() const { return state().argc(); }
CallICState state() const {
return CallICState(static_cast<ExtraICState>(minor_key_));
@@ -1243,8 +1242,10 @@ class StoreTransitionHelper {
}
static Register MapRegister() {
- return FLAG_vector_stores ? VectorStoreTransitionDescriptor::MapRegister()
- : StoreTransitionDescriptor::MapRegister();
+ if (FLAG_vector_stores) {
+ return VectorStoreTransitionDescriptor::MapRegister();
+ }
+ return StoreTransitionDescriptor::MapRegister();
}
static int ReceiverIndex() {
@@ -1255,26 +1256,25 @@ class StoreTransitionHelper {
static int ValueIndex() { return StoreTransitionDescriptor::kValueIndex; }
- static int SlotIndex() {
- DCHECK(FLAG_vector_stores);
- return VectorStoreTransitionDescriptor::kSlotIndex;
+ static int MapIndex() {
+ DCHECK(static_cast<int>(VectorStoreTransitionDescriptor::kMapIndex) ==
+ static_cast<int>(StoreTransitionDescriptor::kMapIndex));
+ return StoreTransitionDescriptor::kMapIndex;
}
static int VectorIndex() {
DCHECK(FLAG_vector_stores);
+ if (HasVirtualSlotArg()) {
+ return VectorStoreTransitionDescriptor::kVirtualSlotVectorIndex;
+ }
return VectorStoreTransitionDescriptor::kVectorIndex;
}
- static int MapIndex() {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor::kMapIndex;
- }
- return StoreTransitionDescriptor::kMapIndex;
+ // Some platforms don't have a slot arg.
+ static bool HasVirtualSlotArg() {
+ if (!FLAG_vector_stores) return false;
+ return SlotRegister().is(no_reg);
}
-
- // Some platforms push Slot, Vector, Map on the stack instead of in
- // registers.
- static bool UsesStackArgs() { return MapRegister().is(no_reg); }
};
@@ -1353,7 +1353,7 @@ class StoreGlobalStub : public HandlerStub {
return isolate->factory()->termination_exception();
}
- Handle<Code> GetCodeCopyFromTemplate(Handle<GlobalObject> global,
+ Handle<Code> GetCodeCopyFromTemplate(Handle<JSGlobalObject> global,
Handle<PropertyCell> cell) {
Code::FindAndReplacePattern pattern;
if (check_global()) {
@@ -1823,9 +1823,11 @@ std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s);
class CEntryStub : public PlatformCodeStub {
public:
CEntryStub(Isolate* isolate, int result_size,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ ArgvMode argv_mode = kArgvOnStack)
: PlatformCodeStub(isolate) {
- minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
+ minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
+ ArgvMode::encode(argv_mode == kArgvInRegister);
DCHECK(result_size == 1 || result_size == 2);
#if _WIN64 || V8_TARGET_ARCH_PPC
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
@@ -1840,6 +1842,7 @@ class CEntryStub : public PlatformCodeStub {
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
+ bool argv_in_register() const { return ArgvMode::decode(minor_key_); }
#if _WIN64 || V8_TARGET_ARCH_PPC
int result_size() const { return ResultSizeBits::decode(minor_key_); }
#endif // _WIN64
@@ -1847,7 +1850,8 @@ class CEntryStub : public PlatformCodeStub {
bool NeedsImmovableCode() override;
class SaveDoublesBits : public BitField<bool, 0, 1> {};
- class ResultSizeBits : public BitField<int, 1, 3> {};
+ class ArgvMode : public BitField<bool, 1, 1> {};
+ class ResultSizeBits : public BitField<int, 2, 3> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(CEntry, PlatformCodeStub);
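// The minor key now packs three fields: bit 0 = save doubles, bit 1 =
// argv-in-register, bits 2..4 = result size. A worked encoding, assuming the
// usual BitField semantics (value shifted into its field):
//
//   kSaveFPRegs, kArgvInRegister, result_size == 2
//     SaveDoublesBits::encode(true)  -> 0b00001
//     ArgvMode::encode(true)         -> 0b00010
//     ResultSizeBits::encode(2)      -> 0b01000
//     minor_key_                     == 0b01011 == 11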
@@ -1954,38 +1958,6 @@ class RegExpConstructResultStub final : public HydrogenCodeStub {
};
-// TODO(bmeurer): Deprecate the CallFunctionStub in favor of the more general
-// Invoke family of builtins.
-class CallFunctionStub: public PlatformCodeStub {
- public:
- CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags)
- : PlatformCodeStub(isolate) {
- DCHECK(argc >= 0 && argc <= Code::kMaxArguments);
- minor_key_ = ArgcBits::encode(argc) | FlagBits::encode(flags);
- }
-
- private:
- int argc() const { return ArgcBits::decode(minor_key_); }
- int flags() const { return FlagBits::decode(minor_key_); }
-
- bool CallAsMethod() const {
- return flags() == CALL_AS_METHOD || flags() == WRAP_AND_CALL;
- }
-
- bool NeedsChecks() const { return flags() != WRAP_AND_CALL; }
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class FlagBits : public BitField<CallFunctionFlags, 0, 2> {};
- class ArgcBits : public BitField<unsigned, 2, Code::kArgumentsBits> {};
- STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction);
- DEFINE_PLATFORM_CODE_STUB(CallFunction, PlatformCodeStub);
-};
-
-
class CallConstructStub: public PlatformCodeStub {
public:
CallConstructStub(Isolate* isolate, CallConstructorFlags flags)
@@ -2250,7 +2222,7 @@ class LoadICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
@@ -2286,7 +2258,7 @@ class VectorStoreICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
@@ -2324,7 +2296,7 @@ class CallICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
@@ -2350,7 +2322,7 @@ class LoadICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2373,7 +2345,7 @@ class KeyedLoadICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2396,7 +2368,7 @@ class VectorStoreICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2419,8 +2391,8 @@ class VectorKeyedStoreICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
- virtual ExtraICState GetExtraICState() const final {
+ InlineCacheState GetICState() const final { return GENERIC; }
+ ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2650,6 +2622,28 @@ class AllocateHeapNumberStub final : public HydrogenCodeStub {
};
+class AllocateMutableHeapNumberStub final : public HydrogenCodeStub {
+ public:
+ explicit AllocateMutableHeapNumberStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
+ DEFINE_HYDROGEN_CODE_STUB(AllocateMutableHeapNumber, HydrogenCodeStub);
+};
+
+
+class AllocateInNewSpaceStub final : public HydrogenCodeStub {
+ public:
+ explicit AllocateInNewSpaceStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateInNewSpace);
+ DEFINE_HYDROGEN_CODE_STUB(AllocateInNewSpace, HydrogenCodeStub);
+};
+
+
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(Isolate* isolate,
@@ -3058,6 +3052,15 @@ class ToNumberStub final : public PlatformCodeStub {
};
+class ToLengthStub final : public PlatformCodeStub {
+ public:
+ explicit ToLengthStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToLength);
+ DEFINE_PLATFORM_CODE_STUB(ToLength, PlatformCodeStub);
+};
+
+
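// ES6 7.1.15 ToLength clamps any number into the integer range
// [0, 2^53 - 1]. A hedged sketch of the operation this stub (together with
// its runtime fallback) implements, on an input that has already been
// through ToNumber:

#include <cmath>

double ToLengthDouble(double number) {
  if (std::isnan(number) || number <= 0) return 0;       // NaN, -0, negatives
  const double kMaxSafeInteger = 9007199254740991.0;     // 2^53 - 1
  double len = std::floor(number);                       // ToInteger
  return len > kMaxSafeInteger ? kMaxSafeInteger : len;  // clamp (also +inf)
}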
class ToStringStub final : public PlatformCodeStub {
public:
explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index fd039d0f8a..1e806d2ae5 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -122,19 +122,19 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
if (FLAG_trace_codegen || print_source || print_ast) {
base::SmartArrayPointer<char> name = info->GetDebugName();
- PrintF("[generating %s code for %s function: %s]", kind, ftype, name.get());
+ PrintF("[generating %s code for %s function: %s]\n", kind, ftype,
+ name.get());
}
#ifdef DEBUG
if (info->parse_info() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(info->isolate(), info->zone())
- .PrintProgram(info->literal()));
+ PrettyPrinter(info->isolate()).PrintProgram(info->literal()));
}
if (info->parse_info() && print_ast) {
- PrintF("--- AST ---\n%s\n", AstPrinter(info->isolate(), info->zone())
- .PrintProgram(info->literal()));
+ PrintF("--- AST ---\n%s\n",
+ AstPrinter(info->isolate()).PrintProgram(info->literal()));
}
#endif // DEBUG
}
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 04f130999e..7019d3d106 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -170,6 +170,7 @@ class CodeAgingHelper {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CODEGEN_H_
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 2d7609ec18..2295f4c685 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -236,6 +236,7 @@ class CompilationCache {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILATION_CACHE_H_
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index 643b88ab0e..c9c194f19f 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -106,6 +106,24 @@ void CompilationDependencies::Rollback() {
}
+void CompilationDependencies::AssumeMapNotDeprecated(Handle<Map> map) {
+ DCHECK(!map->is_deprecated());
+ // Do nothing if the map cannot be deprecated.
+ if (map->CanBeDeprecated()) {
+ Insert(DependentCode::kTransitionGroup, map);
+ }
+}
+
+
+void CompilationDependencies::AssumeMapStable(Handle<Map> map) {
+ DCHECK(map->is_stable());
+ // Do nothing if the map cannot transition.
+ if (map->CanTransition()) {
+ Insert(DependentCode::kPrototypeCheckGroup, map);
+ }
+}
+
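// Each Assume* call registers the code being compiled in one of the map's
// DependentCode groups; if the assumption is later violated (the map
// transitions or gets deprecated), the dependent code is deoptimized. A
// hedged caller's-eye sketch (`info` is an assumed CompilationInfo*):
//
//   if (map->is_stable()) {
//     info->dependencies()->AssumeMapStable(map);
//     // ... emit loads that rely on the map's layout without map checks ...
//   }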
+
void CompilationDependencies::AssumeTransitionStable(
Handle<AllocationSite> site) {
// Do nothing if the object doesn't have any useful element transitions left.
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index c14220880f..ca09ef5e11 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -31,6 +31,8 @@ class CompilationDependencies {
void AssumeFieldType(Handle<Map> map) {
Insert(DependentCode::kFieldTypeGroup, map);
}
+ void AssumeMapStable(Handle<Map> map);
+ void AssumeMapNotDeprecated(Handle<Map> map);
void AssumePropertyCell(Handle<PropertyCell> cell) {
Insert(DependentCode::kPropertyCellChangedGroup, cell);
}
@@ -61,7 +63,7 @@ class CompilationDependencies {
DependentCode* Get(Handle<Object> object);
void Set(Handle<Object> object, Handle<DependentCode> dep);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DEPENDENCIES_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 20aa558c3d..d55bf33bab 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -11,15 +11,16 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler/pipeline.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/typing.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/gdb-jit.h"
-#include "src/hydrogen.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/lithium.h"
#include "src/log-inl.h"
#include "src/messages.h"
#include "src/parser.h"
@@ -31,7 +32,6 @@
#include "src/scopeinfo.h"
#include "src/scopes.h"
#include "src/snapshot/serialize.h"
-#include "src/typing.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -172,9 +172,6 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
dependencies_(isolate, zone),
bailout_reason_(kNoReason),
prologue_offset_(Code::kPrologueOffsetNotSet),
- no_frame_ranges_(isolate->cpu_profiler()->is_profiling()
- ? new List<OffsetRange>(2)
- : nullptr),
track_positions_(FLAG_hydrogen_track_positions ||
isolate->cpu_profiler()->is_profiling()),
opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
@@ -200,7 +197,6 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
CompilationInfo::~CompilationInfo() {
DisableFutureOptimization();
delete deferred_handles_;
- delete no_frame_ranges_;
#ifdef DEBUG
// Check that no dependent maps have been added or added dependent maps have
// been rolled back or committed.
@@ -249,13 +245,15 @@ bool CompilationInfo::ShouldSelfOptimize() {
void CompilationInfo::EnsureFeedbackVector() {
if (feedback_vector_.is_null()) {
- feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(
- literal()->feedback_vector_spec());
+ Handle<TypeFeedbackMetadata> feedback_metadata =
+ TypeFeedbackMetadata::New(isolate(), literal()->feedback_vector_spec());
+ feedback_vector_ = TypeFeedbackVector::New(isolate(), feedback_metadata);
}
// It's very important that recompiles do not alter the structure of the
// type feedback vector.
- CHECK(!feedback_vector_->SpecDiffersFrom(literal()->feedback_vector_spec()));
+ CHECK(!feedback_vector_->metadata()->SpecDiffersFrom(
+ literal()->feedback_vector_spec()));
}
@@ -330,9 +328,8 @@ base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
}
-bool CompilationInfo::MustReplaceUndefinedReceiverWithGlobalProxy() {
- return is_sloppy(language_mode()) && !is_native() &&
- scope()->has_this_declaration() && scope()->receiver()->is_used();
+bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
+ return is_sloppy(language_mode()) && !is_native();
}
@@ -441,9 +438,10 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (info()->shared_info()->asm_function()) {
if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
info()->MarkAsFunctionContextSpecializing();
- } else if (FLAG_turbo_type_feedback) {
- info()->MarkAsTypeFeedbackEnabled();
- info()->EnsureFeedbackVector();
+ } else if (info()->has_global_object() &&
+ FLAG_native_context_specialization) {
+ info()->MarkAsNativeContextSpecializing();
+ info()->MarkAsTypingEnabled();
}
if (!info()->shared_info()->asm_function() ||
FLAG_turbo_asm_deoptimization) {
@@ -705,15 +703,37 @@ static bool CompileUnoptimizedCode(CompilationInfo* info) {
}
+// TODO(rmcilroy): Remove this temporary work-around when ignition supports
+// catch and eval.
+static bool IgnitionShouldFallbackToFullCodeGen(Scope* scope) {
+ if (!FLAG_ignition_fallback_on_eval_and_catch) return false;
+
+ if (scope->is_eval_scope() || scope->is_catch_scope() ||
+ scope->calls_eval()) {
+ return true;
+ }
+ for (auto inner_scope : *scope->inner_scopes()) {
+ if (IgnitionShouldFallbackToFullCodeGen(inner_scope)) return true;
+ }
+ return false;
+}
+
+
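// The walk is transitive: a function falls back to full-codegen if any scope
// nested anywhere inside it qualifies. For illustration:
//
//   function outer() {         // falls back too, via the inner catch scope
//     function inner() {
//       try { } catch (e) { }  // catch scope
//     }
//   }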
static bool GenerateBytecode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info->parse_info()) ||
- !interpreter::Interpreter::MakeBytecode(info)) {
+ bool success = false;
+ if (Compiler::Analyze(info->parse_info())) {
+ if (IgnitionShouldFallbackToFullCodeGen(info->scope())) {
+ success = FullCodeGenerator::MakeCode(info);
+ } else {
+ success = interpreter::Interpreter::MakeBytecode(info);
+ }
+ }
+ if (!success) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return false;
}
- return true;
+ return success;
}
@@ -730,7 +750,8 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
MaybeDisableOptimization(shared, lit->dont_optimize_reason());
- if (FLAG_ignition && info->closure()->PassesFilter(FLAG_ignition_filter)) {
+ if (FLAG_ignition && !shared->HasBuiltinFunctionId() &&
+ info->closure()->PassesFilter(FLAG_ignition_filter)) {
// Compile bytecode for the interpreter.
if (!GenerateBytecode(info)) return MaybeHandle<Code>();
} else {
@@ -750,6 +771,10 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
// Update the code and feedback vector for the shared function info.
shared->ReplaceCode(*info->code());
shared->set_feedback_vector(*info->feedback_vector());
+ if (info->has_bytecode_array()) {
+ DCHECK(shared->function_data()->IsUndefined());
+ shared->set_function_data(*info->bytecode_array());
+ }
return info->code();
}
@@ -776,7 +801,8 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
- // Context specialization folds-in the context, so no sharing can occur.
+  // Function context specialization folds in the function context,
+  // so no sharing can occur.
if (info->is_function_context_specializing()) return;
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
@@ -786,19 +812,18 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
if (function->shared()->bound()) return;
// Cache optimized context-specific code.
- if (FLAG_cache_optimized_code) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<LiteralsArray> literals(function->literals());
- Handle<Context> native_context(function->context()->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
- literals, info->osr_ast_id());
- }
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<LiteralsArray> literals(function->literals());
+ Handle<Context> native_context(function->context()->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+ literals, info->osr_ast_id());
- // Do not cache context-independent code compiled for OSR.
+ // Do not cache (native) context-independent code compiled for OSR.
if (code->is_turbofanned() && info->is_osr()) return;
- // Cache optimized context-independent code.
- if (FLAG_turbo_cache_shared_code && code->is_turbofanned()) {
+ // Cache optimized (native) context-independent code.
+ if (FLAG_turbo_cache_shared_code && code->is_turbofanned() &&
+ !info->is_native_context_specializing()) {
DCHECK(!info->is_function_context_specializing());
DCHECK(info->osr_ast_id().IsNone());
Handle<SharedFunctionInfo> shared(function->shared());
@@ -841,9 +866,12 @@ bool Compiler::ParseAndAnalyze(ParseInfo* info) {
static bool GetOptimizedCodeNow(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
+ CanonicalHandleScope canonical(isolate);
+
if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
- TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+ TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
OptimizedCompileJob job(info);
if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
@@ -858,7 +886,7 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
}
// Success!
- DCHECK(!info->isolate()->has_pending_exception());
+ DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeMap(info);
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
info->shared_info());
@@ -868,6 +896,8 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
static bool GetOptimizedCodeLater(CompilationInfo* info) {
Isolate* isolate = info->isolate();
+ CanonicalHandleScope canonical(isolate);
+
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@@ -1146,6 +1176,13 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
}
+// Checks whether top-level functions pass the filter in {raw_filter}.
+static bool TopLevelFunctionPassesFilter(const char* raw_filter) {
+ Vector<const char> filter = CStrVector(raw_filter);
+ return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
+}
+
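// Only the empty filter and the bare "*" wildcard match here, so
// --ignition-filter=* sends top-level script code through the bytecode
// generator while a name filter such as --ignition-filter=foo keeps
// top-level code on the unoptimized-code path:
//
//   raw_filter ""    -> true
//   raw_filter "*"   -> true
//   raw_filter "foo" -> false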
+
static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@@ -1209,8 +1246,14 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
HistogramTimerScope timer(rate);
// Compile the code.
- if (!CompileUnoptimizedCode(info)) {
- return Handle<SharedFunctionInfo>::null();
+ if (FLAG_ignition && TopLevelFunctionPassesFilter(FLAG_ignition_filter)) {
+ if (!GenerateBytecode(info)) {
+ return Handle<SharedFunctionInfo>::null();
+ }
+ } else {
+ if (!CompileUnoptimizedCode(info)) {
+ return Handle<SharedFunctionInfo>::null();
+ }
}
// Allocate function.
@@ -1220,6 +1263,10 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
info->code(),
ScopeInfo::Create(info->isolate(), info->zone(), info->scope()),
info->feedback_vector());
+ if (info->has_bytecode_array()) {
+ DCHECK(result->function_data()->IsUndefined());
+ result->set_function_data(*info->bytecode_array());
+ }
DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
@@ -1230,9 +1277,10 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
result->set_allows_lazy_compilation_without_context(false);
}
- Handle<String> script_name = script->name()->IsString()
- ? Handle<String>(String::cast(script->name()))
- : isolate->factory()->empty_string();
+ Handle<String> script_name =
+ script->name()->IsString()
+ ? Handle<String>(String::cast(script->name()))
+ : isolate->factory()->empty_string();
Logger::LogEventsAndTags log_tag = info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
@@ -1534,13 +1582,6 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
!LiveEditFunctionTracker::IsActive(isolate) &&
(!info.is_debug() || allow_lazy_without_ctx);
- if (outer_info->parse_info()->is_toplevel() && outer_info->will_serialize()) {
- // Make sure that if the toplevel code (possibly to be serialized),
- // the inner function must be allowed to be compiled lazily.
- // This is necessary to serialize toplevel code without inner functions.
- DCHECK(allow_lazy);
- }
-
bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
// Generate code
@@ -1763,7 +1804,7 @@ bool CompilationPhase::ShouldProduceTraceOutput() const {
#if DEBUG
void CompilationInfo::PrintAstForTesting() {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(isolate(), zone()).PrintProgram(literal()));
+ PrettyPrinter(isolate()).PrintProgram(literal()));
}
#endif
} // namespace internal
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 45cf7b5183..d831ac5fd8 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -21,12 +21,6 @@ class JavaScriptFrame;
class ParseInfo;
class ScriptData;
-struct OffsetRange {
- OffsetRange(int from, int to) : from(from), to(to) {}
- int from;
- int to;
-};
-
// This class encapsulates encoding and decoding of sources positions from
// which hydrogen values originated.
@@ -123,14 +117,14 @@ class CompilationInfo {
kSerializing = 1 << 7,
kFunctionContextSpecializing = 1 << 8,
kFrameSpecializing = 1 << 9,
- kInliningEnabled = 1 << 10,
- kTypingEnabled = 1 << 11,
- kDisableFutureOptimization = 1 << 12,
- kSplittingEnabled = 1 << 13,
- kTypeFeedbackEnabled = 1 << 14,
- kDeoptimizationEnabled = 1 << 15,
- kSourcePositionsEnabled = 1 << 16,
- kFirstCompile = 1 << 17,
+ kNativeContextSpecializing = 1 << 10,
+ kInliningEnabled = 1 << 11,
+ kTypingEnabled = 1 << 12,
+ kDisableFutureOptimization = 1 << 13,
+ kSplittingEnabled = 1 << 14,
+ kDeoptimizationEnabled = 1 << 16,
+ kSourcePositionsEnabled = 1 << 17,
+ kFirstCompile = 1 << 18,
};
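// The flags share one integer bit set; a sketch of the accessor pattern this
// class uses, assuming the GetFlag/SetFlag helpers declared elsewhere in the
// header:
//
//   void SetFlag(Flag flag) { flags_ |= flag; }
//   bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }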
explicit CompilationInfo(ParseInfo* parse_info);
@@ -179,6 +173,9 @@ class CompilationInfo {
parameter_count_ = parameter_count;
}
+ bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
@@ -233,10 +230,12 @@ class CompilationInfo {
bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
- void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+ void MarkAsNativeContextSpecializing() {
+ SetFlag(kNativeContextSpecializing);
+ }
- bool is_type_feedback_enabled() const {
- return GetFlag(kTypeFeedbackEnabled);
+ bool is_native_context_specializing() const {
+ return GetFlag(kNativeContextSpecializing);
}
void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
@@ -280,6 +279,10 @@ class CompilationInfo {
}
void SetCode(Handle<Code> code) { code_ = code; }
+ void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
+ bytecode_array_ = bytecode_array;
+ }
+
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
(FLAG_trap_on_stub_deopt && IsStub());
@@ -290,7 +293,7 @@ class CompilationInfo {
(closure()->context()->global_object() != NULL);
}
- GlobalObject* global_object() const {
+ JSGlobalObject* global_object() const {
return has_global_object() ? closure()->context()->global_object() : NULL;
}
@@ -323,7 +326,7 @@ class CompilationInfo {
}
bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
- bool MustReplaceUndefinedReceiverWithGlobalProxy();
+ bool ExpectsJSReceiverAsReceiver();
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
@@ -361,19 +364,6 @@ class CompilationInfo {
prologue_offset_ = prologue_offset;
}
- // Adds offset range [from, to) where fp register does not point
- // to the current frame base. Used in CPU profiler to detect stack
- // samples where top frame is not set up.
- inline void AddNoFrameRange(int from, int to) {
- if (no_frame_ranges_) no_frame_ranges_->Add(OffsetRange(from, to));
- }
-
- List<OffsetRange>* ReleaseNoFrameRanges() {
- List<OffsetRange>* result = no_frame_ranges_;
- no_frame_ranges_ = NULL;
- return result;
- }
-
int start_position_for(uint32_t inlining_id) {
return inlined_function_infos_.at(inlining_id).start_position;
}
@@ -407,12 +397,27 @@ class CompilationInfo {
bool has_simple_parameters();
- typedef std::vector<Handle<SharedFunctionInfo>> InlinedFunctionList;
+ struct InlinedFunctionHolder {
+ Handle<SharedFunctionInfo> shared_info;
+
+ // Root that holds the unoptimized code of the inlined function alive
+ // (and out of reach of code flushing) until we finish compilation.
+ // Do not remove.
+ Handle<Code> inlined_code_object_root;
+
+ explicit InlinedFunctionHolder(
+ Handle<SharedFunctionInfo> inlined_shared_info)
+ : shared_info(inlined_shared_info),
+ inlined_code_object_root(inlined_shared_info->code()) {}
+ };
+
+ typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
InlinedFunctionList const& inlined_functions() const {
return inlined_functions_;
}
+
void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function) {
- inlined_functions_.push_back(inlined_function);
+ inlined_functions_.push_back(InlinedFunctionHolder(inlined_function));
}
base::SmartArrayPointer<char> GetDebugName() const;
@@ -478,6 +483,11 @@ class CompilationInfo {
// data. Keep track which code we patched.
Handle<Code> unoptimized_code_;
+ // Holds the bytecode array generated by the interpreter.
+ // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
+  // refactored to avoid us needing to carry the BytecodeArray around.
+ Handle<BytecodeArray> bytecode_array_;
+
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
@@ -491,7 +501,6 @@ class CompilationInfo {
int prologue_offset_;
- List<OffsetRange>* no_frame_ranges_;
std::vector<InlinedFunctionInfo> inlined_function_infos_;
bool track_positions_;
@@ -716,6 +725,7 @@ class CompilationPhase BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_H_
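
A note on the InlinedFunctionHolder change above: the extra inlined_code_object_root handle exists only to act as a strong GC root, pinning each inlined function's unoptimized code (out of reach of code flushing) until compilation finishes. A minimal standalone sketch of that pinning pattern, using std::shared_ptr as a stand-in for V8's Handle<Code>; all names below are illustrative, not V8 API:

#include <iostream>
#include <memory>
#include <vector>

struct Code { int id; };

struct SharedFunctionInfo {
  std::shared_ptr<Code> code;  // unoptimized code; may be flushed if unrooted
};

struct InlinedFunctionHolder {
  SharedFunctionInfo* shared_info;
  // Extra strong reference that pins the unoptimized code for the lifetime
  // of the holder, i.e. until compilation finishes.
  std::shared_ptr<Code> inlined_code_object_root;

  explicit InlinedFunctionHolder(SharedFunctionInfo* info)
      : shared_info(info), inlined_code_object_root(info->code) {}
};

int main() {
  SharedFunctionInfo f{std::make_shared<Code>(Code{42})};
  std::vector<InlinedFunctionHolder> inlined_functions;
  inlined_functions.emplace_back(&f);
  f.code.reset();  // simulate the code flusher dropping the original reference
  // The holder's root still keeps the code object reachable.
  std::cout << inlined_functions.back().inlined_code_object_root->id << "\n";
}
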
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 7f7a39bb9e..1257e232f7 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -3,4 +3,5 @@ set noparent
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
+mtrofin@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 8a03ff77f3..ac0be79225 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -3,6 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/access-builder.h"
+
+#include "src/contexts.h"
+#include "src/frames.h"
+#include "src/heap/heap.h"
+#include "src/type-cache.h"
#include "src/types-inl.h"
namespace v8 {
@@ -18,9 +23,18 @@ FieldAccess AccessBuilder::ForMap() {
// static
+FieldAccess AccessBuilder::ForHeapNumberValue() {
+ FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
+                        MaybeHandle<Name>(), TypeCache::Get().kFloat64,
+ kMachFloat64};
+ return access;
+}
+
+
+// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
- MaybeHandle<Name>(), Type::Any(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
return access;
}
@@ -67,13 +81,10 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
// static
-FieldAccess AccessBuilder::ForFixedArrayLength(Zone* zone) {
- STATIC_ASSERT(FixedArray::kMaxLength <= 1 << 30);
- FieldAccess access = {
- kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- Type::Intersect(Type::Range(0, FixedArray::kMaxLength, zone),
- Type::TaggedSigned(), zone),
- kMachAnyTagged};
+FieldAccess AccessBuilder::ForFixedArrayLength() {
+ FieldAccess access = {kTaggedBase, FixedArray::kLengthOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kFixedArrayLengthType, kMachAnyTagged};
return access;
}
@@ -98,7 +109,7 @@ FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
// static
FieldAccess AccessBuilder::ForMapBitField3() {
FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- Type::UntaggedUnsigned32(), kMachUint32};
+ TypeCache::Get().kInt32, kMachInt32};
return access;
}
@@ -114,18 +125,39 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- Type::UntaggedUnsigned8(), kMachUint8};
+ TypeCache::Get().kUint8, kMachUint8};
return access;
}
// static
-FieldAccess AccessBuilder::ForStringLength(Zone* zone) {
- FieldAccess access = {
- kTaggedBase, String::kLengthOffset, Handle<Name>(),
- Type::Intersect(Type::Range(0, String::kMaxLength, zone),
- Type::TaggedSigned(), zone),
- kMachAnyTagged};
+FieldAccess AccessBuilder::ForMapPrototype() {
+ FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
+ Type::TaggedPointer(), kMachAnyTagged};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForStringLength() {
+ FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ TypeCache::Get().kStringLengthType, kMachAnyTagged};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(), Type::Receiver(), kMachAnyTagged};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(), Type::Internal(), kMachAnyTagged};
return access;
}
@@ -139,6 +171,35 @@ FieldAccess AccessBuilder::ForValue() {
// static
+FieldAccess AccessBuilder::ForArgumentsLength() {
+ int offset =
+ JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsCallee() {
+ int offset =
+ JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
+ int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
+ return access;
+}
+
+
+// static
FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
@@ -151,8 +212,14 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
// static
FieldAccess AccessBuilder::ForPropertyCellValue() {
+ return ForPropertyCellValue(Type::Tagged());
+}
+
+
+// static
+FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
- Type::Any(), kMachAnyTagged};
+ type, kMachAnyTagged};
return access;
}
@@ -249,8 +316,8 @@ ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
// static
FieldAccess AccessBuilder::ForStatsCounter() {
- FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(),
- kMachInt32};
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
+ TypeCache::Get().kInt32, kMachInt32};
return access;
}
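
For readers unfamiliar with the initializer lists above: a FieldAccess bundles everything the compiler needs to emit a typed field load or store. A simplified stand-in showing the shape of the descriptor (the offset constant below is assumed, and these are not the real V8 types):

#include <iostream>

enum BaseTaggedness { kUntaggedBase, kTaggedBase };
enum MachineType { kMachAnyTagged, kMachFloat64, kMachUint8, kMachInt32 };

struct FieldAccess {
  BaseTaggedness base_is_tagged;  // is the base a tagged heap object pointer?
  int offset;                     // byte offset of the field from the base
  const char* type;               // value type the compiler may assume
  MachineType machine_type;       // representation used for the load/store
};

// Analogous to AccessBuilder::ForHeapNumberValue(): a float64 payload at a
// fixed offset inside a tagged HeapNumber (the offset value is assumed).
FieldAccess ForHeapNumberValue() {
  return {kTaggedBase, /* kValueOffset */ 8, "Float64", kMachFloat64};
}

int main() {
  FieldAccess access = ForHeapNumberValue();
  std::cout << "load a " << access.type << " at offset " << access.offset
            << "\n";
}
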
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 762ab64d52..125cd5f79f 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -22,6 +22,9 @@ class AccessBuilder final : public AllStatic {
// Provides access to HeapObject::map() field.
static FieldAccess ForMap();
+ // Provides access to HeapNumber::value() field.
+ static FieldAccess ForHeapNumberValue();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectProperties();
@@ -41,7 +44,7 @@ class AccessBuilder final : public AllStatic {
static FieldAccess ForJSDateField(JSDate::FieldIndex index);
// Provides access to FixedArray::length() field.
- static FieldAccess ForFixedArrayLength(Zone* zone);
+ static FieldAccess ForFixedArrayLength();
// Provides access to DescriptorArray::enum_cache() field.
static FieldAccess ForDescriptorArrayEnumCache();
@@ -58,17 +61,34 @@ class AccessBuilder final : public AllStatic {
// Provides access to Map::instance_type() field.
static FieldAccess ForMapInstanceType();
+ // Provides access to Map::prototype() field.
+ static FieldAccess ForMapPrototype();
+
// Provides access to String::length() field.
- static FieldAccess ForStringLength(Zone* zone);
+ static FieldAccess ForStringLength();
+
+ // Provides access to JSGlobalObject::global_proxy() field.
+ static FieldAccess ForJSGlobalObjectGlobalProxy();
+
+ // Provides access to JSGlobalObject::native_context() field.
+ static FieldAccess ForJSGlobalObjectNativeContext();
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
- // Provides access Context slots.
+ // Provides access to arguments object fields.
+ static FieldAccess ForArgumentsLength();
+ static FieldAccess ForArgumentsCallee();
+
+ // Provides access to FixedArray slots.
+ static FieldAccess ForFixedArraySlot(size_t index);
+
+ // Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
// Provides access to PropertyCell::value() field.
static FieldAccess ForPropertyCellValue();
+ static FieldAccess ForPropertyCellValue(Type* type);
// Provides access to SharedFunctionInfo::feedback_vector() field.
static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
new file mode 100644
index 0000000000..218e21af0c
--- /dev/null
+++ b/deps/v8/src/compiler/access-info.cc
@@ -0,0 +1,413 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <ostream>
+
+#include "src/accessors.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-info.h"
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+bool CanInlineElementAccess(Handle<Map> map) {
+ // TODO(bmeurer): IsJSObjectMap
+ // TODO(bmeurer): !map->has_dictionary_elements()
+ // TODO(bmeurer): !map->has_sloppy_arguments_elements()
+ return map->IsJSArrayMap() && map->has_fast_elements() &&
+ !map->has_indexed_interceptor() && !map->is_access_check_needed();
+}
+
+
+bool CanInlinePropertyAccess(Handle<Map> map) {
+ // We can inline property access to prototypes of all primitives, except
+ // the special Oddball ones that have no wrapper counterparts (i.e. Null,
+ // Undefined and TheHole).
+ STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE);
+ if (map->IsBooleanMap()) return true;
+ if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true;
+ return map->IsJSObjectMap() && !map->is_dictionary_map() &&
+ !map->has_named_interceptor() &&
+ // TODO(verwaest): Whitelist contexts to which we have access.
+ !map->is_access_check_needed();
+}
+
+} // namespace
+
+
+std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
+ switch (access_mode) {
+ case AccessMode::kLoad:
+ return os << "Load";
+ case AccessMode::kStore:
+ return os << "Store";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::NotFound(Type* receiver_type,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(holder, receiver_type);
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataConstant(
+ Type* receiver_type, Handle<Object> constant,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(holder, constant, receiver_type);
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataField(
+ Type* receiver_type, FieldIndex field_index, Type* field_type,
+ MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index, field_type,
+ receiver_type);
+}
+
+
+ElementAccessInfo::ElementAccessInfo() : receiver_type_(Type::None()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo()
+ : kind_(kInvalid), receiver_type_(Type::None()), field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ Type* receiver_type)
+ : kind_(kNotFound),
+ receiver_type_(receiver_type),
+ holder_(holder),
+ field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ Handle<Object> constant,
+ Type* receiver_type)
+ : kind_(kDataConstant),
+ receiver_type_(receiver_type),
+ constant_(constant),
+ holder_(holder),
+ field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map,
+ FieldIndex field_index, Type* field_type,
+ Type* receiver_type)
+ : kind_(kDataField),
+ receiver_type_(receiver_type),
+ transition_map_(transition_map),
+ holder_(holder),
+ field_index_(field_index),
+ field_type_(field_type) {}
+
+
+AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
+ Handle<Context> native_context, Zone* zone)
+ : dependencies_(dependencies),
+ native_context_(native_context),
+ isolate_(native_context->GetIsolate()),
+ type_cache_(TypeCache::Get()),
+ zone_(zone) {}
+
+
+bool AccessInfoFactory::ComputeElementAccessInfo(
+ Handle<Map> map, AccessMode access_mode, ElementAccessInfo* access_info) {
+ // Check if it is safe to inline element access for the {map}.
+ if (!CanInlineElementAccess(map)) return false;
+
+ // TODO(bmeurer): Add support for holey elements.
+ ElementsKind elements_kind = map->elements_kind();
+ if (IsHoleyElementsKind(elements_kind)) return false;
+
+ // Certain (monomorphic) stores need a prototype chain check because shape
+ // changes could allow callbacks on elements in the chain that are not
+ // compatible with monomorphic keyed stores.
+ MaybeHandle<JSObject> holder;
+ if (access_mode == AccessMode::kStore && map->prototype()->IsJSObject()) {
+ for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
+ Handle<JSReceiver> prototype =
+ PrototypeIterator::GetCurrent<JSReceiver>(i);
+ if (!prototype->IsJSObject()) return false;
+ holder = Handle<JSObject>::cast(prototype);
+ }
+ }
+
+ *access_info =
+ ElementAccessInfo(Type::Class(map, zone()), elements_kind, holder);
+ return true;
+}
+
+
+bool AccessInfoFactory::ComputeElementAccessInfos(
+ MapHandleList const& maps, AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos) {
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ ElementAccessInfo access_info;
+ if (!ComputeElementAccessInfo(map, access_mode, &access_info)) {
+ return false;
+ }
+ access_infos->push_back(access_info);
+ }
+ }
+ return true;
+}
+
+
+bool AccessInfoFactory::ComputePropertyAccessInfo(
+ Handle<Map> map, Handle<Name> name, AccessMode access_mode,
+ PropertyAccessInfo* access_info) {
+ // Check if it is safe to inline property access for the {map}.
+ if (!CanInlinePropertyAccess(map)) return false;
+
+ // Compute the receiver type.
+ Handle<Map> receiver_map = map;
+
+ // We support fast inline cases for certain JSObject getters.
+ if (access_mode == AccessMode::kLoad &&
+ LookupSpecialFieldAccessor(map, name, access_info)) {
+ return true;
+ }
+
+ MaybeHandle<JSObject> holder;
+ do {
+ // Lookup the named property on the {map}.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ int const number = descriptors->SearchWithCache(*name, *map);
+ if (number != DescriptorArray::kNotFound) {
+ PropertyDetails const details = descriptors->GetDetails(number);
+ if (access_mode == AccessMode::kStore) {
+ // Don't bother optimizing stores to read-only properties.
+ if (details.IsReadOnly()) {
+ return false;
+ }
+ // Check for store to data property on a prototype.
+ if (details.kind() == kData && !holder.is_null()) {
+          // The property was found on a prototype, not on the receiver
+          // itself, so the store needs to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ return LookupTransition(receiver_map, name, holder, access_info);
+ }
+ }
+ if (details.type() == DATA_CONSTANT) {
+ *access_info = PropertyAccessInfo::DataConstant(
+ Type::Class(receiver_map, zone()),
+ handle(descriptors->GetValue(number), isolate()), holder);
+ return true;
+ } else if (details.type() == DATA) {
+ int index = descriptors->GetFieldIndex(number);
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ Type::Convert<HeapType>(
+ handle(descriptors->GetFieldType(number), isolate()), zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ if (access_mode == AccessMode::kStore) return false;
+
+ // The field type was cleared by the GC, so we don't know anything
+ // about the contents now.
+ // TODO(bmeurer): It would be awesome to make this saner in the
+ // runtime/GC interaction.
+ field_type = Type::TaggedPointer();
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(map->FindFieldOwner(number), isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(receiver_map, zone()), field_index, field_type, holder);
+ return true;
+ } else {
+ // TODO(bmeurer): Add support for accessors.
+ return false;
+ }
+ }
+
+ // Don't search on the prototype chain for special indices in case of
+    // integer-indexed exotic objects (see ES6 section 9.4.5).
+ if (map->IsJSTypedArrayMap() && name->IsString() &&
+ IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name))) {
+ return false;
+ }
+
+    // Don't look up private symbols on the prototype chain.
+ if (name->IsPrivate()) return false;
+
+ // Walk up the prototype chain.
+ if (!map->prototype()->IsJSObject()) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context())
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate());
+ DCHECK(map->prototype()->IsJSObject());
+ } else if (map->prototype()->IsNull()) {
+      // The property was not found on the receiver or any prototype, so the
+      // store needs to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ if (access_mode == AccessMode::kStore) {
+ return LookupTransition(receiver_map, name, holder, access_info);
+ }
+      // The property was not found; return undefined or throw depending
+ // on the language mode of the load operation.
+ // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
+ *access_info = PropertyAccessInfo::NotFound(
+ Type::Class(receiver_map, zone()), holder);
+ return true;
+ } else {
+ return false;
+ }
+ }
+ Handle<JSObject> map_prototype(JSObject::cast(map->prototype()), isolate());
+ if (map_prototype->map()->is_deprecated()) {
+ // Try to migrate the prototype object so we don't embed the deprecated
+ // map into the optimized code.
+ JSObject::TryMigrateInstance(map_prototype);
+ }
+ map = handle(map_prototype->map(), isolate());
+ holder = map_prototype;
+ } while (CanInlinePropertyAccess(map));
+ return false;
+}
+
+
+bool AccessInfoFactory::ComputePropertyAccessInfos(
+ MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos) {
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ PropertyAccessInfo access_info;
+ if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
+ return false;
+ }
+ access_infos->push_back(access_info);
+ }
+ }
+ return true;
+}
+
+
+bool AccessInfoFactory::LookupSpecialFieldAccessor(
+ Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
+ // Check for special JSObject field accessors.
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
+ FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ Type* field_type = Type::Tagged();
+ if (map->IsStringMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The String::length property is always a smi in the range
+ // [0, String::kMaxLength].
+ field_type = type_cache_.kStringLengthType;
+ } else if (map->IsJSArrayMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The JSArray::length property is a smi in the range
+ // [0, FixedDoubleArray::kMaxLength] in case of fast double
+ // elements, a smi in the range [0, FixedArray::kMaxLength]
+ // in case of other fast elements, and [0, kMaxUInt32] in
+ // case of other arrays.
+ if (IsFastDoubleElementsKind(map->elements_kind())) {
+ field_type = type_cache_.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(map->elements_kind())) {
+ field_type = type_cache_.kFixedArrayLengthType;
+ } else {
+ field_type = type_cache_.kJSArrayLengthType;
+ }
+ }
+ *access_info = PropertyAccessInfo::DataField(Type::Class(map, zone()),
+ field_index, field_type);
+ return true;
+ }
+ return false;
+}
+
+
+bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
+ MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info) {
+ // Check if the {map} has a data transition with the given {name}.
+ if (map->unused_property_fields() == 0) return false;
+ Handle<Map> transition_map;
+ if (TransitionArray::SearchTransition(map, kData, name, NONE)
+ .ToHandle(&transition_map)) {
+ int const number = transition_map->LastAdded();
+ PropertyDetails const details =
+ transition_map->instance_descriptors()->GetDetails(number);
+ // Don't bother optimizing stores to read-only properties.
+ if (details.IsReadOnly()) return false;
+ // TODO(bmeurer): Handle transition to data constant?
+ if (details.type() != DATA) return false;
+ int const index = details.field_index();
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *transition_map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ Type::Convert<HeapType>(
+ handle(
+ transition_map->instance_descriptors()->GetFieldType(number),
+ isolate()),
+ zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ return false;
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
+ isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
+ }
+ dependencies()->AssumeMapNotDeprecated(transition_map);
+ *access_info =
+ PropertyAccessInfo::DataField(Type::Class(map, zone()), field_index,
+ field_type, holder, transition_map);
+ return true;
+ }
+ return false;
+}
+
+
+Factory* AccessInfoFactory::factory() const { return isolate()->factory(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
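
The core of ComputePropertyAccessInfo above is a do/while walk up the prototype chain that remembers the holder and bails out as soon as inlining becomes unsafe. A simplified, self-contained model of that walk (plain structs stand in for Map/JSObject, and the receiver's own inlineability pre-check is elided):

#include <iostream>
#include <map>
#include <string>

struct Object {
  std::map<std::string, int> own_properties;
  Object* prototype;
  bool inlineable;  // stands in for CanInlinePropertyAccess(map)
};

// Returns true and fills *holder when the property is found somewhere the
// compiler may inline the access; false means "give up, stay generic".
bool LookupForInlining(Object* receiver, const std::string& name,
                       Object** holder) {
  Object* current = receiver;
  do {
    auto it = current->own_properties.find(name);
    if (it != current->own_properties.end()) {
      *holder = current;
      return true;
    }
    if (current->prototype == nullptr) return false;  // the "not found" case
    current = current->prototype;
  } while (current->inlineable);
  return false;  // reached a map we cannot reason about
}

int main() {
  Object proto{{{"x", 1}}, nullptr, true};
  Object receiver{{}, &proto, true};
  Object* holder = nullptr;
  if (LookupForInlining(&receiver, "x", &holder)) {
    std::cout << "found on " << (holder == &proto ? "prototype" : "receiver")
              << "\n";
  }
}
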
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
new file mode 100644
index 0000000000..4f60552111
--- /dev/null
+++ b/deps/v8/src/compiler/access-info.h
@@ -0,0 +1,146 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ACCESS_INFO_H_
+#define V8_COMPILER_ACCESS_INFO_H_
+
+#include <iosfwd>
+
+#include "src/field-index.h"
+#include "src/objects.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class TypeCache;
+
+
+namespace compiler {
+
+// Whether we are loading a property or storing to a property.
+enum class AccessMode { kLoad, kStore };
+
+std::ostream& operator<<(std::ostream&, AccessMode);
+
+
+// This class encapsulates all information required to access a certain element.
+class ElementAccessInfo final {
+ public:
+ ElementAccessInfo();
+ ElementAccessInfo(Type* receiver_type, ElementsKind elements_kind,
+ MaybeHandle<JSObject> holder)
+ : elements_kind_(elements_kind),
+ holder_(holder),
+ receiver_type_(receiver_type) {}
+
+ MaybeHandle<JSObject> holder() const { return holder_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ Type* receiver_type() const { return receiver_type_; }
+
+ private:
+ ElementsKind elements_kind_;
+ MaybeHandle<JSObject> holder_;
+ Type* receiver_type_;
+};
+
+
+// This class encapsulates all information required to access a certain
+// object property, either on the object itself or on the prototype chain.
+class PropertyAccessInfo final {
+ public:
+ enum Kind { kInvalid, kNotFound, kDataConstant, kDataField };
+
+ static PropertyAccessInfo NotFound(Type* receiver_type,
+ MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo DataConstant(Type* receiver_type,
+ Handle<Object> constant,
+ MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo DataField(
+ Type* receiver_type, FieldIndex field_index, Type* field_type,
+ MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
+ MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+
+ PropertyAccessInfo();
+
+ bool IsNotFound() const { return kind() == kNotFound; }
+ bool IsDataConstant() const { return kind() == kDataConstant; }
+ bool IsDataField() const { return kind() == kDataField; }
+
+ bool HasTransitionMap() const { return !transition_map().is_null(); }
+
+ Kind kind() const { return kind_; }
+ MaybeHandle<JSObject> holder() const { return holder_; }
+ MaybeHandle<Map> transition_map() const { return transition_map_; }
+ Handle<Object> constant() const { return constant_; }
+ FieldIndex field_index() const { return field_index_; }
+ Type* field_type() const { return field_type_; }
+ Type* receiver_type() const { return receiver_type_; }
+
+ private:
+ PropertyAccessInfo(MaybeHandle<JSObject> holder, Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder, Handle<Object> constant,
+ Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map, FieldIndex field_index,
+ Type* field_type, Type* receiver_type);
+
+ Kind kind_;
+ Type* receiver_type_;
+ Handle<Object> constant_;
+ MaybeHandle<Map> transition_map_;
+ MaybeHandle<JSObject> holder_;
+ FieldIndex field_index_;
+ Type* field_type_;
+};
+
+
+// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
+class AccessInfoFactory final {
+ public:
+ AccessInfoFactory(CompilationDependencies* dependencies,
+ Handle<Context> native_context, Zone* zone);
+
+ bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
+ ElementAccessInfo* access_info);
+ bool ComputeElementAccessInfos(MapHandleList const& maps,
+ AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos);
+ bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
+ AccessMode access_mode,
+ PropertyAccessInfo* access_info);
+ bool ComputePropertyAccessInfos(MapHandleList const& maps, Handle<Name> name,
+ AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos);
+
+ private:
+ bool LookupSpecialFieldAccessor(Handle<Map> map, Handle<Name> name,
+ PropertyAccessInfo* access_info);
+ bool LookupTransition(Handle<Map> map, Handle<Name> name,
+ MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info);
+
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Factory* factory() const;
+ Isolate* isolate() const { return isolate_; }
+ Handle<Context> native_context() const { return native_context_; }
+ Zone* zone() const { return zone_; }
+
+ CompilationDependencies* const dependencies_;
+ Handle<Context> const native_context_;
+ Isolate* const isolate_;
+ TypeCache const& type_cache_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ACCESS_INFO_H_
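
The ComputeElementAccessInfos/ComputePropertyAccessInfos entry points share an all-or-nothing contract: every map in the polymorphic set must yield an access info, or the whole computation fails and the caller stays generic. An illustrative mini-version of that contract (not the V8 types):

#include <iostream>
#include <string>
#include <vector>

struct AccessInfo { std::string kind; };

// One map either yields an access info or poisons the whole computation.
bool ComputeOne(int map_id, AccessInfo* info) {
  if (map_id < 0) return false;  // e.g. a dictionary map: not inlineable
  *info = {"DataField(map " + std::to_string(map_id) + ")"};
  return true;
}

bool ComputeAll(const std::vector<int>& maps, std::vector<AccessInfo>* out) {
  for (int map : maps) {
    AccessInfo info;
    if (!ComputeOne(map, &info)) return false;  // one failure fails them all
    out->push_back(info);
  }
  return true;
}

int main() {
  std::vector<AccessInfo> infos;
  std::cout << ComputeAll({1, 2, 3}, &infos) << " "  // 1: all maps succeeded
            << ComputeAll({1, -1}, &infos) << "\n";  // 0: one map failed
}
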
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 796d132a34..44d70dcd12 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -198,6 +198,48 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ add(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -384,6 +426,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -438,6 +485,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ str(value, MemOperand(object, index));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -843,19 +907,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ add(index, object, index);
- __ str(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
- __ RecordWrite(object, index, value, lr_status, mode);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
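
The mode comparisons in OutOfLineRecordWrite::Generate above rely on RecordWriteMode being ordered so that cheaper filters apply only to less precise modes. A runnable sketch of that filtering, assuming the enum order kValueIsMap < kValueIsPointer < kValueIsAny implied by the code:

#include <iostream>

enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

bool IsSmi(int value) { return (value & 1) == 0; }   // toy smi tagging
bool PointsToInterestingPage(int) { return false; }  // stand-in page flag

void RecordWrite(RecordWriteMode mode, int value) {
  // Only kValueIsAny admits smis, which never need a write barrier.
  if (mode > RecordWriteMode::kValueIsPointer && IsSmi(value)) return;
  // For kValueIsPointer and kValueIsAny, also consult the value's page flag.
  if (mode > RecordWriteMode::kValueIsMap && !PointsToInterestingPage(value))
    return;
  std::cout << "calling RecordWriteStub\n";  // the expensive slow path
}

int main() {
  RecordWrite(RecordWriteMode::kValueIsAny, 4);  // smi value: barrier skipped
  RecordWrite(RecordWriteMode::kValueIsMap, 3);  // map store: stub is called
}
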
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index c210c171e4..6852b69d43 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -93,8 +93,7 @@ namespace compiler {
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
- V(ArmPoke) \
- V(ArmStoreWriteBarrier)
+ V(ArmPoke)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index f58a29de8a..19c81262fd 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -61,7 +61,6 @@ class ArmOperandGenerator : public OperandGenerator {
case kArmStrb:
case kArmLdr:
case kArmStr:
- case kArmStoreWriteBarrier:
return value >= -4095 && value <= 4095;
case kArmLdrh:
@@ -351,49 +350,70 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
- Emit(kArmStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r4),
- g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kArmVstrF32;
- break;
- case kRepFloat64:
- opcode = kArmVstrF64;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kArmStrb;
- break;
- case kRepWord16:
- opcode = kArmStrh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kArmStr;
- break;
- default:
- UNREACHABLE();
- return;
- }
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kArmVstrF32;
+ break;
+ case kRepFloat64:
+ opcode = kArmVstrF64;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kArmStrb;
+ break;
+ case kRepWord16:
+ opcode = kArmStrh;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kArmStr;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
}
}
@@ -697,6 +717,12 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1104,23 +1130,10 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
ArmOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -1129,8 +1142,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ if (Node* input = (*arguments)[n]) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
g.UseRegister(input));
@@ -1138,131 +1151,16 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
if (input == nullptr) continue;
Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- ArmOperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
namespace {
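
VisitStore above packs the RecordWriteMode into the instruction word via MiscField::encode, so the code generator can later recover it with MiscField::decode. A sketch of that bit-packing; the field position and opcode id below are invented for illustration (V8 uses its BitField templates, not these constants):

#include <cstdint>
#include <iostream>

using InstructionCode = uint32_t;

constexpr InstructionCode kArchStoreWithWriteBarrier = 17;  // made-up id
constexpr int kMiscShift = 22;                              // made-up position

enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

InstructionCode EncodeMisc(InstructionCode code, int value) {
  return code | (static_cast<InstructionCode>(value) << kMiscShift);
}

// Real code would also mask the field; this works here because the opcode id
// stays below 1 << kMiscShift.
int DecodeMisc(InstructionCode code) {
  return static_cast<int>(code >> kMiscShift);
}

int main() {
  InstructionCode code =
      EncodeMisc(kArchStoreWithWriteBarrier,
                 static_cast<int>(RecordWriteMode::kValueIsAny));
  std::cout << "mode = " << DecodeMisc(code) << "\n";  // 2 == kValueIsAny
}
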
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 257dd6c134..0915b7321d 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -41,8 +41,26 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return ToRegister(instr_->InputAt(index)).W();
}
+ Register InputOrZeroRegister32(size_t index) {
+ DCHECK(instr_->InputAt(index)->IsRegister() ||
+ (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
+ if (instr_->InputAt(index)->IsImmediate()) {
+ return wzr;
+ }
+ return InputRegister32(index);
+ }
+
Register InputRegister64(size_t index) { return InputRegister(index); }
+ Register InputOrZeroRegister64(size_t index) {
+ DCHECK(instr_->InputAt(index)->IsRegister() ||
+ (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
+ if (instr_->InputAt(index)->IsImmediate()) {
+ return xzr;
+ }
+ return InputRegister64(index);
+ }
+
Operand InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
@@ -237,6 +255,48 @@ class OutOfLineLoadZero final : public OutOfLineCode {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlagClear(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Add(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -459,6 +519,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(x10);
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction:
// We don't need kArchPrepareCallCFunction on arm64 as the instruction
      // selector already performs a Claim to reserve space on the stack and
@@ -506,6 +571,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Str(value, MemOperand(object, index));
+ __ CheckPageFlagSet(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ ool->entry());
+ __ Bind(ool->exit());
+ break;
+ }
case kArm64Float64RoundDown:
__ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -519,28 +601,33 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Add:
- __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Add32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
- __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+ __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
} else {
- __ Add(i.OutputRegister32(), i.InputRegister32(0),
+ __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
}
break;
case kArm64And:
- __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64And32:
- __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Bic:
- __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Bic32:
- __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -624,45 +711,48 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Not32:
__ Mvn(i.OutputRegister32(), i.InputOperand32(0));
break;
- case kArm64Neg:
- __ Neg(i.OutputRegister(), i.InputOperand(0));
- break;
- case kArm64Neg32:
- __ Neg(i.OutputRegister32(), i.InputOperand32(0));
- break;
case kArm64Or:
- __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Or32:
- __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Orn:
- __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Orn32:
- __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Eor:
- __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Eor32:
- __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Eon:
- __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Eon32:
- __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Sub:
- __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Sub32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
- __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+ __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
} else {
- __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+ __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
}
break;
@@ -743,20 +833,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
break;
}
+ case kArm64Clz:
+ __ Clz(i.OutputRegister64(), i.InputRegister64(0));
+ break;
case kArm64Clz32:
__ Clz(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Cmp:
- __ Cmp(i.InputRegister(0), i.InputOperand(1));
+ __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmp32:
- __ Cmp(i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cmn:
- __ Cmn(i.InputRegister(0), i.InputOperand(1));
+ __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmn32:
- __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
+ __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Tst:
__ Tst(i.InputRegister(0), i.InputOperand(1));
@@ -791,12 +884,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputFloat32Register(1));
break;
case kArm64Float32Max:
- __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ // (b < a) ? a : b
+ __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
+ __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1), lo);
break;
case kArm64Float32Min:
- __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ // (a < b) ? a : b
+ __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
+ __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1), lo);
break;
case kArm64Float32Abs:
__ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
@@ -842,12 +939,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArm64Float64Max:
- __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ // (b < a) ? a : b
+ __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), lo);
break;
case kArm64Float64Min:
- __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ // (a < b) ? a : b
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), lo);
break;
case kArm64Float64Abs:
__ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -873,6 +974,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Int32ToFloat64:
__ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Int64ToFloat32:
+ __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
+ break;
+ case kArm64Int64ToFloat64:
+ __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
+ break;
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
@@ -950,29 +1057,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64StrD:
__ Str(i.InputDoubleRegister(2), i.MemoryOperand());
break;
- case kArm64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ Add(index, object, index);
- __ Str(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(dcarney): we shouldn't test write barriers from c calls.
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
- UseScratchRegisterScope scope(masm());
- Register temp = no_reg;
- if (csp.is(masm()->StackPointer())) {
- temp = scope.AcquireX();
- lr_status = kLRHasBeenSaved;
- __ Push(lr, temp); // Need to push a pair
- }
- __ RecordWrite(object, index, value, lr_status, mode);
- if (csp.is(masm()->StackPointer())) {
- __ Pop(temp, lr);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
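
The Fmax/Fmin replacement above pins Float64Max/Float64Min to exactly (b < a) ? a : b and (a < b) ? a : b, which differs from the fmax/fmin instructions when an input is NaN: an unordered compare makes the condition false, so the right operand is selected. A standalone sketch of the selected behavior:

#include <cmath>
#include <iostream>

// Exactly the comparisons the Fcmp/Fcsel pairs implement.
double Float64Max(double a, double b) { return b < a ? a : b; }
double Float64Min(double a, double b) { return a < b ? a : b; }

int main() {
  std::cout << Float64Max(1.0, 2.0) << "\n";           // 2
  std::cout << Float64Min(1.0, 2.0) << "\n";           // 1
  // Unordered compares are false, so the right operand is selected.
  std::cout << Float64Max(std::nan(""), 5.0) << "\n";  // 5
  std::cout << Float64Max(5.0, std::nan("")) << "\n";  // nan
}
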
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index c2a52af7cb..cd8b4c56b8 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -18,6 +18,7 @@ namespace compiler {
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
+ V(Arm64Clz) \
V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
@@ -55,8 +56,6 @@ namespace compiler {
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
- V(Arm64Neg) \
- V(Arm64Neg32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
@@ -109,6 +108,8 @@ namespace compiler {
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
@@ -129,8 +130,7 @@ namespace compiler {
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
- V(Arm64Str) \
- V(Arm64StoreWriteBarrier)
+ V(Arm64Str)
// Addressing modes represent the "shape" of inputs to an instruction.
@@ -158,8 +158,8 @@ namespace compiler {
V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
-} // namespace internal
} // namespace compiler
+} // namespace internal
} // namespace v8
#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
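
The V(...) lists edited in this header follow the usual X-macro pattern: one list macro is expanded several times to generate the opcode enum, a parallel name table, and so on, which is why adding or removing an opcode is a one-line change. A simplified, self-contained sketch (the toy list below mimics the shape, not V8's real macros):

#include <iostream>

// A toy opcode list in the same shape as the V(...) lists in this header.
#define TOY_ARCH_OPCODE_LIST(V) \
  V(Arm64Add)                   \
  V(Arm64Clz)                   \
  V(Arm64Str)

enum ArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  TOY_ARCH_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// The same list expanded a second time yields a parallel name table.
const char* const kOpcodeNames[] = {
#define OPCODE_NAME(Name) #Name,
    TOY_ARCH_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
};

int main() { std::cout << kOpcodeNames[kArm64Clz] << "\n"; }  // Arm64Clz
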
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 7a5b84275a..6abad0aa92 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -37,6 +37,15 @@ class Arm64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
// Use the provided node if it has the required value, or create a
// TempImmediate otherwise.
InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
@@ -214,14 +223,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- bool is_cmp = opcode == kArm64Cmp32;
+ bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
// We can commute cmp by switching the inputs and commuting the flags
// continuation.
bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
- // The cmp instruction is encoded as sub with zero output register, and
- // therefore supports the same operand modes.
+ // The cmp and cmn instructions are encoded as sub or add with zero output
+ // register, and therefore support the same operand modes.
bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
m.IsInt64Sub() || is_cmp;
@@ -247,18 +256,18 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} else if (TryMatchAnyShift(selector, node, right_node, &opcode,
!is_add_sub)) {
Matcher m_shift(right_node);
- inputs[input_count++] = g.UseRegister(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (is_cmp) cont->Commute();
Matcher m_shift(left_node);
- inputs[input_count++] = g.UseRegister(right_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else {
- inputs[input_count++] = g.UseRegister(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
}
@@ -384,58 +393,80 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
- Emit(kArm64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, x10),
- g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- ImmediateMode immediate_mode = kNoImmediate;
- switch (rep) {
- case kRepFloat32:
- opcode = kArm64StrS;
- immediate_mode = kLoadStoreImm32;
- break;
- case kRepFloat64:
- opcode = kArm64StrD;
- immediate_mode = kLoadStoreImm64;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kArm64Strb;
- immediate_mode = kLoadStoreImm8;
- break;
- case kRepWord16:
- opcode = kArm64Strh;
- immediate_mode = kLoadStoreImm16;
- break;
- case kRepWord32:
- opcode = kArm64StrW;
- immediate_mode = kLoadStoreImm32;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kArm64Str;
- immediate_mode = kLoadStoreImm64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+
+ // TODO(arm64): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ ArchOpcode opcode;
+ ImmediateMode immediate_mode = kNoImmediate;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kArm64StrS;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case kRepFloat64:
+ opcode = kArm64StrD;
+ immediate_mode = kLoadStoreImm64;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kArm64Strb;
+ immediate_mode = kLoadStoreImm8;
+ break;
+ case kRepWord16:
+ opcode = kArm64Strh;
+ immediate_mode = kLoadStoreImm16;
+ break;
+ case kRepWord32:
+ opcode = kArm64StrW;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kArm64Str;
+ immediate_mode = kLoadStoreImm64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, immediate_mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
}
}
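With the dedicated kArm64StoreWriteBarrier opcode gone, barriered stores are emitted through the architecture-independent kArchStoreWithWriteBarrier opcode, with the record-write mode packed into MiscField. A standalone sketch of the kind-to-mode mapping applied above (enum names abbreviated):

#include <cstdlib>

enum class BarrierKind { kNone, kMap, kPointer, kFull };
enum class RWMode { kValueIsMap, kValueIsPointer, kValueIsAny };

// Mirrors the switch in VisitStore above; a more specific kind can let the
// barrier code skip checks on the stored value.
RWMode ToRecordWriteMode(BarrierKind kind) {
  switch (kind) {
    case BarrierKind::kMap:     return RWMode::kValueIsMap;
    case BarrierKind::kPointer: return RWMode::kValueIsPointer;
    case BarrierKind::kFull:    return RWMode::kValueIsAny;
    case BarrierKind::kNone:    break;  // excluded by the caller's check
  }
  std::abort();
}

int main() {
  return ToRecordWriteMode(BarrierKind::kFull) == RWMode::kValueIsAny ? 0 : 1;
}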
@@ -912,12 +943,30 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitWord32Clz(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
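kArm64Clz and kArm64Clz32 map the Word64Clz and Word32Clz machine operators onto the clz instruction; ctz and popcnt have no single-instruction arm64 integer equivalent, hence the UNREACHABLE stubs below. A hedged standalone sketch in terms of the GCC/Clang builtins (the builtins are undefined for a zero input, while the arm64 instruction returns the operand width):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t x = 0x00ff0000u;  // 8 leading zero bits as a 32-bit value
  uint64_t y = 0x00ff0000u;  // 40 leading zero bits as a 64-bit value
  std::printf("%d %d\n", __builtin_clz(x), __builtin_clzll(y));
  return 0;
}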
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -997,12 +1046,7 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
}
- if (m.left().Is(0)) {
- Emit(kArm64Neg32, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- } else {
- VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
- }
+ VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
@@ -1023,11 +1067,7 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
}
- if (m.left().Is(0)) {
- Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
- } else {
- VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
- }
+ VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
@@ -1276,6 +1316,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kArm64Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Int64ToFloat64, node);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kArm64Float64ExtractLowWord32, node);
}
@@ -1363,16 +1413,24 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRR(this, kArm64Float32Max, node);
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRR(this, kArm64Float64Max, node);
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRR(this, kArm64Float32Min, node);
+}
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRR(this, kArm64Float64Min, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -1410,26 +1468,13 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
Arm64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
// Push the arguments to the stack.
- int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
+ int aligned_push_count = static_cast<int>(arguments->size());
bool pushed_count_uneven = aligned_push_count & 1;
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
@@ -1444,163 +1489,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
int slot = aligned_push_count - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
- Node* input = buffer.pushed_nodes[slot];
+ Node* input = (*arguments)[slot];
Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot));
slot--;
}
// Now all pushes can be done in pairs.
for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair, g.NoOutput(),
- g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]),
- g.TempImmediate(slot));
- }
- }
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
+ Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
+ g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
}
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
}
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
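The argument area is claimed up front and then filled with pokes: a single kArm64Poke for the uneven top slot, followed by kArm64PokePair stores so each remaining pair of slots can be written with one paired store. A standalone sketch of the resulting slot order for five arguments:

#include <cstdio>
#include <vector>

int main() {
  std::vector<const char*> args = {"a0", "a1", "a2", "a3", "a4"};
  int slot = static_cast<int>(args.size()) - 1;
  if (args.size() & 1) {  // uneven count: emit one single poke first
    std::printf("poke     %s -> slot %d\n", args[slot], slot);
    --slot;
  }
  for (; slot >= 0; slot -= 2) {  // then everything else in pairs
    std::printf("pokepair %s,%s -> slots %d,%d\n", args[slot],
                args[slot - 1], slot, slot - 1);
  }
  return 0;
}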
-void InstructionSelector::VisitTailCall(Node* node) {
- Arm64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push the arguments to the stack.
- int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
- bool pushed_count_uneven = aligned_push_count & 1;
- // TODO(dcarney): claim and poke probably take small immediates,
- // loop here or whatever.
- // Bump the stack pointer(s).
- if (aligned_push_count > 0) {
- // TODO(dcarney): it would be better to bump the csp here only
- // and emit paired stores with increment for non c frames.
- Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
- }
- // Move arguments to the stack.
- {
- int slot = aligned_push_count - 1;
- // Emit the uneven pushes.
- if (pushed_count_uneven) {
- Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot));
- slot--;
- }
- // Now all pushes can be done in pairs.
- for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair, g.NoOutput(),
- g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]),
- g.TempImmediate(slot));
- }
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
namespace {
@@ -1646,8 +1549,29 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitBinop<Int32BinopMatcher>(selector, node, kArm64Cmp32, kArithmeticImm,
- cont);
+ Int32BinopMatcher m(node);
+ ArchOpcode opcode = kArm64Cmp32;
+
+ // Select negated compare for comparisons with negated right input.
+ if (m.right().IsInt32Sub()) {
+ Node* sub = m.right().node();
+ Int32BinopMatcher msub(sub);
+ if (msub.left().Is(0)) {
+ bool can_cover = selector->CanCover(node, sub);
+ node->ReplaceInput(1, msub.right().node());
+ // Even if the comparison node covers the subtraction, after the input
+ // replacement above, the node still won't cover the input to the
+ // subtraction; the subtraction still uses it.
+ // In order to get shifted operations to work, we must remove the rhs
+ // input to the subtraction, as TryMatchAnyShift requires this node to
+ // cover the input shift. We do this by setting it to the lhs input,
+ // as we know it's zero, and the result of the subtraction isn't used by
+ // any other node.
+ if (can_cover) sub->ReplaceInput(1, msub.left().node());
+ opcode = kArm64Cmn32;
+ }
+ }
+ VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}
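The negated-compare selection is justified by cmp setting flags from a - b while cmn sets them from a + b, so a comparison of a against 0 - b can drop the negation. A standalone check of the identity, shown for the equality case:

#include <cassert>
#include <cstdint>

// cmp a, b sets the flags of a - b; cmn a, b sets the flags of a + b.
bool eq_via_cmp(int32_t a, int32_t b) { return a - b == 0; }
bool eq_via_cmn(int32_t a, int32_t b) { return a + b == 0; }

int main() {
  for (int32_t a : {-7, 0, 7})
    for (int32_t b : {-3, 0, 3})
      assert(eq_via_cmp(a, -b) == eq_via_cmn(a, b));  // a == -b iff a + b == 0
  return 0;
}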
@@ -2107,7 +2031,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32ShiftIsSafe |
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index f8f010d816..a58587dfba 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -7,7 +7,6 @@
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
-#include "src/compiler/js-type-feedback.h"
#include "src/compiler/linkage.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/machine-operator.h"
@@ -15,7 +14,6 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -212,7 +210,7 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
public:
explicit DeferredCommands(AstGraphBuilder* owner)
- : owner_(owner), deferred_(owner->zone()) {}
+ : owner_(owner), deferred_(owner->local_zone()) {}
// One recorded control-flow command.
struct Entry {
@@ -279,7 +277,7 @@ class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
: ControlScope(owner), target_(target), control_(control) {}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
if (target != target_) return false; // We are not the command target.
switch (cmd) {
case CMD_BREAK:
@@ -307,7 +305,7 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
: ControlScope(owner), target_(target), control_(control) {}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
if (target != target_) return false; // We are not the command target.
switch (cmd) {
case CMD_BREAK:
@@ -343,7 +341,7 @@ class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
switch (cmd) {
case CMD_THROW:
control_->Throw(value);
@@ -374,7 +372,7 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
Node* token = commands_->RecordCommand(cmd, target, value);
control_->LeaveTry(token, value);
return true;
@@ -396,8 +394,9 @@ class AstGraphBuilder::FrameStateBeforeAndAfter {
: builder_->environment()->Checkpoint(id_before);
}
- void AddToNode(Node* node, BailoutId id_after,
- OutputFrameStateCombine combine) {
+ void AddToNode(
+ Node* node, BailoutId id_after,
+ OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore()) {
int count = OperatorProperties::GetFrameStateInputCount(node->op());
DCHECK_LE(count, 2);
@@ -429,9 +428,9 @@ class AstGraphBuilder::FrameStateBeforeAndAfter {
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
- JSTypeFeedbackTable* js_type_feedback)
- : local_zone_(local_zone),
+ JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
+ : isolate_(info->isolate()),
+ local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
environment_(nullptr),
@@ -451,9 +450,8 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
info->scope()->num_stack_slots(), info->shared_info(),
- CALL_MAINTAINS_NATIVE_CONTEXT)),
- js_type_feedback_(js_type_feedback) {
- InitializeAstVisitor(info->isolate(), local_zone);
+ CALL_MAINTAINS_NATIVE_CONTEXT)) {
+ InitializeAstVisitor(info->isolate());
}
@@ -485,9 +483,9 @@ Node* AstGraphBuilder::GetFunctionClosure() {
Node* AstGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
- // Parameter (arity + 1) is special for the outer context of the function
+ // Parameter (arity + 2) is special for the outer context of the function
const Operator* op = common()->Parameter(
- info()->num_parameters_including_this(), "%context");
+ info()->num_parameters_including_this() + 1, "%context");
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
@@ -500,8 +498,9 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
DCHECK(graph() != NULL);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
- // parameters (including the receiver) plus context and closure.
- int actual_parameter_count = info()->num_parameters_including_this() + 2;
+ // parameters (including the receiver) plus number of arguments, context and
+ // closure.
+ int actual_parameter_count = info()->num_parameters_including_this() + 3;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
// Initialize the top-level environment.
@@ -527,18 +526,10 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
env.RawParameterBind(0, jsgraph()->TheHoleConstant());
}
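A worked example of the revised start-node layout, assuming a two-parameter function f(a, b) so that num_parameters_including_this() is n = 3: per the comment above, outputs 0 through 2 are the receiver and the formals, output n = 3 carries the argument count, output n + 1 = 4 the context (matching Parameter(n + 1) in GetFunctionContext), and output n + 2 = 5 the closure. A trivial standalone restatement of that arithmetic:

#include <cstdio>

int main() {
  const int n = 3;  // receiver + two formals, as for f(a, b)
  // Output count and special parameter indices, as laid out above.
  std::printf("outputs=%d argc@%d context@%d closure@%d\n",
              n + 3, n, n + 1, n + 2);
  return 0;
}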
- // Build receiver check for sloppy mode if necessary.
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- if (scope->has_this_declaration()) {
- Node* original_receiver = env.RawParameterLookup(0);
- Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
- env.RawParameterBind(0, patched_receiver);
- }
-
- // Build function context only if there are context allocated variables.
+ // Build local context only if there are context allocated variables.
if (info()->num_heap_slots() > 0) {
- // Push a new inner context scope for the function.
- Node* inner_context = BuildLocalFunctionContext(GetFunctionContext());
+ // Push a new inner context scope for the current activation.
+ Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
ContextScope top_context(this, scope, inner_context);
CreateGraphBody(stack_check);
} else {
@@ -1359,7 +1350,7 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Bind value and do loop body.
VectorSlotPair feedback =
CreateVectorSlotPair(stmt->EachFeedbackSlot());
- VisitForInAssignment(stmt->each(), value, feedback,
+ VisitForInAssignment(stmt->each(), value, feedback, stmt->FilterId(),
stmt->AssignmentId());
VisitIterationBody(stmt, &for_loop);
}
@@ -1395,8 +1386,6 @@ void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TryCatchBuilder try_control(this);
- ExternalReference message_object =
- ExternalReference::address_of_pending_message_obj(isolate());
// Evaluate the try-block inside a control scope. This simulates a handler
// that is intercepting 'throw' control commands.
@@ -1410,14 +1399,17 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control.EndTry();
- // TODO(mstarzinger): We are only using a runtime call to get a lazy bailout
- // point, there is no need to really emit an actual call. Optimize this!
- Node* guard = NewNode(javascript()->CallRuntime(Runtime::kMaxSmi, 0));
- PrepareFrameState(guard, stmt->HandlerId());
+ // Insert lazy bailout point.
+ // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
+ // point. Ideally, we would not re-enter optimized code when deoptimized
+ // lazily. Tracked by issue v8:4195.
+ NewNode(common()->LazyBailout(),
+ jsgraph()->ZeroConstant(), // dummy target.
+ environment()->Checkpoint(stmt->HandlerId())); // frame state.
// Clear message object as we enter the catch block.
Node* the_hole = jsgraph()->TheHoleConstant();
- BuildStoreExternal(message_object, kMachAnyTagged, the_hole);
+ NewNode(javascript()->StoreMessage(), the_hole);
// Create a catch scope that binds the exception.
Node* exception = try_control.GetExceptionNode();
@@ -1433,8 +1425,6 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
TryFinallyBuilder try_control(this);
- ExternalReference message_object =
- ExternalReference::address_of_pending_message_obj(isolate());
// We keep a record of all paths that enter the finally-block to be able to
// dispatch to the correct continuation point after the statements in the
@@ -1447,7 +1437,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// 3. By exiting the try-block with a thrown exception.
Node* fallthrough_result = jsgraph()->TheHoleConstant();
ControlScope::DeferredCommands* commands =
- new (zone()) ControlScope::DeferredCommands(this);
+ new (local_zone()) ControlScope::DeferredCommands(this);
// Evaluate the try-block inside a control scope. This simulates a handler
// that is intercepting all control commands.
@@ -1461,10 +1451,13 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
- // TODO(mstarzinger): We are only using a runtime call to get a lazy bailout
- // point, there is no need to really emit an actual call. Optimize this!
- Node* guard = NewNode(javascript()->CallRuntime(Runtime::kMaxSmi, 0));
- PrepareFrameState(guard, stmt->HandlerId());
+ // Insert lazy bailout point.
+ // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
+ // point. Ideally, we would not re-enter optimized code when deoptimized
+ // lazily. Tracked by issue v8:4195.
+ NewNode(common()->LazyBailout(),
+ jsgraph()->ZeroConstant(), // dummy target.
+ environment()->Checkpoint(stmt->HandlerId())); // frame state.
// The result value semantics depend on how the block was entered:
// - ReturnStatement: It represents the return value being returned.
@@ -1476,14 +1469,14 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// The result value, dispatch token and message is expected on the operand
// stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
- Node* message = BuildLoadExternal(message_object, kMachAnyTagged);
+ Node* message = NewNode(javascript()->LoadMessage());
environment()->Push(token); // TODO(mstarzinger): Cook token!
environment()->Push(result);
environment()->Push(message);
// Clear message object as we enter the finally block.
Node* the_hole = jsgraph()->TheHoleConstant();
- BuildStoreExternal(message_object, kMachAnyTagged, the_hole);
+ NewNode(javascript()->StoreMessage(), the_hole);
// Evaluate the finally-block.
Visit(stmt->finally_block());
@@ -1494,7 +1487,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
message = environment()->Pop();
result = environment()->Pop();
token = environment()->Pop(); // TODO(mstarzinger): Uncook token!
- BuildStoreExternal(message_object, kMachAnyTagged, message);
+ NewNode(javascript()->StoreMessage(), message);
// Dynamic dispatch after the finally-block.
commands->ApplyDeferredCommands(token, result);
@@ -1527,20 +1520,15 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- if (expr->scope() == NULL) {
- // Visit class literal in the same scope, no declarations.
+ // Visit declarations and class literal in a block scope.
+ if (expr->scope()->ContextLocalCount() > 0) {
+ Node* context = BuildLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope(), context);
+ VisitDeclarations(expr->scope()->declarations());
VisitClassLiteralContents(expr);
} else {
- // Visit declarations and class literal in a block scope.
- if (expr->scope()->ContextLocalCount() > 0) {
- Node* context = BuildLocalBlockContext(expr->scope());
- ContextScope scope(this, expr->scope(), context);
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- } else {
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- }
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
}
}
@@ -1638,13 +1626,12 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
literal = NewNode(op, literal, proto);
// Assign to class variable.
- if (expr->scope() != NULL) {
- DCHECK_NOT_NULL(expr->class_variable_proxy());
+ if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback = CreateVectorSlotPair(
expr->NeedsProxySlot() ? expr->ProxySlot()
- : FeedbackVectorICSlot::Invalid());
+ : FeedbackVectorSlot::Invalid());
BuildVariableAssignment(var, literal, Token::INIT_CONST, feedback,
BailoutId::None(), states);
}
@@ -1657,6 +1644,13 @@ void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
+void AstGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ VisitBlock(expr->block());
+ VisitVariableProxy(expr->result());
+ ast_context()->ReplaceValue();
+}
+
+
void AstGraphBuilder::VisitConditional(Conditional* expr) {
IfBuilder compare_if(this);
VisitForTest(expr->condition());
@@ -1723,7 +1717,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create nodes to store computed values into the literal.
int property_index = 0;
- AccessorTable accessor_table(zone());
+ AccessorTable accessor_table(local_zone());
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1747,8 +1741,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
- Node* store = BuildNamedStore(literal, name, value, feedback,
- TypeFeedbackId::None());
+ Node* store = BuildNamedStore(literal, name, value, feedback);
states.AddToNode(store, key->id(),
OutputFrameStateCombine::Ignore());
BuildSetHomeObject(value, literal, property, 1);
@@ -1911,7 +1904,6 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- expr->BuildConstantElements(isolate());
Node* literals_array =
BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* literal_index = jsgraph()->Constant(expr->literal_index());
@@ -1938,13 +1930,10 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForValue(subexpr);
{
FrameStateBeforeAndAfter states(this, subexpr->id());
+ VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
- // TODO(turbofan): More efficient code could be generated here. Consider
- // that the store will be generic because we don't have a feedback vector
- // slot.
- Node* store = BuildKeyedStore(literal, index, value, VectorSlotPair(),
- TypeFeedbackId::None());
+ Node* store = BuildKeyedStore(literal, index, value, pair);
states.AddToNode(store, expr->GetIdForElement(array_index),
OutputFrameStateCombine::Ignore());
}
@@ -1963,21 +1952,23 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
if (subexpr->IsSpread()) {
VisitForValue(subexpr->AsSpread()->expression());
+ FrameStateBeforeAndAfter states(this,
+ subexpr->AsSpread()->expression()->id());
Node* iterable = environment()->Pop();
Node* function = BuildLoadNativeContextField(
Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
- result = NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS,
- language_mode()),
- function, array, iterable);
+ result = NewNode(javascript()->CallFunction(3, language_mode()), function,
+ array, iterable);
+ states.AddToNode(result, expr->GetIdForElement(array_index));
} else {
VisitForValue(subexpr);
Node* value = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kAppendElement, 2);
result = NewNode(op, array, value);
+ PrepareFrameState(result, expr->GetIdForElement(array_index));
}
- PrepareFrameState(result, expr->GetIdForElement(array_index));
environment()->Push(result);
}
@@ -1987,7 +1978,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
- BailoutId bailout_id) {
+ BailoutId bailout_id_before,
+ BailoutId bailout_id_after) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
@@ -1998,9 +1990,11 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id,
- states);
+ environment()->Push(value);
+ FrameStateBeforeAndAfter states(this, bailout_id_before);
+ value = environment()->Pop();
+ BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
+ bailout_id_after, states);
break;
}
case NAMED_PROPERTY: {
@@ -2010,9 +2004,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* object = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback,
- TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildNamedStore(object, name, value, feedback);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_PROPERTY: {
@@ -2023,9 +2017,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store =
- BuildKeyedStore(object, key, value, feedback, TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2037,9 +2031,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* receiver = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
- TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2052,9 +2046,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
- TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
}
@@ -2197,8 +2191,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback,
- expr->AssignmentFeedbackId());
+ Node* store = BuildNamedStore(object, name, value, feedback);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2206,8 +2199,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback,
- expr->AssignmentFeedbackId());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2216,8 +2208,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
- expr->AssignmentFeedbackId());
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2226,8 +2217,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
- expr->AssignmentFeedbackId());
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2314,9 +2304,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
- Node* receiver_value = NULL;
- Node* callee_value = NULL;
+ ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+ Node* receiver_value = nullptr;
+ Node* callee_value = nullptr;
bool possibly_eval = false;
switch (call_type) {
case Call::GLOBAL_CALL: {
@@ -2326,6 +2316,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
callee_value =
BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
pair, OutputFrameStateCombine::Push());
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
}
@@ -2338,65 +2329,88 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* pair = NewNode(op, current_context(), name);
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
-
PrepareFrameState(pair, expr->LookupId(),
OutputFrameStateCombine::Push(2));
break;
}
- case Call::PROPERTY_CALL: {
+ case Call::NAMED_PROPERTY_CALL: {
Property* property = callee->AsProperty();
- VectorSlotPair pair =
+ VectorSlotPair feedback =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- if (!property->IsSuperAccess()) {
- VisitForValue(property->obj());
- Node* object = environment()->Top();
-
- if (property->key()->IsPropertyName()) {
- FrameStateBeforeAndAfter states(this, property->obj()->id());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value = BuildNamedLoad(object, name, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- } else {
- VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
- Node* key = environment()->Pop();
- callee_value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- }
- receiver_value = environment()->Pop();
- // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
- // object for sloppy callees. This could also be modeled explicitly
- // here,
- // thereby obsoleting the need for a flag to the call operator.
- flags = CALL_AS_METHOD;
-
- } else {
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Pop();
- receiver_value = environment()->Pop();
- if (property->key()->IsPropertyName()) {
- FrameStateBeforeAndAfter states(this, property->obj()->id());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value =
- BuildNamedSuperLoad(receiver_value, home_object, name, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
-
- } else {
- VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
- Node* key = environment()->Pop();
- callee_value =
- BuildKeyedSuperLoad(receiver_value, home_object, key, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- }
- }
-
+ VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* object = environment()->Top();
+ callee_value = BuildNamedLoad(object, name, feedback);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Top();
+ callee_value = BuildKeyedLoad(object, key, feedback);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ Node* home = environment()->Peek(1);
+ Node* object = environment()->Top();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
+ break;
+ }
+ case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ environment()->Push(environment()->Top()); // Duplicate this_var.
+ environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* home = environment()->Pop();
+ Node* object = environment()->Pop();
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
break;
}
case Call::SUPER_CALL:
@@ -2419,6 +2433,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
case Call::OTHER_CALL:
VisitForValue(callee);
callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
}
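The receiver_hint chosen above is passed along to the CallFunction operator so that later lowering can skip the sloppy-mode receiver conversion whenever the receiver's nature is statically known. A condensed standalone sketch of the decision (enumerators mirror ConvertReceiverMode; the two booleans are hypothetical summaries of the cases above):

enum class ReceiverMode { kAny, kNullOrUndefined, kNotNullOrUndefined };

// Global and 'other' calls push an explicit undefined receiver. Named and
// keyed property calls just loaded the callee from the receiver, which
// would have thrown had the receiver been null or undefined. Super
// property calls give no such guarantee and keep the default kAny.
ReceiverMode HintFor(bool receiver_is_undefined_constant,
                     bool receiver_was_load_target) {
  if (receiver_is_undefined_constant) return ReceiverMode::kNullOrUndefined;
  if (receiver_was_load_target) return ReceiverMode::kNotNullOrUndefined;
  return ReceiverMode::kAny;
}

int main() {
  return HintFor(false, true) == ReceiverMode::kNotNullOrUndefined ? 0 : 1;
}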
@@ -2442,7 +2457,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* source = environment()->Peek(arg_count - 1);
// Create node to ask for help resolving potential eval call. This will
- // provide a fully resolved callee and the corresponding receiver.
+ // provide a fully resolved callee to patch into the environment.
Node* function = GetFunctionClosure();
Node* language = jsgraph()->Constant(language_mode());
Node* position = jsgraph()->Constant(current_scope()->start_position());
@@ -2459,10 +2474,13 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Create node to perform the function call.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
- const Operator* call = javascript()->CallFunction(args->length() + 2, flags,
- language_mode(), feedback);
+ const Operator* call = javascript()->CallFunction(
+ args->length() + 2, language_mode(), feedback, receiver_hint);
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ environment()->Push(value->InputAt(0)); // The callee passed to the call.
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ environment()->Drop(1);
ast_context()->ProduceValue(value);
}
@@ -2517,7 +2535,6 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// The callee and the receiver both have to be pushed onto the operand stack
// before arguments are being evaluated.
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
Node* callee_value = BuildLoadNativeContextField(expr->context_index());
Node* receiver_value = jsgraph()->UndefinedConstant();
@@ -2530,9 +2547,10 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// Create node to perform the JS runtime call.
const Operator* call =
- javascript()->CallFunction(args->length() + 2, flags, language_mode());
+ javascript()->CallFunction(args->length() + 2, language_mode());
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2592,7 +2610,9 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
- if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
+ if (is_postfix && assign_type != VARIABLE) {
+ environment()->Push(jsgraph()->ZeroConstant());
+ }
// Evaluate LHS expression and get old value.
Node* old_value = NULL;
@@ -2675,13 +2695,19 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
OutputFrameStateCombine::Push());
}
- // TODO(titzer): combine this framestate with the above?
- FrameStateBeforeAndAfter store_states(this, assign_type == KEYED_PROPERTY
- ? expr->ToNumberId()
- : BailoutId::None());
+ // Create a proper eager frame state for the stores.
+ environment()->Push(old_value);
+ FrameStateBeforeAndAfter store_states(this, expr->ToNumberId());
+ old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
- if (is_postfix) environment()->Poke(stack_depth, old_value);
+ if (is_postfix) {
+ if (assign_type != VARIABLE) {
+ environment()->Poke(stack_depth, old_value);
+ } else {
+ environment()->Push(old_value);
+ }
+ }
// Create node to perform +1/-1 operation.
Node* value;
@@ -2710,8 +2736,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback,
- expr->CountStoreFeedbackId());
+ Node* store = BuildNamedStore(object, name, value, feedback);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2721,8 +2746,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback,
- expr->CountStoreFeedbackId());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2733,8 +2757,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
- expr->CountStoreFeedbackId());
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2745,8 +2768,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
- expr->CountStoreFeedbackId());
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -3017,11 +3039,22 @@ LanguageMode AstGraphBuilder::language_mode() const {
VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
- FeedbackVectorICSlot slot) const {
+ FeedbackVectorSlot slot) const {
return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
}
+namespace {
+
+// Limit of context chain length to which inline check is possible.
+const int kMaxCheckDepth = 30;
+
+// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
+const uint32_t kFullCheckRequired = -1;
+
+} // namespace
+
+
uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
bool found_eval_scope = false;
@@ -3034,9 +3067,7 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
if (s->is_eval_scope()) found_eval_scope = true;
if (!s->calls_sloppy_eval() && !found_eval_scope) continue;
int depth = current_scope()->ContextChainLength(s);
- if (depth > DynamicGlobalAccess::kMaxCheckDepth) {
- return DynamicGlobalAccess::kFullCheckRequired;
- }
+ if (depth > kMaxCheckDepth) return kFullCheckRequired;
check_depths |= 1 << depth;
}
return check_depths;
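The bitset built here records, one bit per context-chain depth, where a sloppy-eval scope could have introduced an extension object that shadows the variable; TryLoadDynamicVariable can then emit one inline check per set bit instead of falling back to a full dynamic lookup. A standalone sketch of the encoding, assuming eval-calling scopes at depths 1 and 4:

#include <cstdint>
#include <cstdio>

int main() {
  const int kMaxCheckDepth = 30;
  const uint32_t kFullCheckRequired = static_cast<uint32_t>(-1);
  uint32_t check_depths = 0;
  for (int depth : {1, 4}) {  // scopes that call sloppy eval
    if (depth > kMaxCheckDepth) {
      check_depths = kFullCheckRequired;  // too deep: no inline checks
      break;
    }
    check_depths |= 1u << depth;
  }
  std::printf("%#x\n", static_cast<unsigned>(check_depths));  // 0x12: bits 1 and 4
  return 0;
}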
@@ -3050,9 +3081,7 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
if (s->num_heap_slots() <= 0) continue;
if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
int depth = current_scope()->ContextChainLength(s);
- if (depth > DynamicContextAccess::kMaxCheckDepth) {
- return DynamicContextAccess::kFullCheckRequired;
- }
+ if (depth > kMaxCheckDepth) return kFullCheckRequired;
check_depths |= 1 << depth;
if (s == variable->scope()) break;
}
@@ -3071,37 +3100,13 @@ Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
}
-Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object). Otherwise there is nothing left to do here.
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- IfBuilder receiver_check(this);
- Node* undefined = jsgraph()->UndefinedConstant();
- Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
- receiver_check.If(check);
- receiver_check.Then();
- Node* proxy = BuildLoadGlobalProxy();
- environment()->Push(proxy);
- receiver_check.Else();
- environment()->Push(receiver);
- receiver_check.End();
- return environment()->Pop();
- } else {
- return receiver;
- }
-}
-
-
-Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context) {
+Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
Scope* scope = info()->scope();
- Node* closure = GetFunctionClosure();
// Allocate a new local context.
- Node* local_context =
- scope->is_script_scope()
- ? BuildLocalScriptContext(scope)
- : NewNode(javascript()->CreateFunctionContext(), closure);
+ Node* local_context = scope->is_script_scope()
+ ? BuildLocalScriptContext(scope)
+ : BuildLocalFunctionContext(scope);
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
Node* receiver = environment()->RawParameterLookup(0);
@@ -3128,6 +3133,18 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context) {
}
+Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
+ DCHECK(scope->is_function_scope());
+
+ // Allocate a new local context.
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ const Operator* op = javascript()->CreateFunctionContext(slot_count);
+ Node* local_context = NewNode(op, GetFunctionClosure());
+
+ return local_context;
+}
+
+
Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
DCHECK(scope->is_script_scope());
@@ -3135,7 +3152,8 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
const Operator* op = javascript()->CreateScriptContext(scope_info);
Node* local_context = NewNode(op, GetFunctionClosure());
- PrepareFrameState(local_context, BailoutId::Prologue());
+ PrepareFrameState(local_context, BailoutId::ScriptContext(),
+ OutputFrameStateCombine::Push());
return local_context;
}
@@ -3197,6 +3215,7 @@ Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
const Operator* op =
javascript()->CallRuntime(Runtime::kGetOriginalConstructor, 0);
Node* object = NewNode(op);
+ PrepareFrameState(object, BailoutId::None());
// Assign the object to the {new.target} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
@@ -3279,22 +3298,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
- Node* script_context = current_context();
- int slot_index = -1;
- if (variable->index() > 0) {
- DCHECK(variable->IsStaticGlobalObjectProperty());
- slot_index = variable->index();
- int depth = current_scope()->ContextChainLength(variable->scope());
- if (depth > 0) {
- const Operator* op = javascript()->LoadContext(
- depth - 1, Context::PREVIOUS_INDEX, true);
- script_context = NewNode(op, current_context());
- }
- }
- Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* value = BuildGlobalLoad(script_context, global, name, feedback,
- typeof_mode, slot_index);
+ if (Node* node = TryLoadGlobalConstant(name)) return node;
+ Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
states.AddToNode(value, bailout_id, combine);
return value;
}
@@ -3342,40 +3348,15 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
- Node* value = jsgraph()->TheHoleConstant();
Handle<String> name = variable->name();
- if (mode == DYNAMIC_GLOBAL) {
- uint32_t check_bitset = ComputeBitsetForDynamicGlobal(variable);
- const Operator* op = javascript()->LoadDynamicGlobal(
- name, check_bitset, feedback, typeof_mode);
- value = NewNode(op, BuildLoadFeedbackVector(), current_context());
- states.AddToNode(value, bailout_id, combine);
- } else if (mode == DYNAMIC_LOCAL) {
- Variable* local = variable->local_if_not_shadowed();
- DCHECK(local->location() ==
- VariableLocation::CONTEXT); // Must be context.
- int depth = current_scope()->ContextChainLength(local->scope());
- uint32_t check_bitset = ComputeBitsetForDynamicContext(variable);
- const Operator* op = javascript()->LoadDynamicContext(
- name, check_bitset, depth, local->index());
- value = NewNode(op, current_context());
- PrepareFrameState(value, bailout_id, combine);
- VariableMode local_mode = local->mode();
- if (local_mode == CONST_LEGACY) {
- // Perform check for uninitialized legacy const variables.
- Node* undefined = jsgraph()->UndefinedConstant();
- value = BuildHoleCheckSilent(value, undefined, value);
- } else if (local_mode == LET || local_mode == CONST) {
- // Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThenThrow(value, local, value, bailout_id);
- }
- } else if (mode == DYNAMIC) {
- uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
- const Operator* op = javascript()->LoadDynamicGlobal(
- name, check_bitset, feedback, typeof_mode);
- value = NewNode(op, BuildLoadFeedbackVector(), current_context());
- states.AddToNode(value, bailout_id, combine);
+ if (Node* node =
+ TryLoadDynamicVariable(variable, name, bailout_id, states,
+ feedback, combine, typeof_mode)) {
+ return node;
}
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(value, bailout_id, combine);
return value;
}
}
@@ -3429,23 +3410,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
- Node* script_context = current_context();
- int slot_index = -1;
- if (variable->index() > 0) {
- DCHECK(variable->IsStaticGlobalObjectProperty());
- slot_index = variable->index();
- int depth = current_scope()->ContextChainLength(variable->scope());
- if (depth > 0) {
- const Operator* op = javascript()->LoadContext(
- depth - 1, Context::PREVIOUS_INDEX, true);
- script_context = NewNode(op, current_context());
- }
- }
- Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* store =
- BuildGlobalStore(script_context, global, name, value, feedback,
- TypeFeedbackId::None(), slot_index);
+ Node* store = BuildGlobalStore(name, value, feedback);
states.AddToNode(store, bailout_id, combine);
return store;
}
@@ -3466,13 +3432,19 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return BuildThrowConstAssignError(bailout_id);
}
return value;
+ } else if (mode == LET && op == Token::INIT_LET) {
+ // No initialization check needed because scoping guarantees it. Note
+ // that we still perform a lookup to keep the variable live, because
+ // baseline code might contain debug code that inspects the variable.
+ Node* current = environment()->Lookup(variable);
+ CHECK_NOT_NULL(current);
} else if (mode == LET && op != Token::INIT_LET) {
// Perform an initialization check for let declared variables.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- value = BuildThrowReferenceError(variable, bailout_id);
+ return BuildThrowReferenceError(variable, bailout_id);
} else if (current->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op == Token::INIT_CONST) {
// Perform an initialization check for const {this} variables.
@@ -3556,63 +3528,37 @@ Node* AstGraphBuilder::BuildVariableAssignment(
}
-static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
- FeedbackVectorICSlot slot) {
- if (js_type_feedback) {
- js_type_feedback->Record(node, slot);
- }
- return node;
-}
-
-
-static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
- TypeFeedbackId id) {
- if (js_type_feedback) {
- js_type_feedback->Record(node, id);
- }
- return node;
-}
-
-
Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
const VectorSlotPair& feedback) {
- const Operator* op = javascript()->LoadProperty(feedback, language_mode());
+ const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
- const Operator* op = javascript()->LoadNamed(name, feedback, language_mode());
+ const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
Node* node = NewNode(op, object, BuildLoadFeedbackVector());
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
- const VectorSlotPair& feedback,
- TypeFeedbackId id) {
+ const VectorSlotPair& feedback) {
const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
- if (FLAG_vector_stores) {
- return Record(js_type_feedback_, node, feedback.slot());
- }
- return Record(js_type_feedback_, node, id);
+ return node;
}
Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
Node* value,
- const VectorSlotPair& feedback,
- TypeFeedbackId id) {
+ const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreNamed(language_mode(), name, feedback);
Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
- if (FLAG_vector_stores) {
- return Record(js_type_feedback_, node, feedback.slot());
- }
- return Record(js_type_feedback_, node, id);
+ return node;
}
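
Note: all of these store builders now thread feedback exclusively through the VectorSlotPair; the side-table Record() calls keyed by TypeFeedbackId are gone. A rough model of the slot-based scheme, with invented types (V8's real FeedbackVector is far richer):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class Feedback { kUninitialized, kMonomorphic, kPolymorphic };

    // Each IC site owns one slot in a per-function vector, so recording
    // feedback needs only the slot index -- no separate node-to-id table.
    struct FeedbackVector {
      std::vector<Feedback> slots;
      void Record(size_t slot, Feedback f) { slots[slot] = f; }
      Feedback At(size_t slot) const { return slots[slot]; }
    };

    int main() {
      FeedbackVector v{std::vector<Feedback>(2, Feedback::kUninitialized)};
      v.Record(1, Feedback::kMonomorphic);  // hypothetical store site
      std::printf("%d\n", static_cast<int>(v.At(1)));
      return 0;
    }
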
@@ -3623,7 +3569,7 @@ Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
Node* language = jsgraph()->Constant(language_mode());
const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
Node* node = NewNode(op, receiver, home_object, name_node, language);
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
@@ -3634,58 +3580,48 @@ Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
const Operator* op =
javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
Node* node = NewNode(op, receiver, home_object, key, language);
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
- Node* key, Node* value,
- TypeFeedbackId id) {
+ Node* key, Node* value) {
Runtime::FunctionId function_id = is_strict(language_mode())
? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy;
const Operator* op = javascript()->CallRuntime(function_id, 4);
Node* node = NewNode(op, receiver, home_object, key, value);
- return Record(js_type_feedback_, node, id);
+ return node;
}
Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value,
- TypeFeedbackId id) {
+ Handle<Name> name, Node* value) {
Node* name_node = jsgraph()->Constant(name);
Runtime::FunctionId function_id = is_strict(language_mode())
? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy;
const Operator* op = javascript()->CallRuntime(function_id, 4);
Node* node = NewNode(op, receiver, home_object, name_node, value);
- return Record(js_type_feedback_, node, id);
+ return node;
}
-Node* AstGraphBuilder::BuildGlobalLoad(Node* script_context, Node* global,
- Handle<Name> name,
+Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
const VectorSlotPair& feedback,
- TypeofMode typeof_mode, int slot_index) {
- const Operator* op =
- javascript()->LoadGlobal(name, feedback, typeof_mode, slot_index);
- Node* node = NewNode(op, script_context, global, BuildLoadFeedbackVector());
- return Record(js_type_feedback_, node, feedback.slot());
+ TypeofMode typeof_mode) {
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+ Node* node = NewNode(op, BuildLoadFeedbackVector());
+ return node;
}
-Node* AstGraphBuilder::BuildGlobalStore(Node* script_context, Node* global,
- Handle<Name> name, Node* value,
- const VectorSlotPair& feedback,
- TypeFeedbackId id, int slot_index) {
+Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback) {
const Operator* op =
- javascript()->StoreGlobal(language_mode(), name, feedback, slot_index);
- Node* node =
- NewNode(op, script_context, global, value, BuildLoadFeedbackVector());
- if (FLAG_vector_stores) {
- return Record(js_type_feedback_, node, feedback.slot());
- }
- return Record(js_type_feedback_, node, id);
+ javascript()->StoreGlobal(language_mode(), name, feedback);
+ Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ return node;
}
@@ -3712,19 +3648,11 @@ Node* AstGraphBuilder::BuildLoadGlobalObject() {
Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
Node* global = BuildLoadGlobalObject();
Node* native_context =
- BuildLoadObjectField(global, GlobalObject::kNativeContextOffset);
+ BuildLoadObjectField(global, JSGlobalObject::kNativeContextOffset);
return NewNode(javascript()->LoadContext(0, index, true), native_context);
}
-Node* AstGraphBuilder::BuildLoadGlobalProxy() {
- Node* global = BuildLoadGlobalObject();
- Node* proxy =
- BuildLoadObjectField(global, JSGlobalObject::kGlobalProxyOffset);
- return proxy;
-}
-
-
Node* AstGraphBuilder::BuildLoadFeedbackVector() {
if (!feedback_vector_.is_set()) {
Node* closure = GetFunctionClosure();
@@ -3738,60 +3666,14 @@ Node* AstGraphBuilder::BuildLoadFeedbackVector() {
}
-Node* AstGraphBuilder::BuildLoadExternal(ExternalReference reference,
- MachineType type) {
- return NewNode(jsgraph()->machine()->Load(type),
- jsgraph()->ExternalConstant(reference),
- jsgraph()->IntPtrConstant(0));
-}
-
-
-Node* AstGraphBuilder::BuildStoreExternal(ExternalReference reference,
- MachineType type, Node* value) {
- StoreRepresentation representation(type, kNoWriteBarrier);
- return NewNode(jsgraph()->machine()->Store(representation),
- jsgraph()->ExternalConstant(reference),
- jsgraph()->IntPtrConstant(0), value);
-}
-
-
Node* AstGraphBuilder::BuildToBoolean(Node* input) {
- // TODO(bmeurer, mstarzinger): Refactor this into a separate optimization
- // method.
- switch (input->opcode()) {
- case IrOpcode::kNumberConstant: {
- NumberMatcher m(input);
- return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
- }
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> object = HeapObjectMatcher(input).Value();
- return jsgraph_->BooleanConstant(object->BooleanValue());
- }
- case IrOpcode::kJSEqual:
- case IrOpcode::kJSNotEqual:
- case IrOpcode::kJSStrictEqual:
- case IrOpcode::kJSStrictNotEqual:
- case IrOpcode::kJSLessThan:
- case IrOpcode::kJSLessThanOrEqual:
- case IrOpcode::kJSGreaterThan:
- case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSUnaryNot:
- case IrOpcode::kJSToBoolean:
- case IrOpcode::kJSDeleteProperty:
- case IrOpcode::kJSHasProperty:
- case IrOpcode::kJSInstanceOf:
- return input;
- default:
- break;
- }
+ if (Node* node = TryFastToBoolean(input)) return node;
return NewNode(javascript()->ToBoolean(), input);
}
Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
- // TODO(turbofan): Possible optimization is to NOP on name constants. But the
- // same caveat as with BuildToBoolean applies, and it should be factored out
- // into a JSOperatorReducer.
+ if (Node* node = TryFastToName(input)) return node;
Node* name = NewNode(javascript()->ToName(), input);
PrepareFrameState(name, bailout_id);
return name;
@@ -3814,8 +3696,7 @@ Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(slot_number));
- Node* store = BuildNamedStore(value, name, home_object, feedback,
- TypeFeedbackId::None());
+ Node* store = BuildNamedStore(value, name, home_object, feedback);
states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
return store;
}
@@ -3936,6 +3817,159 @@ Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
}
+Node* AstGraphBuilder::TryLoadGlobalConstant(Handle<Name> name) {
+ // Optimize global constants like "undefined", "Infinity", and "NaN".
+ Handle<Object> constant_value = isolate()->factory()->GlobalConstantFor(name);
+ if (!constant_value.is_null()) return jsgraph()->Constant(constant_value);
+ return nullptr;
+}
+
+
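
Note: GlobalConstantFor covers the value properties the spec pins on the global object (undefined, NaN, Infinity), which are non-writable and non-configurable and therefore safe to fold. A standalone sketch of the same idea (the lookup function is an assumption, not V8's factory method):

    #include <cstdio>
    #include <limits>
    #include <optional>
    #include <string>

    // Fold reads of immutable spec-defined globals to constants; anything else
    // falls through to a real LoadGlobal.
    std::optional<double> TryGlobalConstant(const std::string& name) {
      if (name == "NaN") return std::numeric_limits<double>::quiet_NaN();
      if (name == "Infinity") return std::numeric_limits<double>::infinity();
      return std::nullopt;  // "undefined" folds too, but has no double encoding
    }

    int main() {
      if (auto c = TryGlobalConstant("Infinity")) std::printf("%f\n", *c);
      return 0;
    }
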
+Node* AstGraphBuilder::TryLoadDynamicVariable(
+ Variable* variable, Handle<String> name, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states, const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine, TypeofMode typeof_mode) {
+ VariableMode mode = variable->mode();
+
+ if (mode == DYNAMIC_GLOBAL) {
+ uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
+ if (bitset == kFullCheckRequired) return nullptr;
+
+ // We are using two blocks to model fast and slow cases.
+ BlockBuilder fast_block(this);
+ BlockBuilder slow_block(this);
+ environment()->Push(jsgraph()->TheHoleConstant());
+ slow_block.BeginBlock();
+ environment()->Pop();
+ fast_block.BeginBlock();
+
+ // Check whether the fast mode applies by looking for any extension
+ // object that might shadow the optimistic declaration.
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ current_context());
+ Node* check =
+ NewNode(javascript()->CallRuntime(Runtime::kInlineIsSmi, 1), load);
+ fast_block.BreakUnless(check, BranchHint::kTrue);
+ }
+
+ // Fast case, because variable is not shadowed. Perform global slot load.
+ Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
+ states.AddToNode(fast, bailout_id, combine);
+ environment()->Push(fast);
+ slow_block.Break();
+ environment()->Pop();
+ fast_block.EndBlock();
+
+ // Slow case, because variable potentially shadowed. Perform dynamic lookup.
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(slow, bailout_id, combine);
+ environment()->Push(slow);
+ slow_block.EndBlock();
+
+ return environment()->Pop();
+ }
+
+ if (mode == DYNAMIC_LOCAL) {
+ uint32_t bitset = ComputeBitsetForDynamicContext(variable);
+ if (bitset == kFullCheckRequired) return nullptr;
+
+ // We are using two blocks to model fast and slow cases.
+ BlockBuilder fast_block(this);
+ BlockBuilder slow_block(this);
+ environment()->Push(jsgraph()->TheHoleConstant());
+ slow_block.BeginBlock();
+ environment()->Pop();
+ fast_block.BeginBlock();
+
+ // Check whether the fast mode applies by looking for any extension
+ // object that might shadow the optimistic declaration.
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ current_context());
+ Node* check =
+ NewNode(javascript()->CallRuntime(Runtime::kInlineIsSmi, 1), load);
+ fast_block.BreakUnless(check, BranchHint::kTrue);
+ }
+
+ // Fast case, because variable is not shadowed. Perform context slot load.
+ Variable* local = variable->local_if_not_shadowed();
+ DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
+ Node* fast = BuildVariableLoad(local, bailout_id, states, feedback, combine,
+ typeof_mode);
+ environment()->Push(fast);
+ slow_block.Break();
+ environment()->Pop();
+ fast_block.EndBlock();
+
+ // Slow case, because variable potentially shadowed. Perform dynamic lookup.
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(slow, bailout_id, combine);
+ environment()->Push(slow);
+ slow_block.EndBlock();
+
+ return environment()->Pop();
+ }
+
+ return nullptr;
+}
+
+
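
Note: both fast paths above decode the same bitset: bit d set means the context at depth d may hold an extension object that must be checked before the optimistic slot access is safe. A self-contained rendering of that loop (the shadow flags are made up for the demo):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Returns true when no checked depth actually holds an extension object,
    // i.e. the fast (non-dynamic) load is valid on this path.
    bool FastPathApplies(uint32_t bitset, const std::vector<bool>& extension_at) {
      for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
        if ((bitset & 1) == 0) continue;        // this depth needs no check
        if (extension_at[depth]) return false;  // shadowed: use dynamic lookup
      }
      return true;
    }

    int main() {
      std::vector<bool> chain = {false, true, false};  // extension at depth 1
      std::printf("%d\n", FastPathApplies(0b101, chain));  // 1: depth 1 unchecked
      std::printf("%d\n", FastPathApplies(0b010, chain));  // 0: depth 1 shadowed
      return 0;
    }
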
+Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kNumberConstant: {
+ NumberMatcher m(input);
+ return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
+ }
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
+ return jsgraph_->BooleanConstant(object->BooleanValue());
+ }
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSNotEqual:
+ case IrOpcode::kJSStrictEqual:
+ case IrOpcode::kJSStrictNotEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSUnaryNot:
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSInstanceOf:
+ return input;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+
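
Note: the number-constant arm of TryFastToBoolean applies the ES ToBoolean rule for numbers directly: only 0, -0, and NaN are falsy. As a checkable sketch (just the numeric rule, not the full optimization):

    #include <cassert>
    #include <cmath>

    // ES ToBoolean restricted to numbers, matching the NumberConstant arm above.
    bool NumberToBoolean(double v) { return v != 0.0 && !std::isnan(v); }

    int main() {
      assert(!NumberToBoolean(0.0));
      assert(!NumberToBoolean(-0.0));           // -0 compares equal to 0
      assert(!NumberToBoolean(std::nan("")));
      assert(NumberToBoolean(42.0));
      return 0;
    }

The comparison and boolean-producing opcodes are returned unchanged because their result is already a boolean, so wrapping them in JSToBoolean would be a no-op; TryFastToName below plays the same trick for inputs already known to be names.
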
+Node* AstGraphBuilder::TryFastToName(Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
+ if (object->IsName()) return input;
+ break;
+ }
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSTypeOf:
+ return input;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
info()->set_osr_expr_stack_height(std::max(
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 8b90f072a0..f5b662224a 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -19,7 +19,6 @@ namespace compiler {
class ControlBuilder;
class Graph;
-class JSTypeFeedbackTable;
class LoopAssignmentAnalysis;
class LoopBuilder;
class Node;
@@ -31,8 +30,7 @@ class Node;
class AstGraphBuilder : public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- LoopAssignmentAnalysis* loop_assignment = NULL,
- JSTypeFeedbackTable* js_type_feedback = NULL);
+ LoopAssignmentAnalysis* loop_assignment = NULL);
// Creates a graph by visiting the entire AST.
bool CreateGraph(bool stack_check = true);
@@ -70,6 +68,7 @@ class AstGraphBuilder : public AstVisitor {
class FrameStateBeforeAndAfter;
friend class ControlBuilder;
+ Isolate* isolate_;
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
@@ -115,9 +114,6 @@ class AstGraphBuilder : public AstVisitor {
// Function info for frame state construction.
const FrameStateFunctionInfo* const frame_state_function_info_;
- // Type feedback table.
- JSTypeFeedbackTable* js_type_feedback_;
-
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -129,6 +125,7 @@ class AstGraphBuilder : public AstVisitor {
ContextScope* execution_context() const { return execution_context_; }
CommonOperatorBuilder* common() const { return jsgraph_->common(); }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return isolate_; }
LanguageMode language_mode() const;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
@@ -233,7 +230,7 @@ class AstGraphBuilder : public AstVisitor {
Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
- VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
+ VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
// Determine which contexts need to be checked for extension objects that
// might shadow the optimistic declaration of dynamic lookup variables.
@@ -245,11 +242,9 @@ class AstGraphBuilder : public AstVisitor {
// resulting node. The operand stack height remains the same; variables and
// other dependencies tracked by the environment might be mutated, though.
- // Builder to create a receiver check for sloppy mode.
- Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
-
// Builders to create local function, script and block contexts.
- Node* BuildLocalFunctionContext(Node* context);
+ Node* BuildLocalActivationContext(Node* context);
+ Node* BuildLocalFunctionContext(Scope* scope);
Node* BuildLocalScriptContext(Scope* scope);
Node* BuildLocalBlockContext(Scope* scope);
@@ -283,42 +278,35 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildNamedLoad(Node* receiver, Handle<Name> name,
const VectorSlotPair& feedback);
Node* BuildKeyedStore(Node* receiver, Node* key, Node* value,
- const VectorSlotPair& feedback, TypeFeedbackId id);
+ const VectorSlotPair& feedback);
Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
- const VectorSlotPair& feedback, TypeFeedbackId id);
+ const VectorSlotPair& feedback);
// Builders for super property loads and stores.
Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
- Node* value, TypeFeedbackId id);
+ Node* value);
Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value, TypeFeedbackId id);
+ Handle<Name> name, Node* value);
Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
Handle<Name> name, const VectorSlotPair& feedback);
Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
const VectorSlotPair& feedback);
// Builders for global variable loads and stores.
- Node* BuildGlobalLoad(Node* script_context, Node* global, Handle<Name> name,
- const VectorSlotPair& feedback, TypeofMode typeof_mode,
- int slot_index);
- Node* BuildGlobalStore(Node* script_context, Node* global, Handle<Name> name,
- Node* value, const VectorSlotPair& feedback,
- TypeFeedbackId id, int slot_index);
+ Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
+ TypeofMode typeof_mode);
+ Node* BuildGlobalStore(Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback);
// Builders for accessing the function context.
Node* BuildLoadGlobalObject();
Node* BuildLoadNativeContextField(int index);
- Node* BuildLoadGlobalProxy();
Node* BuildLoadFeedbackVector();
// Builder for accessing a (potentially immutable) object field.
Node* BuildLoadObjectField(Node* object, int offset);
Node* BuildLoadImmutableObjectField(Node* object, int offset);
- // Builders for accessing external references.
- Node* BuildLoadExternal(ExternalReference ref, MachineType type);
- Node* BuildStoreExternal(ExternalReference ref, MachineType type, Node* val);
-
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* input);
Node* BuildToName(Node* input, BailoutId bailout_id);
@@ -359,6 +347,28 @@ class AstGraphBuilder : public AstVisitor {
Node* ProcessArguments(const Operator* op, int arity);
// ===========================================================================
+ // The following build methods have the same contract as the above ones, but
+ // they can also return {NULL} to indicate that no fragment was built. Note
+ // that these are optimizations; disabling any of them should still produce
+ // correct graphs.
+
+ // Optimization for variable load from global object.
+ Node* TryLoadGlobalConstant(Handle<Name> name);
+
+ // Optimization for variable load of dynamic lookup slot that is most likely
+ // to resolve to a global slot or context slot (inferred from scope chain).
+ Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
+ BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
+ const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine,
+ TypeofMode typeof_mode);
+
+ // Optimizations for automatic type conversion.
+ Node* TryFastToBoolean(Node* input);
+ Node* TryFastToName(Node* input);
+
+ // ===========================================================================
// The following visitation methods all recursively visit a subtree of the
// underlying AST and extend the graph. The operand stack is mutated in a way
// consistent with other compilers:
@@ -401,7 +411,8 @@ class AstGraphBuilder : public AstVisitor {
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
- BailoutId bailout_id);
+ BailoutId bailout_id_before,
+ BailoutId bailout_id_after);
// Dispatched from VisitObjectLiteral.
void VisitObjectLiteralAccessor(Node* home_object,
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index d9ec109e40..040999aa05 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -13,13 +13,13 @@ namespace compiler {
typedef class AstLoopAssignmentAnalyzer ALAA; // for code brevity.
ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
- : info_(info), loop_stack_(zone) {
- InitializeAstVisitor(info->isolate(), zone);
+ : info_(info), zone_(zone), loop_stack_(zone) {
+ InitializeAstVisitor(info->isolate());
}
LoopAssignmentAnalysis* ALAA::Analyze() {
- LoopAssignmentAnalysis* a = new (zone()) LoopAssignmentAnalysis(zone());
+ LoopAssignmentAnalysis* a = new (zone_) LoopAssignmentAnalysis(zone_);
result_ = a;
VisitStatements(info()->literal()->body());
result_ = NULL;
@@ -30,7 +30,7 @@ LoopAssignmentAnalysis* ALAA::Analyze() {
void ALAA::Enter(IterationStatement* loop) {
int num_variables = 1 + info()->scope()->num_parameters() +
info()->scope()->num_stack_slots();
- BitVector* bits = new (zone()) BitVector(num_variables, zone());
+ BitVector* bits = new (zone_) BitVector(num_variables, zone_);
if (info()->is_osr() && info()->osr_ast_id() == loop->OsrEntryId())
bits->AddAll();
loop_stack_.push_back(bits);
@@ -77,6 +77,12 @@ void ALAA::VisitSuperCallReference(SuperCallReference* leaf) {}
void ALAA::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
+void ALAA::VisitDoExpression(DoExpression* expr) {
+ Visit(expr->block());
+ Visit(expr->result());
+}
+
+
void ALAA::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
@@ -255,7 +261,9 @@ void ALAA::VisitForInStatement(ForInStatement* loop) {
void ALAA::VisitForOfStatement(ForOfStatement* loop) {
+ Visit(loop->assign_iterator());
Enter(loop);
+ Visit(loop->assign_each());
Visit(loop->each());
Visit(loop->subject());
Visit(loop->body());
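
Note: the reordering matters for assignment analysis: the iterator is obtained once, before the loop header, while the per-iteration element assignment happens inside the loop. A plain C++ analogue of the desugared shape (the iterator protocol is simplified):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> subject = {1, 2, 3};
      auto it = subject.begin();  // assign_iterator: evaluated once, before Enter()
      while (it != subject.end()) {
        int each = *it++;         // assign_each: re-assigned on every iteration
        std::printf("%d\n", each);
      }
      return 0;
    }

Visiting assign_iterator outside Enter()/Exit() keeps its variable out of the loop-assigned bit vector, while assign_each is correctly recorded as assigned inside the loop.
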
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
index cd56d0a7ef..d7b390009d 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
@@ -53,6 +53,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
private:
CompilationInfo* info_;
+ Zone* zone_;
ZoneDeque<BitVector*> loop_stack_;
LoopAssignmentAnalysis* result_;
@@ -70,8 +71,8 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstLoopAssignmentAnalyzer);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
diff --git a/deps/v8/src/compiler/binary-operator-reducer.cc b/deps/v8/src/compiler/binary-operator-reducer.cc
new file mode 100644
index 0000000000..43d26d8884
--- /dev/null
+++ b/deps/v8/src/compiler/binary-operator-reducer.cc
@@ -0,0 +1,128 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/binary-operator-reducer.h"
+
+#include <algorithm>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BinaryOperatorReducer::BinaryOperatorReducer(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine)
+ : AdvancedReducer(editor),
+ graph_(graph),
+ common_(common),
+ machine_(machine),
+ dead_(graph->NewNode(common->Dead())) {}
+
+
+Reduction BinaryOperatorReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Mul:
+ return ReduceFloat52Mul(node);
+ case IrOpcode::kFloat64Div:
+ return ReduceFloat52Div(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction BinaryOperatorReducer::ReduceFloat52Mul(Node* node) {
+ if (!machine()->Is64()) return NoChange();
+
+ Float64BinopMatcher m(node);
+ if (!m.left().IsChangeInt32ToFloat64() ||
+ !m.right().IsChangeInt32ToFloat64()) {
+ return NoChange();
+ }
+
+ Type* type = NodeProperties::GetType(node);
+ Type::RangeType* range = type->GetRange();
+
+ // JavaScript numbers have 52 bits of precision in multiplication
+ if (range == nullptr || range->Min() < 0.0 ||
+ range->Max() > 0xFFFFFFFFFFFFFULL) {
+ return NoChange();
+ }
+
+ Node* mul = graph()->NewNode(machine()->Int64Mul(), m.left().InputAt(0),
+ m.right().InputAt(0));
+ Revisit(mul);
+
+ Type* range_type = Type::Range(range->Min(), range->Max(), graph()->zone());
+
+ // TODO(indutny): Is Type::Number() a proper thing here? It looks like
+ // every other place is using Type::Internal() for int64 values.
+ // Should we off-load range propagation to Typer?
+ NodeProperties::SetType(
+ mul, Type::Intersect(range_type, Type::Number(), graph()->zone()));
+
+ Node* out = graph()->NewNode(machine()->RoundInt64ToFloat64(), mul);
+ return Replace(out);
+}
+
+
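
Note: the soundness argument for this reduction, made concrete: a double carries a 53-bit significand, so any integer product the range analysis bounds below 2^52 is represented exactly, and Int64Mul followed by RoundInt64ToFloat64 matches Float64Mul bit for bit. A quick check, with values chosen so the product stays in range:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t a = 60000011, b = 50000021;       // both fit in int32
      int64_t exact = int64_t{a} * int64_t{b};  // 3000001810000231 < 2^52
      double via_f64 = double(a) * double(b);   // what Float64Mul computes
      assert(static_cast<double>(exact) == via_f64);
      return 0;
    }
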
+Reduction BinaryOperatorReducer::ReduceFloat52Div(Node* node) {
+ if (!machine()->Is64()) return NoChange();
+
+ Float64BinopMatcher m(node);
+ if (!m.left().IsRoundInt64ToFloat64()) return NoChange();
+
+ // Right value should be positive...
+ if (!m.right().HasValue() || m.right().Value() <= 0) return NoChange();
+
+ // ...integer...
+ int64_t value = static_cast<int64_t>(m.right().Value());
+ if (value != static_cast<int64_t>(m.right().Value())) return NoChange();
+
+ // ...and should be a power of two.
+ if (!base::bits::IsPowerOfTwo64(value)) return NoChange();
+
+ Node* left = m.left().InputAt(0);
+ Type::RangeType* range = NodeProperties::GetType(left)->GetRange();
+
+ // The result should fit into a 32-bit word
+ int64_t min = static_cast<int64_t>(range->Min()) / value;
+ int64_t max = static_cast<int64_t>(range->Max()) / value;
+ if (min < 0 || max > 0xFFFFFFFLL) {
+ return NoChange();
+ }
+
+ int64_t shift = WhichPowerOf2_64(static_cast<int64_t>(m.right().Value()));
+
+ // Replace the division with a 64-bit right shift
+ Node* shr =
+ graph()->NewNode(machine()->Word64Shr(), left,
+ graph()->NewNode(common()->Int64Constant(shift)));
+ Revisit(shr);
+
+ Node* out = graph()->NewNode(machine()->RoundInt64ToFloat64(), shr);
+ return Replace(out);
+}
+
+
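
Note: the division rewrite relies on the two facts the guards establish: the dividend's range is non-negative and the divisor is a positive power of two, so x / (1 << s) equals x >> s. A sketch, including a portable way to recover the shift amount (the reducer itself uses WhichPowerOf2_64):

    #include <cassert>
    #include <cstdint>

    // Shift amount for a power-of-two divisor: count trailing zero bits.
    int WhichPowerOfTwo(int64_t value) {
      int shift = 0;
      while ((value & 1) == 0) { value >>= 1; ++shift; }
      return shift;
    }

    int main() {
      int64_t x = 3000001810000231LL;  // non-negative, fits in 52 bits
      int64_t d = int64_t{1} << 12;
      assert(WhichPowerOfTwo(d) == 12);
      assert(x / d == x >> 12);        // holds only because x >= 0
      return 0;
    }
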
+Reduction BinaryOperatorReducer::Change(Node* node, Operator const* op,
+ Node* a) {
+ node->ReplaceInput(0, a);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/binary-operator-reducer.h b/deps/v8/src/compiler/binary-operator-reducer.h
new file mode 100644
index 0000000000..fd0d381c30
--- /dev/null
+++ b/deps/v8/src/compiler/binary-operator-reducer.h
@@ -0,0 +1,52 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BINARY_OPERATOR_REDUCER_H_
+#define V8_COMPILER_BINARY_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class MachineOperatorBuilder;
+class Operator;
+
+
+// Performs strength reduction on nodes that have common operators.
+class BinaryOperatorReducer final : public AdvancedReducer {
+ public:
+ BinaryOperatorReducer(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine);
+ ~BinaryOperatorReducer() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceFloat52Mul(Node* node);
+ Reduction ReduceFloat52Div(Node* node);
+
+ Reduction Change(Node* node, Operator const* op, Node* a);
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ Node* dead() const { return dead_; }
+
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ MachineOperatorBuilder* const machine_;
+ Node* const dead_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BINARY_OPERATOR_REDUCER_H_
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
new file mode 100644
index 0000000000..bc56e73a08
--- /dev/null
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -0,0 +1,269 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/branch-elimination.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ node_conditions_(zone, js_graph->graph()->NodeCount()),
+ zone_(zone),
+ dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+
+
+BranchElimination::~BranchElimination() {}
+
+
+Reduction BranchElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kDead:
+ return NoChange();
+ case IrOpcode::kMerge:
+ return ReduceMerge(node);
+ case IrOpcode::kLoop:
+ return ReduceLoop(node);
+ case IrOpcode::kBranch:
+ return ReduceBranch(node);
+ case IrOpcode::kIfFalse:
+ return ReduceIf(node, false);
+ case IrOpcode::kIfTrue:
+ return ReduceIf(node, true);
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ if (node->op()->ControlOutputCount() > 0) {
+ return ReduceOtherControl(node);
+ }
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction BranchElimination::ReduceBranch(Node* node) {
+ Node* condition = node->InputAt(0);
+ Node* control_input = NodeProperties::GetControlInput(node, 0);
+ const ControlPathConditions* from_input = node_conditions_.Get(control_input);
+ if (from_input != nullptr) {
+ Maybe<bool> condition_value = from_input->LookupCondition(condition);
+ // If we know the condition we can discard the branch.
+ if (condition_value.IsJust()) {
+ bool known_value = condition_value.FromJust();
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, known_value ? control_input : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, known_value ? dead() : control_input);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return Replace(dead());
+ }
+ }
+ return TakeConditionsFromFirstControl(node);
+}
+
+
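
Note: when the incoming path already pins the condition, the branch's two projections are rewired rather than evaluated: the taken side inherits the incoming control edge and the other side becomes Dead. The decision table, as a tiny standalone function (names invented):

    #include <cstdio>
    #include <optional>

    enum class Succ { kIfTrue, kIfFalse };

    // Mirrors the use-rewiring above: with a known condition value the taken
    // projection keeps the incoming control, the other is replaced with Dead.
    const char* Rewire(Succ use, std::optional<bool> known) {
      if (!known.has_value()) return "keep branch";
      bool taken = (use == Succ::kIfTrue) == known.value();
      return taken ? "incoming control" : "Dead";
    }

    int main() {
      std::printf("%s\n", Rewire(Succ::kIfTrue, true));          // incoming control
      std::printf("%s\n", Rewire(Succ::kIfFalse, true));         // Dead
      std::printf("%s\n", Rewire(Succ::kIfTrue, std::nullopt));  // keep branch
      return 0;
    }
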
+Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
+ // Add the condition to the list arriving from the input branch.
+ Node* branch = NodeProperties::GetControlInput(node, 0);
+ const ControlPathConditions* from_branch = node_conditions_.Get(branch);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (from_branch == nullptr) {
+ DCHECK(node_conditions_.Get(node) == nullptr);
+ return NoChange();
+ }
+ Node* condition = branch->InputAt(0);
+ return UpdateConditions(
+ node, from_branch->AddCondition(zone_, condition, is_true_branch));
+}
+
+
+Reduction BranchElimination::ReduceLoop(Node* node) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just use
+ // the information from the loop entry edge.
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::ReduceMerge(Node* node) {
+ // Shortcut for the case when we do not know anything about some
+ // input.
+ for (int i = 0; i < node->InputCount(); i++) {
+ if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+ DCHECK(node_conditions_.Get(node) == nullptr);
+ return NoChange();
+ }
+ }
+
+ const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+ // Make a copy of the first input's conditions and merge with the conditions
+ // from other inputs.
+ ControlPathConditions* conditions =
+ new (zone_->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(*first);
+ for (int i = 1; i < node->InputCount(); i++) {
+ conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+ }
+
+ return UpdateConditions(node, conditions);
+}
+
+
+Reduction BranchElimination::ReduceStart(Node* node) {
+ return UpdateConditions(node, ControlPathConditions::Empty(zone_));
+}
+
+
+const BranchElimination::ControlPathConditions*
+BranchElimination::PathConditionsForControlNodes::Get(Node* node) {
+ if (static_cast<size_t>(node->id()) < info_for_node_.size()) {
+ return info_for_node_[node->id()];
+ }
+ return nullptr;
+}
+
+
+void BranchElimination::PathConditionsForControlNodes::Set(
+ Node* node, const ControlPathConditions* conditions) {
+ size_t index = static_cast<size_t>(node->id());
+ if (index >= info_for_node_.size()) {
+ info_for_node_.resize(index + 1, nullptr);
+ }
+ info_for_node_[index] = conditions;
+}
+
+
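
Note: Get/Set implement the usual dense side-table pattern: node ids index a growable vector, and nullptr doubles as "not yet computed". A generic version of the same structure, for illustration:

    #include <cstddef>
    #include <vector>

    // Per-node side table keyed by dense node ids; cheaper than a hash map,
    // and nullptr naturally encodes "no information computed yet".
    template <typename T>
    class NodeSideTable {
     public:
      const T* Get(size_t id) const {
        return id < info_.size() ? info_[id] : nullptr;
      }
      void Set(size_t id, const T* value) {
        if (id >= info_.size()) info_.resize(id + 1, nullptr);
        info_[id] = value;
      }

     private:
      std::vector<const T*> info_;
    };

    int main() {
      NodeSideTable<int> table;
      static const int kInfo = 7;
      table.Set(42, &kInfo);
      return table.Get(42) == &kInfo && table.Get(7) == nullptr ? 0 : 1;
    }
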
+Reduction BranchElimination::ReduceOtherControl(Node* node) {
+ DCHECK_EQ(1, node->op()->ControlInputCount());
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) {
+ // We just propagate the information from the control input (ideally,
+ // we would only revisit control uses if there is change).
+ const ControlPathConditions* from_input =
+ node_conditions_.Get(NodeProperties::GetControlInput(node, 0));
+ return UpdateConditions(node, from_input);
+}
+
+
+Reduction BranchElimination::UpdateConditions(
+ Node* node, const ControlPathConditions* conditions) {
+ const ControlPathConditions* original = node_conditions_.Get(node);
+ // Only signal that the node has Changed if the condition information has
+ // changed.
+ if (conditions != original) {
+ if (original == nullptr || *conditions != *original) {
+ node_conditions_.Set(node, conditions);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+// static
+const BranchElimination::ControlPathConditions*
+BranchElimination::ControlPathConditions::Empty(Zone* zone) {
+ return new (zone->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(nullptr, 0);
+}
+
+
+void BranchElimination::ControlPathConditions::Merge(
+ const ControlPathConditions& other) {
+ // Change the current condition list to a longest common tail
+ // of this condition list and the other list. (The common tail
+ // should correspond to the list from the common dominator.)
+
+ // First, we throw away the prefix of the longer list, so that
+ // we have lists of the same length.
+ size_t other_size = other.condition_count_;
+ BranchCondition* other_condition = other.head_;
+ while (other_size > condition_count_) {
+ other_condition = other_condition->next;
+ other_size--;
+ }
+ while (condition_count_ > other_size) {
+ head_ = head_->next;
+ condition_count_--;
+ }
+
+ // Then we go through both lists in lock-step until we find
+ // the common tail.
+ while (head_ != other_condition) {
+ DCHECK(condition_count_ > 0);
+ condition_count_--;
+ other_condition = other_condition->next;
+ head_ = head_->next;
+ }
+}
+
+
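
Note: Merge computes the longest common tail of the incoming condition lists: every list ends in the suffix contributed by the common dominator, so dropping the unequal prefixes leaves exactly the conditions that hold on all incoming paths. A standalone version over a bare linked list:

    #include <cassert>
    #include <cstddef>

    struct Cond { int id; const Cond* next; };

    // Equalize lengths, then advance in lock-step until the nodes coincide.
    const Cond* CommonTail(const Cond* a, size_t la, const Cond* b, size_t lb) {
      while (la > lb) { a = a->next; --la; }
      while (lb > la) { b = b->next; --lb; }
      while (a != b) { a = a->next; b = b->next; }
      return a;
    }

    int main() {
      Cond dom{1, nullptr};                // condition from the common dominator
      Cond left{2, &dom}, right{3, &dom};  // diverging prefixes
      assert(CommonTail(&left, 2, &right, 2) == &dom);
      return 0;
    }
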
+const BranchElimination::ControlPathConditions*
+BranchElimination::ControlPathConditions::AddCondition(Zone* zone,
+ Node* condition,
+ bool is_true) const {
+ DCHECK(LookupCondition(condition).IsNothing());
+
+ BranchCondition* new_head = new (zone->New(sizeof(BranchCondition)))
+ BranchCondition(condition, is_true, head_);
+
+ ControlPathConditions* conditions =
+ new (zone->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(new_head, condition_count_ + 1);
+ return conditions;
+}
+
+
+Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
+ Node* condition) const {
+ for (BranchCondition* current = head_; current != nullptr;
+ current = current->next) {
+ if (current->condition == condition) {
+ return Just<bool>(current->is_true);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
+bool BranchElimination::ControlPathConditions::operator==(
+ const ControlPathConditions& other) const {
+ if (condition_count_ != other.condition_count_) return false;
+ BranchCondition* this_condition = head_;
+ BranchCondition* other_condition = other.head_;
+ while (true) {
+ if (this_condition == other_condition) return true;
+ if (this_condition->condition != other_condition->condition ||
+ this_condition->is_true != other_condition->is_true) {
+ return false;
+ }
+ this_condition = this_condition->next;
+ other_condition = other_condition->next;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
new file mode 100644
index 0000000000..a7ac926c7a
--- /dev/null
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSGraph;
+
+
+class BranchElimination final : public AdvancedReducer {
+ public:
+ BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
+ ~BranchElimination() final;
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct BranchCondition {
+ Node* condition;
+ bool is_true;
+ BranchCondition* next;
+
+ BranchCondition(Node* condition, bool is_true, BranchCondition* next)
+ : condition(condition), is_true(is_true), next(next) {}
+ };
+
+ // Class for tracking information about branch conditions.
+ // At the moment it is a linked list of conditions and their values
+ // (true or false).
+ class ControlPathConditions {
+ public:
+ Maybe<bool> LookupCondition(Node* condition) const;
+
+ const ControlPathConditions* AddCondition(Zone* zone, Node* condition,
+ bool is_true) const;
+ static const ControlPathConditions* Empty(Zone* zone);
+ void Merge(const ControlPathConditions& other);
+
+ bool operator==(const ControlPathConditions& other) const;
+ bool operator!=(const ControlPathConditions& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ ControlPathConditions(BranchCondition* head, size_t condition_count)
+ : head_(head), condition_count_(condition_count) {}
+
+ BranchCondition* head_;
+ // We keep track of the list length so that we can find the longest
+ // common tail easily.
+ size_t condition_count_;
+ };
+
+ // Maps each control node to the condition information known about the node.
+ // If the information is nullptr, then we have not calculated the information
+ // yet.
+ class PathConditionsForControlNodes {
+ public:
+ PathConditionsForControlNodes(Zone* zone, size_t size_hint)
+ : info_for_node_(size_hint, nullptr, zone) {}
+ const ControlPathConditions* Get(Node* node);
+ void Set(Node* node, const ControlPathConditions* conditions);
+
+ private:
+ ZoneVector<const ControlPathConditions*> info_for_node_;
+ };
+
+ Reduction ReduceBranch(Node* node);
+ Reduction ReduceIf(Node* node, bool is_true_branch);
+ Reduction ReduceLoop(Node* node);
+ Reduction ReduceMerge(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherControl(Node* node);
+
+ Reduction TakeConditionsFromFirstControl(Node* node);
+ Reduction UpdateConditions(Node* node,
+ const ControlPathConditions* conditions);
+
+ Node* dead() const { return dead_; }
+
+ PathConditionsForControlNodes node_conditions_;
+ Zone* zone_;
+ Node* dead_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 5ba18ffc97..e113833dc1 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -188,7 +188,14 @@ void BytecodeGraphBuilder::VisitLdaZero(
void BytecodeGraphBuilder::VisitLdaSmi8(
const interpreter::BytecodeArrayIterator& iterator) {
- Node* node = jsgraph()->Constant(iterator.GetSmi8Operand(0));
+ Node* node = jsgraph()->Constant(iterator.GetImmediateOperand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
environment()->BindAccumulator(node);
}
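
Note: VisitLdaSmi8 now goes through GetImmediateOperand, where the small integer is encoded inline in the bytecode stream, while the new Wide variant pulls its value from the function's constant pool by index. A toy decoder showing the difference (the byte layout here is an assumption for illustration, not V8's actual encoding):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Inline immediate: the operand byte *is* the (sign-extended) value.
    int32_t GetImmediateOperand(const std::vector<uint8_t>& bc, size_t pc) {
      return static_cast<int8_t>(bc[pc + 1]);
    }

    // Constant-pool operand: the byte is an index into a separate pool.
    double GetConstantOperand(const std::vector<uint8_t>& bc, size_t pc,
                              const std::vector<double>& pool) {
      return pool[bc[pc + 1]];
    }

    int main() {
      std::vector<uint8_t> bc = {0x01, 0xFF,   // hypothetical LdaSmi8 -1
                                 0x02, 0x00};  // hypothetical LdaConstant pool[0]
      std::vector<double> pool = {3.14};
      std::printf("%d\n", GetImmediateOperand(bc, 0));       // -1
      std::printf("%f\n", GetConstantOperand(bc, 2, pool));  // 3.14
      return 0;
    }
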
@@ -249,31 +256,228 @@ void BytecodeGraphBuilder::VisitStar(
}
-void BytecodeGraphBuilder::VisitLdaGlobal(
+void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLdaContextSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStaContextSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
}
-void BytecodeGraphBuilder::VisitLoadIC(
+void BytecodeGraphBuilder::VisitStoreICStrict(
const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
}
-void BytecodeGraphBuilder::VisitKeyedLoadIC(
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
}
-void BytecodeGraphBuilder::VisitStoreIC(
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
}
-void BytecodeGraphBuilder::VisitKeyedStoreIC(
+void BytecodeGraphBuilder::VisitStoreICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitPushContext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitPopContext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCreateClosure(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCreateMappedArguments(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCreateObjectLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
}
@@ -285,6 +489,30 @@ void BytecodeGraphBuilder::VisitCall(
}
+void BytecodeGraphBuilder::VisitCallRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCallJSRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitNew(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitThrow(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
void BytecodeGraphBuilder::BuildBinaryOp(
const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
@@ -332,6 +560,78 @@ void BytecodeGraphBuilder::VisitMod(
}
+void BytecodeGraphBuilder::VisitBitwiseOr(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->BitwiseOr(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseXor(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->BitwiseXor(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseAnd(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->BitwiseAnd(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftLeft(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->ShiftLeft(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftRight(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->ShiftRight(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftRightLogical(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->ShiftRightLogical(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitInc(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitDec(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLogicalNot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTypeOf(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitDeletePropertyStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitDeletePropertySloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
void BytecodeGraphBuilder::VisitTestEqual(
const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
@@ -393,7 +693,25 @@ void BytecodeGraphBuilder::VisitTestInstanceOf(
void BytecodeGraphBuilder::VisitToBoolean(
- const interpreter::BytecodeArrayIterator& ToBoolean) {
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitToName(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitToNumber(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitToObject(
+ const interpreter::BytecodeArrayIterator& iterator) {
UNIMPLEMENTED();
}
@@ -434,6 +752,54 @@ void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
}
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNull(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNullConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefined(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
void BytecodeGraphBuilder::VisitReturn(
const interpreter::BytecodeArrayIterator& iterator) {
Node* control =
@@ -442,6 +808,24 @@ void BytecodeGraphBuilder::VisitReturn(
}
+void BytecodeGraphBuilder::VisitForInPrepare(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitForInNext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitForInDone(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
if (size > input_buffer_size_) {
size = size + kInputBufferSizeIncrement + input_buffer_size_;
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 76ddd2ed7d..1c91f7c118 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -15,7 +15,7 @@ namespace compiler {
namespace {
LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(Register::ToAllocationIndex(reg));
+ return LinkageLocation::ForRegister(reg.code());
}
@@ -223,6 +223,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
CallDescriptor::kNoFlags, // flags
"c-call");
}
-}
-}
-}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index 4421c4f3e3..8552cdf792 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -66,7 +66,7 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
Callable callable = CodeFactory::AllocateHeapNumber(isolate());
Node* target = jsgraph()->HeapConstant(callable.code());
Node* context = jsgraph()->NoContextConstant();
- Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
+ Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
if (!allocate_heap_number_operator_.is_set()) {
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -78,7 +78,7 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
Node* store = graph()->NewNode(
machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
- return graph()->NewNode(common()->Finish(1), heap_number, store);
+ return graph()->NewNode(common()->FinishRegion(), heap_number, store);
}
@@ -151,7 +151,78 @@ Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
- return Replace(AllocateHeapNumberWithValue(value, control));
+ Type* const value_type = NodeProperties::GetType(value);
+ Node* const value32 = graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
+ // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
+ // support in the generic JavaScript pipeline, because LoadBuffer is lying
+ // about its result.
+ // if (value_type->Is(Type::Signed32())) {
+ // return ChangeInt32ToTagged(value32, control);
+ // }
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
+
+ Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
+ Node* vbox;
+
+ // We only need to check for -0 if the {value} can potentially contain -0.
+ if (value_type->Maybe(Type::MinusZero())) {
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+ Node* branch_negative = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+ Node* if_notnegative =
+ graph()->NewNode(common()->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+ }
+
+ // On 64-bit machines we can just wrap the 32-bit integer in a smi; on 32-bit
+ // machines we need to deal with potential overflow and fall back to boxing.
+ if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
+ vsmi = ChangeInt32ToSmi(value32);
+ } else {
+ Node* smi_tag =
+ graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+
+ Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+ Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+ }
+
+ // Allocate the box for the {value}.
+ vbox = AllocateHeapNumberWithValue(value, if_box);
+
+ control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
+ value =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vsmi, vbox, control);
+ return Replace(value);
}
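
Editor's note: this hunk does two things at once. It replaces the old ValueEffect/Finish pair with BeginRegion/FinishRegion, which bracket the allocation-plus-initialization of the HeapNumber so it behaves atomically in the effect chain, and it adds a Smi fast path so that float64 values which fit in a tagged integer skip allocation entirely. The following is a standalone sketch (plain C++, not part of the patch, not V8 API) of the Smi/box decision the lowering now encodes as graph branches; the explicit range guard stands in for the graph's Float64Equal round-trip check, which also rejects NaN.

    #include <cstdint>
    #include <cstring>

    // Models the 32-bit target; per the hunk, 64-bit targets skip the
    // overflow check and can always tag an int32-representable value.
    bool CanRepresentAsSmi32(double value) {
      if (!(value >= -2147483648.0 && value < 2147483648.0)) return false;
      int32_t value32 = static_cast<int32_t>(value);  // truncate toward zero
      if (static_cast<double>(value32) != value) return false;  // fractional
      if (value32 == 0) {
        // 0 vs -0: inspect the sign in the high word, as the lowering does
        // with Float64ExtractHighWord32; -0 must be boxed as a HeapNumber.
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        if (static_cast<int32_t>(bits >> 32) < 0) return false;
      }
      // Smi-tagging shifts left by one, so value32 + value32 must not
      // overflow (the Int32AddWithOverflow projection in the graph).
      int64_t tagged = static_cast<int64_t>(value32) * 2;
      return tagged == static_cast<int32_t>(tagged);
    }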
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 83cbd22604..b801bdfc70 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -43,6 +43,10 @@ class InstructionOperandConverter {
return ToConstant(instr_->InputAt(index)).ToInt32();
}
+ int64_t InputInt64(size_t index) {
+ return ToConstant(instr_->InputAt(index)).ToInt64();
+ }
+
int8_t InputInt8(size_t index) {
return static_cast<int8_t>(InputInt32(index));
}
@@ -96,12 +100,11 @@ class InstructionOperandConverter {
}
Register ToRegister(InstructionOperand* op) {
- return Register::FromAllocationIndex(RegisterOperand::cast(op)->index());
+ return LocationOperand::cast(op)->GetRegister();
}
DoubleRegister ToDoubleRegister(InstructionOperand* op) {
- return DoubleRegister::FromAllocationIndex(
- DoubleRegisterOperand::cast(op)->index());
+ return LocationOperand::cast(op)->GetDoubleRegister();
}
Constant ToConstant(InstructionOperand* op) {
@@ -144,12 +147,15 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
+ Frame* frame() const { return frame_; }
+ Isolate* isolate() const { return masm()->isolate(); }
MacroAssembler* masm() const { return masm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
+ Frame* const frame_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 91602a02a3..97f780d1cb 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/address-map.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/frames-inl.h"
-#include "src/snapshot/serialize.h" // TODO(turbofan): RootIndexMap
namespace v8 {
namespace internal {
@@ -83,9 +83,9 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
- for (auto shared_info : info->inlined_functions()) {
- if (!shared_info.is_identical_to(info->shared_info())) {
- DefineDeoptimizationLiteral(shared_info);
+ for (auto& inlined : info->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.shared_info);
}
}
inlined_function_count_ = deoptimization_literals_.size();
@@ -193,7 +193,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
- if (!info->ShouldEnsureSpaceForLazyDeopt()) {
+ if (info->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
@@ -220,15 +220,14 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (auto& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
- int index = StackSlotOperand::cast(operand).index();
+ int index = LocationOperand::cast(operand).index();
DCHECK(index >= 0);
// Safepoint table indices are 0-based from the beginning of the spill
// slot area, adjust appropriately.
index -= stackSlotToSpillSlotDelta;
safepoint.DefinePointerSlot(index, zone());
} else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
- Register reg =
- Register::FromAllocationIndex(RegisterOperand::cast(operand).index());
+ Register reg = LocationOperand::cast(operand).GetRegister();
safepoint.DefinePointerRegister(reg, zone());
}
}
@@ -590,21 +589,20 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
MachineType type) {
if (op->IsStackSlot()) {
if (type == kMachBool || type == kRepBit) {
- translation->StoreBoolStackSlot(StackSlotOperand::cast(op)->index());
+ translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
} else if (type == kMachInt32 || type == kMachInt8 || type == kMachInt16) {
- translation->StoreInt32StackSlot(StackSlotOperand::cast(op)->index());
+ translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
} else if (type == kMachUint32 || type == kMachUint16 ||
type == kMachUint8) {
- translation->StoreUint32StackSlot(StackSlotOperand::cast(op)->index());
+ translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
} else if ((type & kRepMask) == kRepTagged) {
- translation->StoreStackSlot(StackSlotOperand::cast(op)->index());
+ translation->StoreStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK(false);
}
} else if (op->IsDoubleStackSlot()) {
DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
- translation->StoreDoubleStackSlot(
- DoubleStackSlotOperand::cast(op)->index());
+ translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
if (type == kMachBool || type == kRepBit) {
@@ -633,6 +631,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
constant_object =
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
+ case Constant::kFloat32:
+ DCHECK((type & (kRepFloat32 | kRepTagged)) != 0);
+ constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
+ break;
case Constant::kFloat64:
DCHECK((type & (kRepFloat64 | kRepTagged)) != 0);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
@@ -662,7 +664,7 @@ void CodeGenerator::MarkLazyDeoptSite() {
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
- : masm_(gen->masm()), next_(gen->ools_) {
+ : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
}
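
Editor's note: besides fixing the inverted ShouldEnsureSpaceForLazyDeopt condition and adding kFloat32 constants, this section threads the frame through to out-of-line fragments (OutOfLineRecordWrite later in this patch uses frame()->DidAllocateDoubleRegisters() to pick the FP save mode). The constructor also shows how out-of-line code self-registers: each instance links itself onto the generator's intrusive list at construction. A toy restatement of that pattern, with hypothetical names:

    #include <iostream>

    struct Generator;

    struct OutOfLine {
      explicit OutOfLine(Generator* gen);
      virtual ~OutOfLine() = default;
      virtual void Generate() = 0;
      OutOfLine* next;
    };

    struct Generator {
      OutOfLine* ools = nullptr;  // head of the intrusive list
      void EmitOutOfLineCode() {  // runs after the main instruction stream
        for (OutOfLine* ool = ools; ool != nullptr; ool = ool->next) {
          ool->Generate();
        }
      }
    };

    // cf. the OutOfLineCode constructor in the hunk above.
    OutOfLine::OutOfLine(Generator* gen) : next(gen->ools) { gen->ools = this; }

    struct OutOfLineTrap final : OutOfLine {
      using OutOfLine::OutOfLine;
      void Generate() override { std::cout << "trap stub\n"; }
    };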
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index e4af2ad1f9..e3c2ecad6e 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -67,6 +67,8 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kGuard:
+ return ReduceGuard(node);
default:
break;
}
@@ -358,6 +360,16 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
}
+Reduction CommonOperatorReducer::ReduceGuard(Node* node) {
+ DCHECK_EQ(IrOpcode::kGuard, node->opcode());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetTypeOrAny(input);
+ Type* const guard_type = OpParameter<Type*>(node);
+ if (input_type->Is(guard_type)) return Replace(input);
+ return NoChange();
+}
+
+
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
node->ReplaceInput(0, a);
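
Editor's note: ReduceGuard is a pure type-based simplification: if the input's static type already entails the guarded type, the Guard node adds no information and the input can stand in for it. A toy restatement (standalone C++, not the patch's code), using a bitset lattice in the same spirit as V8's bitset types:

    #include <cstdint>

    // One bit per primitive type; subtyping is bitset inclusion.
    struct Type {
      uint32_t bits;
      bool Is(Type that) const { return (bits & ~that.bits) == 0; }
    };

    constexpr Type kSigned32{0x1};
    constexpr Type kNumber{0x1 | 0x2};  // hypothetical: integers and doubles

    // Mirrors ReduceGuard: replace the guard by its input when redundant.
    bool GuardIsRedundant(Type input_type, Type guard_type) {
      return input_type.Is(guard_type);  // e.g. Signed32 under a Number guard
    }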
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 8582d6b633..7184755885 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -35,6 +35,7 @@ class CommonOperatorReducer final : public AdvancedReducer {
Reduction ReducePhi(Node* node);
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
+ Reduction ReduceGuard(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index bacaae980f..51f93da3b4 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -123,10 +123,17 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
V(Deoptimize, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
- V(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)
+ V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
+ V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
+
+
+#define CACHED_RETURN_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3)
#define CACHED_END_LIST(V) \
@@ -249,6 +256,19 @@ struct CommonOperatorGlobalCache final {
CACHED_END_LIST(CACHED_END)
#undef CACHED_END
+ template <size_t kInputCount>
+ struct ReturnOperator final : public Operator {
+ ReturnOperator()
+ : Operator( // --
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ kInputCount, 1, 1, 0, 0, 1) {} // counts
+ };
+#define CACHED_RETURN(input_count) \
+ ReturnOperator<input_count> kReturn##input_count##Operator;
+ CACHED_RETURN_LIST(CACHED_RETURN)
+#undef CACHED_RETURN
+
template <BranchHint kBranchHint>
struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
@@ -397,6 +417,24 @@ const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
}
+const Operator* CommonOperatorBuilder::Return(int value_input_count) {
+ switch (value_input_count) {
+#define CACHED_RETURN(input_count) \
+ case input_count: \
+ return &cache_.kReturn##input_count##Operator;
+ CACHED_RETURN_LIST(CACHED_RETURN)
+#undef CACHED_RETURN
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone()) Operator( // --
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ value_input_count, 1, 1, 0, 0, 1); // counts
+}
+
+
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
switch (hint) {
case BranchHint::kNone:
@@ -633,6 +671,15 @@ const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
}
+const Operator* CommonOperatorBuilder::Guard(Type* type) {
+ return new (zone()) Operator1<Type*>( // --
+ IrOpcode::kGuard, Operator::kKontrol, // opcode
+ "Guard", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ type); // parameter
+}
+
+
const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
DCHECK(arguments > 1); // Disallow empty/singleton sets.
return new (zone()) Operator( // --
@@ -642,24 +689,6 @@ const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
}
-const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
- DCHECK(arguments > 0); // Disallow empty value effects.
- return new (zone()) Operator( // --
- IrOpcode::kValueEffect, Operator::kPure, // opcode
- "ValueEffect", // name
- arguments, 0, 0, 0, 1, 0); // counts
-}
-
-
-const Operator* CommonOperatorBuilder::Finish(int arguments) {
- DCHECK(arguments > 0); // Disallow empty finishes.
- return new (zone()) Operator( // --
- IrOpcode::kFinish, Operator::kPure, // opcode
- "Finish", // name
- 1, arguments, 0, 1, 0, 0); // counts
-}
-
-
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
switch (arguments) {
#define CACHED_STATE_VALUES(arguments) \
@@ -720,6 +749,11 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
}
+const Operator* CommonOperatorBuilder::LazyBailout() {
+ return Call(Linkage::GetLazyBailoutDescriptor(zone()));
+}
+
+
const Operator* CommonOperatorBuilder::TailCall(
const CallDescriptor* descriptor) {
class TailCallOperator final : public Operator1<const CallDescriptor*> {
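
Editor's note: Return becomes variadic here, and the common arities (one to three value inputs, per CACHED_RETURN_LIST) are served from statically constructed operators; only unusual arities allocate. The same caching trick, stripped of V8 types (a sketch, not the patch's code; V8 keeps the instances in CommonOperatorGlobalCache and zone-allocates the fallback):

    struct Operator {
      const char* name;
      int value_input_count;
    };

    template <int kInputCount>
    struct ReturnOperator : Operator {
      ReturnOperator() : Operator{"Return", kInputCount} {}
    };

    const Operator* Return(int value_input_count) {
      static const ReturnOperator<1> kReturn1;
      static const ReturnOperator<2> kReturn2;
      static const ReturnOperator<3> kReturn3;
      switch (value_input_count) {
        case 1: return &kReturn1;
        case 2: return &kReturn2;
        case 3: return &kReturn3;
        default:
          // Uncached arity (leaks here for brevity; V8 uses a Zone).
          return new Operator{"Return", value_input_count};
      }
    }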
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 22490f7fe1..2ef2880f6d 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -14,6 +14,10 @@ namespace internal {
// Forward declarations.
class ExternalReference;
+template <class>
+class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
namespace compiler {
@@ -121,7 +125,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* IfDefault();
const Operator* Throw();
const Operator* Deoptimize();
- const Operator* Return();
+ const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
const Operator* Start(int value_output_count);
@@ -145,8 +149,9 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Phi(MachineType type, int value_input_count);
const Operator* EffectPhi(int effect_input_count);
const Operator* EffectSet(int arguments);
- const Operator* ValueEffect(int arguments);
- const Operator* Finish(int arguments);
+ const Operator* Guard(Type* type);
+ const Operator* BeginRegion();
+ const Operator* FinishRegion();
const Operator* StateValues(int arguments);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
@@ -155,6 +160,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Call(const CallDescriptor* descriptor);
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
+ const Operator* LazyBailout();
// Constructs a new merge or phi operator with the same opcode as {op}, but
// with {size} inputs.
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index bb0ed140d9..6905ef589f 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -153,6 +153,16 @@ void BlockBuilder::BreakWhen(Node* condition, BranchHint hint) {
}
+void BlockBuilder::BreakUnless(Node* condition, BranchHint hint) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition, hint);
+ control_if.Then();
+ control_if.Else();
+ Break();
+ control_if.End();
+}
+
+
void BlockBuilder::EndBlock() {
break_environment_->Merge(environment());
set_environment(break_environment_);
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index 9f3afce836..0c2fa73936 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -133,6 +133,7 @@ class BlockBuilder final : public ControlBuilder {
// Compound control commands for conditional break.
void BreakWhen(Node* condition, BranchHint = BranchHint::kNone);
+ void BreakUnless(Node* condition, BranchHint hint = BranchHint::kNone);
private:
Environment* break_environment_; // Environment after the block exits.
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 0b066783c3..9764b261ef 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -184,8 +184,8 @@ class FrameOffset {
static const int kFromSp = 1;
static const int kFromFp = 0;
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_FRAME_H_
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index bad0a92274..4107b0f7bf 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -75,7 +75,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
InstructionOperand source = move->source();
- if (source.EqualsModuloType(destination)) {
+ if (source.EqualsCanonicalized(destination)) {
move->Eliminate();
return;
}
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 1be0b6dec7..6f583d6b6a 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -23,6 +23,9 @@ enum class GraphReducer::State : uint8_t {
};
+void Reducer::Finalize() {}
+
+
GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
: graph_(graph),
dead_(dead),
@@ -58,7 +61,11 @@ void GraphReducer::ReduceNode(Node* node) {
Push(node);
}
} else {
- break;
+ // Run all finalizers.
+ for (Reducer* const reducer : reducers_) reducer->Finalize();
+
+ // Check if we have new nodes to revisit.
+ if (revisit_.empty()) break;
}
}
DCHECK(revisit_.empty());
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 39c302f892..273b5dd0cf 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -47,6 +47,11 @@ class Reducer {
// Try to reduce a node if possible.
virtual Reduction Reduce(Node* node) = 0;
+ // Invoked by the {GraphReducer} when all nodes are done. Can be used to
+ // do additional reductions at the end, which in turn can cause a new round
+ // of reductions.
+ virtual void Finalize();
+
// Helper functions for subclasses to produce reductions for a node.
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
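
Editor's note: the new Finalize hook turns the reducer loop into a multi-round fixed point: once the worklist drains, every reducer gets a chance to queue follow-up work, and the loop terminates only when a finalization round produces nothing to revisit. The control flow, reduced to a standalone skeleton (toy int node ids; the reduction step itself is elided):

    #include <stack>
    #include <vector>

    struct Reducer {
      virtual ~Reducer() = default;
      virtual void Reduce(int node) = 0;
      virtual void Finalize(std::stack<int>* /*revisit*/) {}  // may queue work
    };

    void RunToFixedPoint(const std::vector<Reducer*>& reducers,
                         std::stack<int>* stack, std::stack<int>* revisit) {
      for (;;) {
        if (!stack->empty()) {
          int node = stack->top();
          stack->pop();
          for (Reducer* r : reducers) r->Reduce(node);  // may refill *stack
        } else if (!revisit->empty()) {
          stack->push(revisit->top());
          revisit->pop();
        } else {
          // Worklist drained: run all finalizers; they may queue nodes to
          // revisit, starting another round.
          for (Reducer* r : reducers) r->Finalize(revisit);
          if (revisit->empty()) break;  // nothing new: true fixed point
        }
      }
    }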
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 07ca04532b..1b0997a6bd 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -674,8 +674,9 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
PrintIndent();
- PrintableInstruction printable = {RegisterConfiguration::ArchDefault(),
- instructions->InstructionAt(j)};
+ PrintableInstruction printable = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ instructions->InstructionAt(j)};
os_ << j << " " << printable << " <|@\n";
}
}
@@ -719,13 +720,13 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
- int assigned_reg = op.index();
if (op.IsDoubleRegister()) {
- os_ << " \"" << DoubleRegister::AllocationIndexToString(assigned_reg)
- << "\"";
+ DoubleRegister assigned_reg = op.GetDoubleRegister();
+ os_ << " \"" << assigned_reg.ToString() << "\"";
} else {
DCHECK(op.IsRegister());
- os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
+ Register assigned_reg = op.GetRegister();
+ os_ << " \"" << assigned_reg.ToString() << "\"";
}
} else if (range->spilled()) {
auto top = range->TopLevel();
diff --git a/deps/v8/src/compiler/greedy-allocator.cc b/deps/v8/src/compiler/greedy-allocator.cc
index e0368bf366..683b75d49f 100644
--- a/deps/v8/src/compiler/greedy-allocator.cc
+++ b/deps/v8/src/compiler/greedy-allocator.cc
@@ -50,19 +50,6 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
}
-// TODO(mtrofin): explain why splitting in gap START is always OK.
-LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
- int instruction_index) {
- LifetimePosition ret = LifetimePosition::Invalid();
-
- ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
- if (range->Start() >= ret || ret >= range->End()) {
- return LifetimePosition::Invalid();
- }
- return ret;
-}
-
-
} // namespace
@@ -249,7 +236,8 @@ void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
float eviction_weight = group_weight;
int eviction_reg = -1;
int free_reg = -1;
- for (int reg = 0; reg < num_registers(); ++reg) {
+ for (int i = 0; i < num_allocatable_registers(); ++i) {
+ int reg = allocatable_register_code(i);
float weight = GetMaximumConflictingWeight(reg, group, group_weight);
if (weight == LiveRange::kInvalidWeight) {
free_reg = reg;
@@ -313,19 +301,20 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// Seek either the first free register, or, from the set of registers
// where the maximum conflict is lower than the candidate's weight, the one
// with the smallest such weight.
- for (int i = 0; i < num_registers(); i++) {
+ for (int i = 0; i < num_allocatable_registers(); i++) {
+ int reg = allocatable_register_code(i);
// Skip unnecessarily re-visiting the hinted register, if any.
- if (i == hinted_reg) continue;
+ if (reg == hinted_reg) continue;
float max_conflict_weight =
- GetMaximumConflictingWeight(i, range, competing_weight);
+ GetMaximumConflictingWeight(reg, range, competing_weight);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = i;
+ free_reg = reg;
break;
}
if (max_conflict_weight < range->weight() &&
max_conflict_weight < smallest_weight) {
smallest_weight = max_conflict_weight;
- evictable_reg = i;
+ evictable_reg = reg;
}
}
}
@@ -372,43 +361,6 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
}
-void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
- size_t initial_range_count = data()->live_ranges().size();
- for (size_t i = 0; i < initial_range_count; ++i) {
- TopLevelLiveRange* range = data()->live_ranges()[i];
- if (!CanProcessRange(range)) continue;
- if (!range->HasSpillOperand()) continue;
-
- LifetimePosition start = range->Start();
- TRACE("Live range %d:%d is defined by a spill operand.\n",
- range->TopLevel()->vreg(), range->relative_id());
- auto next_pos = start;
- if (next_pos.IsGapPosition()) {
- next_pos = next_pos.NextStart();
- }
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == nullptr) {
- Spill(range);
- } else if (pos->pos() > range->Start().NextStart()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- auto split_pos = GetSplitPositionForInstruction(
- range, pos->pos().ToInstructionIndex());
- // There is no place to split, so we can't split and spill.
- if (!split_pos.IsValid()) continue;
-
- split_pos =
- FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
-
- Split(range, data(), split_pos);
- Spill(range);
- }
- }
-}
-
-
void GreedyAllocator::AllocateRegisters() {
CHECK(scheduler().empty());
CHECK(allocations_.empty());
@@ -416,7 +368,7 @@ void GreedyAllocator::AllocateRegisters() {
TRACE("Begin allocating function %s with the Greedy Allocator\n",
data()->debug_name());
- SplitAndSpillRangesDefinedByMemoryOperand();
+ SplitAndSpillRangesDefinedByMemoryOperand(true);
GroupLiveRanges();
ScheduleAllocationCandidates();
PreallocateFixedRanges();
diff --git a/deps/v8/src/compiler/greedy-allocator.h b/deps/v8/src/compiler/greedy-allocator.h
index 45bbd87da8..b61ba4242f 100644
--- a/deps/v8/src/compiler/greedy-allocator.h
+++ b/deps/v8/src/compiler/greedy-allocator.h
@@ -128,18 +128,10 @@ class GreedyAllocator final : public RegisterAllocator {
// Evict and reschedule conflicts of a given range, at a given register.
void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
- // Find the optimal split for ranges defined by a memory operand, e.g.
- // constants or function parameters passed on the stack.
- void SplitAndSpillRangesDefinedByMemoryOperand();
-
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
void TryAllocateGroup(LiveRangeGroup* group);
- bool CanProcessRange(LiveRange* range) const {
- return range != nullptr && !range->IsEmpty() && range->kind() == mode();
- }
-
// Calculate the weight of a candidate for allocation.
void EnsureValidRangeWeight(LiveRange* range);
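
Editor's note: both allocator loops switch from dense register indices to an explicit table of allocatable codes, decoupling "how many registers exist" from "which ones the allocator may hand out". A minimal illustration (the code table below is hypothetical, e.g. with stack/frame pointers excluded):

    #include <cstddef>

    constexpr int kAllocatableCodes[] = {0, 1, 2, 3, 6, 7};
    constexpr size_t kNumAllocatable =
        sizeof(kAllocatableCodes) / sizeof(kAllocatableCodes[0]);

    template <typename InUse>
    int FindFreeRegister(InUse in_use) {
      for (size_t i = 0; i < kNumAllocatable; ++i) {
        int reg = kAllocatableCodes[i];  // cf. allocatable_register_code(i)
        if (!in_use(reg)) return reg;    // codes need not be contiguous
      }
      return -1;  // no free register; caller must evict or spill
    }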
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index d4fe21505c..5ca9c20396 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -217,6 +217,46 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
XMMRegister const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ lea(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
@@ -308,7 +348,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
RecordCallPosition(instr);
break;
@@ -348,6 +389,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
@@ -401,6 +447,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ mov(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -508,6 +572,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kIA32Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kIA32Tzcnt:
+ __ Tzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kIA32Popcnt:
+ __ Popcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -908,24 +978,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kIA32StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register value = i.InputRegister(2);
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (HasImmediateInput(instr, 1)) {
- int index = i.InputInt32(1);
- Register scratch = i.TempRegister(1);
- __ mov(Operand(object, index), value);
- __ RecordWriteContextSlot(object, index, value, scratch, mode);
- } else {
- Register index = i.InputRegister(1);
- __ mov(Operand(object, index, times_1, 0), value);
- __ lea(index, Operand(object, index, times_1, 0));
- __ RecordWrite(object, index, value, mode);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1362,7 +1414,7 @@ void CodeGenerator::AssembleReturn() {
}
size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
// Might need ecx for scratch if pop_size is too big.
- DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & ecx.bit());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
__ Ret(static_cast<int>(pop_size), ecx);
}
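
Editor's note: the per-backend kIA32StoreWriteBarrier opcode gives way to the generic kArchStoreWithWriteBarrier. The store itself stays inline; a page-flag check on the object decides whether to take the out-of-line path, and the out-of-line fragment applies the remaining filters, ordered by RecordWriteMode, before calling the stub. The filter chain as plain predicates (a sketch; the booleans stand in for CheckPageFlag, and RecordWriteMode matches the enum added to instruction-codes.h below):

    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

    bool NeedsRecordWriteStub(RecordWriteMode mode, bool value_is_smi,
                              bool value_page_interesting,
                              bool object_page_interesting) {
      // Inline fast path: bail unless the object's page has the
      // "pointers from here are interesting" flag set.
      if (!object_page_interesting) return false;
      // Out-of-line filters, mirroring OutOfLineRecordWrite::Generate():
      if (mode > RecordWriteMode::kValueIsPointer && value_is_smi)
        return false;  // smis are not pointers
      if (mode > RecordWriteMode::kValueIsMap && !value_page_interesting)
        return false;  // target page needs no remembered-set entry
      return true;  // fall through to the RecordWriteStub call
    }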
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 2119947e94..97dec17c03 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -31,6 +31,8 @@ namespace compiler {
V(IA32Sar) \
V(IA32Ror) \
V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -94,7 +96,6 @@ namespace compiler {
V(IA32Lea) \
V(IA32Push) \
V(IA32Poke) \
- V(IA32StoreWriteBarrier) \
V(IA32StackCheck)
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 792d1d5a47..8225c96b12 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -215,66 +215,89 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+
+ if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister()};
- Emit(kIA32StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseImmediate(index), g.UseFixed(value, ecx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
} else {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
- Emit(kIA32StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kIA32Movss;
+ break;
+ case kRepFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kIA32Movb;
+ break;
+ case kRepWord16:
+ opcode = kIA32Movw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kIA32Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
}
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kIA32Movss;
- break;
- case kRepFloat64:
- opcode = kIA32Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kIA32Movb;
- break;
- case kRepWord16:
- opcode = kIA32Movw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kIA32Movl;
- break;
- default:
- UNREACHABLE();
- return;
- }
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == kRepWord8 || rep == kRepBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
- InstructionOperand val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
@@ -553,6 +576,18 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -831,20 +866,10 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
IA32OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -855,8 +880,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ if (Node* input = (*arguments)[n]) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(node)
? g.UseImmediate(input)
@@ -866,7 +891,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
if (input == nullptr) continue;
// TODO(titzer): IA32Push cannot handle stack->stack double moves
@@ -881,124 +906,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kIA32Push, g.NoOutput(), value);
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- IA32OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
-
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): Handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
- Emit(kIA32Push, g.NoOutput(), value);
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
namespace {
@@ -1357,7 +1268,11 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kWord32Ctz;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt;
+ }
if (CpuFeatures::IsSupported(SSE4_1)) {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
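
Editor's note: kWord32Ctz is advertised unconditionally on ia32, while kWord32Popcnt is gated on the POPCNT CPU feature; a backend that does not advertise popcount forces the generic pipeline to expand it in software. For reference, the classic parallel bit-count that such a fallback amounts to (illustrative only; not copied from V8's lowering):

    #include <cstdint>

    uint32_t Popcount32(uint32_t x) {
      x = x - ((x >> 1) & 0x55555555u);                  // 2-bit sums
      x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit sums
      x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // 8-bit sums
      return (x * 0x01010101u) >> 24;                    // fold into top byte
    }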
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index cb47be6446..d4cc2db9a6 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -33,38 +33,44 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Modes for ArchStoreWithWriteBarrier below.
+enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
+
+
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchTailCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchTailCallJSFunction) \
- V(ArchPrepareCallCFunction) \
- V(ArchCallCFunction) \
- V(ArchJmp) \
- V(ArchLookupSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchLazyBailout) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadWord64) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreWord64) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 88283d4898..cd41e42eff 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -51,15 +51,13 @@ class OperandGenerator {
InstructionOperand DefineAsFixed(Node* node, Register reg) {
return Define(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
}
InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
return Define(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
}
InstructionOperand DefineAsConstant(Node* node) {
@@ -80,6 +78,12 @@ class OperandGenerator {
GetVReg(node)));
}
+ InstructionOperand UseAny(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
+ }
+
InstructionOperand UseRegister(Node* node) {
return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START,
@@ -107,15 +111,18 @@ class OperandGenerator {
InstructionOperand UseFixed(Node* node, Register reg) {
return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
}
InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
return Use(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
+ }
+
+ InstructionOperand UseExplicit(Register reg) {
+ MachineType machine_type = InstructionSequence::DefaultRepresentation();
+ return ExplicitOperand(LocationOperand::REGISTER, machine_type, reg.code());
}
InstructionOperand UseImmediate(Node* node) {
@@ -142,8 +149,7 @@ class OperandGenerator {
}
InstructionOperand TempRegister(Register reg) {
- return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg),
+ return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(),
InstructionOperand::kInvalidVirtualRegister);
}
@@ -317,33 +323,6 @@ class FlagsContinuation final {
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
};
-
-// An internal helper class for generating the operands to calls.
-// TODO(bmeurer): Get rid of the CallBuffer business and make
-// InstructionSelector::VisitCall platform independent instead.
-struct CallBuffer {
- CallBuffer(Zone* zone, const CallDescriptor* descriptor,
- FrameStateDescriptor* frame_state);
-
- const CallDescriptor* descriptor;
- FrameStateDescriptor* frame_state_descriptor;
- NodeVector output_nodes;
- InstructionOperandVector outputs;
- InstructionOperandVector instruction_args;
- NodeVector pushed_nodes;
-
- size_t input_count() const { return descriptor->InputCount(); }
-
- size_t frame_state_count() const { return descriptor->FrameStateCount(); }
-
- size_t frame_state_value_count() const {
- return (frame_state_descriptor == NULL)
- ? 0
- : (frame_state_descriptor->GetTotalSize() +
- 1); // Include deopt id.
- }
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 7200bf0e7a..eac5571e9c 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -254,21 +254,121 @@ void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
}
+namespace {
+
+enum class FrameStateInputKind { kAny, kStackSlot };
+
+
+InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
+ FrameStateInputKind kind) {
+ switch (input->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kHeapConstant:
+ return g->UseImmediate(input);
+ default:
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ return g->UseAny(input);
+ }
+ UNREACHABLE();
+ return InstructionOperand();
+ }
+}
+
+
+void AddFrameStateInputs(Node* state, OperandGenerator* g,
+ InstructionOperandVector* inputs,
+ FrameStateDescriptor* descriptor,
+ FrameStateInputKind kind, Zone* zone) {
+ DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+
+ if (descriptor->outer_state()) {
+ AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), g, inputs,
+ descriptor->outer_state(), kind, zone);
+ }
+
+ Node* parameters = state->InputAt(kFrameStateParametersInput);
+ Node* locals = state->InputAt(kFrameStateLocalsInput);
+ Node* stack = state->InputAt(kFrameStateStackInput);
+ Node* context = state->InputAt(kFrameStateContextInput);
+ Node* function = state->InputAt(kFrameStateFunctionInput);
+
+ DCHECK_EQ(descriptor->parameters_count(),
+ StateValuesAccess(parameters).size());
+ DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
+ DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
+
+ ZoneVector<MachineType> types(zone);
+ types.reserve(descriptor->GetSize());
+
+ size_t value_index = 0;
+ inputs->push_back(OperandForDeopt(g, function, kind));
+ descriptor->SetType(value_index++, kMachAnyTagged);
+ for (StateValuesAccess::TypedNode input_node :
+ StateValuesAccess(parameters)) {
+ inputs->push_back(OperandForDeopt(g, input_node.node, kind));
+ descriptor->SetType(value_index++, input_node.type);
+ }
+ if (descriptor->HasContext()) {
+ inputs->push_back(OperandForDeopt(g, context, kind));
+ descriptor->SetType(value_index++, kMachAnyTagged);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
+ inputs->push_back(OperandForDeopt(g, input_node.node, kind));
+ descriptor->SetType(value_index++, input_node.type);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
+ inputs->push_back(OperandForDeopt(g, input_node.node, kind));
+ descriptor->SetType(value_index++, input_node.type);
+ }
+ DCHECK(value_index == descriptor->GetSize());
+}
+
+} // namespace
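
Editor's note: AddFrameStateInputs linearizes a (possibly nested) frame state in a fixed order: outer frames first, then per frame the closure, parameters, context (if present), locals, and stack, with constants emitted as immediates and everything else as a stack slot or unconstrained operand depending on FrameStateInputKind. The traversal order alone, as a toy walker (strings stand in for deopt operands; the real code emits InstructionOperands and records a MachineType per value):

    #include <string>
    #include <vector>

    struct FrameState {
      const FrameState* outer = nullptr;
      std::vector<std::string> parameters, locals, stack;
      bool has_context = true;
    };

    void Flatten(const FrameState& s, std::vector<std::string>* out) {
      if (s.outer != nullptr) Flatten(*s.outer, out);  // outermost frame first
      out->push_back("<function>");                    // the closure
      out->insert(out->end(), s.parameters.begin(), s.parameters.end());
      if (s.has_context) out->push_back("<context>");
      out->insert(out->end(), s.locals.begin(), s.locals.end());
      out->insert(out->end(), s.stack.begin(), s.stack.end());
    }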
+
+
+// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
- FrameStateDescriptor* frame_desc)
- : descriptor(d),
- frame_state_descriptor(frame_desc),
- output_nodes(zone),
- outputs(zone),
- instruction_args(zone),
- pushed_nodes(zone) {
- output_nodes.reserve(d->ReturnCount());
- outputs.reserve(d->ReturnCount());
- pushed_nodes.reserve(input_count());
- instruction_args.reserve(input_count() + frame_state_value_count());
-}
+struct CallBuffer {
+ CallBuffer(Zone* zone, const CallDescriptor* descriptor,
+ FrameStateDescriptor* frame_state)
+ : descriptor(descriptor),
+ frame_state_descriptor(frame_state),
+ output_nodes(zone),
+ outputs(zone),
+ instruction_args(zone),
+ pushed_nodes(zone) {
+ output_nodes.reserve(descriptor->ReturnCount());
+ outputs.reserve(descriptor->ReturnCount());
+ pushed_nodes.reserve(input_count());
+ instruction_args.reserve(input_count() + frame_state_value_count());
+ }
+
+
+ const CallDescriptor* descriptor;
+ FrameStateDescriptor* frame_state_descriptor;
+ NodeVector output_nodes;
+ InstructionOperandVector outputs;
+ InstructionOperandVector instruction_args;
+ NodeVector pushed_nodes;
+
+ size_t input_count() const { return descriptor->InputCount(); }
+
+ size_t frame_state_count() const { return descriptor->FrameStateCount(); }
+
+ size_t frame_state_value_count() const {
+ return (frame_state_descriptor == NULL)
+ ? 0
+ : (frame_state_descriptor->GetTotalSize() +
+ 1); // Include deopt id.
+ }
+};
// TODO(bmeurer): Get rid of the CallBuffer business and make
@@ -345,6 +445,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
buffer->descriptor->GetInputType(0)));
break;
+ case CallDescriptor::kLazyBailout:
+ // The target is ignored, but we still need to pass a value here.
+ buffer->instruction_args.push_back(g.UseImmediate(callee));
+ break;
}
DCHECK_EQ(1u, buffer->instruction_args.size());
@@ -359,9 +463,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
- AddFrameStateInputs(frame_state, &buffer->instruction_args,
+ AddFrameStateInputs(frame_state, &g, &buffer->instruction_args,
buffer->frame_state_descriptor,
- FrameStateInputKind::kStackSlot);
+ FrameStateInputKind::kStackSlot, instruction_zone());
}
DCHECK(1 + buffer->frame_state_value_count() ==
buffer->instruction_args.size());
@@ -504,7 +608,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kReturn: {
DCHECK_EQ(IrOpcode::kReturn, input->opcode());
- return VisitReturn(input->InputAt(0));
+ return VisitReturn(input);
}
case BasicBlock::kDeoptimize: {
// If the result itself is a return, return its input.
@@ -545,12 +649,15 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kTerminate:
+ case IrOpcode::kBeginRegion:
// No code needed for these graph artifacts.
return;
case IrOpcode::kIfException:
return MarkAsReference(node), VisitIfException(node);
- case IrOpcode::kFinish:
- return MarkAsReference(node), VisitFinish(node);
+ case IrOpcode::kFinishRegion:
+ return MarkAsReference(node), VisitFinishRegion(node);
+ case IrOpcode::kGuard:
+ return MarkAsReference(node), VisitGuard(node);
case IrOpcode::kParameter: {
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
@@ -611,6 +718,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord32Equal(node);
case IrOpcode::kWord32Clz:
return MarkAsWord32(node), VisitWord32Clz(node);
+ case IrOpcode::kWord32Ctz:
+ return MarkAsWord32(node), VisitWord32Ctz(node);
+ case IrOpcode::kWord32Popcnt:
+ return MarkAsWord32(node), VisitWord32Popcnt(node);
+ case IrOpcode::kWord64Popcnt:
+ return MarkAsWord32(node), VisitWord64Popcnt(node);
case IrOpcode::kWord64And:
return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
@@ -625,6 +738,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Ror:
return MarkAsWord64(node), VisitWord64Ror(node);
+ case IrOpcode::kWord64Clz:
+ return MarkAsWord64(node), VisitWord64Clz(node);
+ case IrOpcode::kWord64Ctz:
+ return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
@@ -699,6 +816,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kRoundInt64ToFloat32:
+ return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+ case IrOpcode::kRoundInt64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
case IrOpcode::kBitcastFloat64ToInt64:
@@ -858,6 +979,15 @@ void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
+
+
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
@@ -913,6 +1043,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -925,7 +1065,14 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
#endif // V8_TARGET_ARCH_32_BIT
-void InstructionSelector::VisitFinish(Node* node) {
+void InstructionSelector::VisitFinishRegion(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitGuard(Node* node) {
OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
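Both VisitFinishRegion and VisitGuard lower to a kArchNop whose output is constrained to the same register as the input, so after register allocation the node is a pure rename with no code of its own. A toy model of a "same as first input" constraint, as I read the DefineSameAsFirst pattern (not the real OperandGenerator):

    #include <cassert>

    enum class Policy { kAny, kSameAsFirstInput };

    struct Constraint { Policy policy; };

    // The allocator's job for kSameAsFirstInput: reuse the input's assignment.
    int Assign(Constraint c, int first_input_register) {
      return c.policy == Policy::kSameAsFirstInput ? first_input_register
                                                   : /* pick freely */ 0;
    }

    int main() {
      Constraint def{Policy::kSameAsFirstInput};
      assert(Assign(def, 5) == 5);  // output aliases the input's register
    }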
@@ -1002,6 +1149,140 @@ void InstructionSelector::VisitConstant(Node* node) {
}
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+ OperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on some architectures it's probably better to use
+ // the code object in a register if there are multiple uses of it.
+ // Improve constant pool and the heuristics in the register allocator
+ // for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode = kArchNop;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kLazyBailout:
+ opcode = kArchLazyBailout | MiscField::encode(flags);
+ break;
+ }
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
+}
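The `opcode | MiscField::encode(...)` pattern in VisitCall packs the call flags (or, for C calls, the parameter count) into the upper bits of the InstructionCode word, leaving the low bits for the architecture opcode. A rough model of that bit-field packing, assuming a layout in the spirit of V8's BitField (the shift and width here are made up for the demo):

    #include <cassert>
    #include <cstdint>

    // Toy BitField: value occupies |size| bits starting at |shift|.
    template <typename T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        assert(static_cast<uint32_t>(value) < (1u << size));
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word >> shift) & ((1u << size) - 1));
      }
    };

    using MiscField = BitField<int, 22, 10>;  // placement is an assumption

    int main() {
      uint32_t kArchCallCodeObject = 7;  // arbitrary opcode number for the demo
      int flags = 0x3;
      uint32_t instr = kArchCallCodeObject | MiscField::encode(flags);
      assert(MiscField::decode(instr) == flags);
      assert((instr & 0x3FF) == kArchCallCodeObject);  // opcode bits untouched
    }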
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ OperandGenerator g(this);
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, IsTailCallAddressImmediate());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, IsTailCallAddressImmediate());
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t output_count = buffer.outputs.size();
+ auto* outputs = &buffer.outputs.front();
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
+}
+
+
void InstructionSelector::VisitGoto(BasicBlock* target) {
// jump to the next block.
OperandGenerator g(this);
@@ -1009,15 +1290,19 @@ void InstructionSelector::VisitGoto(BasicBlock* target) {
}
-void InstructionSelector::VisitReturn(Node* value) {
- DCHECK_NOT_NULL(value);
+void InstructionSelector::VisitReturn(Node* ret) {
OperandGenerator g(this);
if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
Emit(kArchRet, g.NoOutput());
} else {
- Emit(kArchRet, g.NoOutput(),
- g.UseLocation(value, linkage()->GetReturnLocation(),
- linkage()->GetReturnType()));
+ const int ret_count = ret->op()->ValueInputCount();
+ auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
+ for (int i = 0; i < ret_count; ++i) {
+ value_locations[i] =
+ g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
+ linkage()->GetReturnType(i));
+ }
+ Emit(kArchRet, 0, nullptr, ret_count, value_locations);
}
}
@@ -1035,7 +1320,8 @@ void InstructionSelector::VisitDeoptimize(Node* value) {
sequence()->AddFrameStateDescriptor(desc);
args.push_back(g.TempImmediate(state_id.ToInt()));
- AddFrameStateInputs(value, &args, desc, FrameStateInputKind::kAny);
+ AddFrameStateInputs(value, &g, &args, desc, FrameStateInputKind::kAny,
+ instruction_zone());
DCHECK_EQ(args.size(), arg_count);
@@ -1078,76 +1364,6 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
}
-InstructionOperand InstructionSelector::OperandForDeopt(
- OperandGenerator* g, Node* input, FrameStateInputKind kind) {
- switch (input->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kHeapConstant:
- return g->UseImmediate(input);
- default:
- switch (kind) {
- case FrameStateInputKind::kStackSlot:
- return g->UseUniqueSlot(input);
- case FrameStateInputKind::kAny:
- return g->Use(input);
- }
- UNREACHABLE();
- return InstructionOperand();
- }
-}
-
-
-void InstructionSelector::AddFrameStateInputs(Node* state,
- InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor,
- FrameStateInputKind kind) {
- DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
-
- if (descriptor->outer_state()) {
- AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), inputs,
- descriptor->outer_state(), kind);
- }
-
- Node* parameters = state->InputAt(kFrameStateParametersInput);
- Node* locals = state->InputAt(kFrameStateLocalsInput);
- Node* stack = state->InputAt(kFrameStateStackInput);
- Node* context = state->InputAt(kFrameStateContextInput);
- Node* function = state->InputAt(kFrameStateFunctionInput);
-
- DCHECK_EQ(descriptor->parameters_count(),
- StateValuesAccess(parameters).size());
- DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
- DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
-
- ZoneVector<MachineType> types(instruction_zone());
- types.reserve(descriptor->GetSize());
-
- OperandGenerator g(this);
- size_t value_index = 0;
- inputs->push_back(OperandForDeopt(&g, function, kind));
- descriptor->SetType(value_index++, kMachAnyTagged);
- for (StateValuesAccess::TypedNode input_node :
- StateValuesAccess(parameters)) {
- inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
- }
- if (descriptor->HasContext()) {
- inputs->push_back(OperandForDeopt(&g, context, kind));
- descriptor->SetType(value_index++, kMachAnyTagged);
- }
- for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
- inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
- }
- for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
- inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
- }
- DCHECK(value_index == descriptor->GetSize());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index b8354fcfd1..68a45157f9 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -172,16 +172,10 @@ class InstructionSelector final {
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_code_immediate,
bool call_address_immediate);
+ bool IsTailCallAddressImmediate();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
- enum class FrameStateInputKind { kAny, kStackSlot };
- void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor,
- FrameStateInputKind kind);
- static InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
- FrameStateInputKind kind);
-
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
// ===========================================================================
@@ -200,7 +194,8 @@ class InstructionSelector final {
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
- void VisitFinish(Node* node);
+ void VisitFinishRegion(Node* node);
+ void VisitGuard(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
void VisitOsrValue(Node* node);
@@ -213,9 +208,12 @@ class InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(Node* value);
- void VisitReturn(Node* value);
+ void VisitReturn(Node* ret);
void VisitThrow(Node* value);
+ void EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor, Node* node);
+
// ===========================================================================
Schedule* schedule() const { return schedule_; }
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 0fbb94979e..1f9543a635 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -74,11 +74,15 @@ std::ostream& operator<<(std::ostream& os,
case UnallocatedOperand::NONE:
return os;
case UnallocatedOperand::FIXED_REGISTER:
- return os << "(=" << conf->general_register_name(
- unalloc->fixed_register_index()) << ")";
+ return os << "(="
+ << conf->GetGeneralRegisterName(
+ unalloc->fixed_register_index())
+ << ")";
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
- return os << "(=" << conf->double_register_name(
- unalloc->fixed_register_index()) << ")";
+ return os << "(="
+ << conf->GetDoubleRegisterName(
+ unalloc->fixed_register_index())
+ << ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)";
case UnallocatedOperand::MUST_HAVE_SLOT:
@@ -101,25 +105,22 @@ std::ostream& operator<<(std::ostream& os,
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
+ case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
- auto allocated = AllocatedOperand::cast(op);
- switch (allocated.allocated_kind()) {
- case AllocatedOperand::STACK_SLOT:
- os << "[stack:" << StackSlotOperand::cast(op).index();
- break;
- case AllocatedOperand::DOUBLE_STACK_SLOT:
- os << "[double_stack:" << DoubleStackSlotOperand::cast(op).index();
- break;
- case AllocatedOperand::REGISTER:
- os << "["
- << conf->general_register_name(RegisterOperand::cast(op).index())
- << "|R";
- break;
- case AllocatedOperand::DOUBLE_REGISTER:
- os << "["
- << conf->double_register_name(
- DoubleRegisterOperand::cast(op).index()) << "|R";
- break;
+ auto allocated = LocationOperand::cast(op);
+ if (op.IsStackSlot()) {
+ os << "[stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsDoubleStackSlot()) {
+ os << "[double_stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsRegister()) {
+ os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
+ } else {
+ DCHECK(op.IsDoubleRegister());
+ os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
+ << "|R";
+ }
+ if (allocated.IsExplicit()) {
+ os << "|E";
}
switch (allocated.machine_type()) {
case kRepWord32:
@@ -178,11 +179,11 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
MoveOperands* to_eliminate = nullptr;
for (auto curr : *this) {
if (curr->IsEliminated()) continue;
- if (curr->destination().EqualsModuloType(move->source())) {
+ if (curr->destination().EqualsCanonicalized(move->source())) {
DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
- } else if (curr->destination().EqualsModuloType(move->destination())) {
+ } else if (curr->destination().EqualsCanonicalized(move->destination())) {
DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
@@ -194,6 +195,16 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
}
+ExplicitOperand::ExplicitOperand(LocationKind kind, MachineType machine_type,
+ int index)
+ : LocationOperand(EXPLICIT, kind, machine_type, index) {
+ DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(machine_type),
+ Register::from_code(index).IsAllocatable());
+ DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(machine_type),
+ DoubleRegister::from_code(index).IsAllocatable());
+}
+
+
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
@@ -260,7 +271,7 @@ std::ostream& operator<<(std::ostream& os,
void ReferenceMap::RecordReference(const AllocatedOperand& op) {
// Do not record arguments as pointers.
- if (op.IsStackSlot() && StackSlotOperand::cast(op).index() < 0) return;
+ if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
DCHECK(!op.IsDoubleRegister() && !op.IsDoubleStackSlot());
reference_operands_.push_back(op);
}
@@ -269,8 +280,9 @@ void ReferenceMap::RecordReference(const AllocatedOperand& op) {
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
bool first = true;
- PrintableInstructionOperand poi = {RegisterConfiguration::ArchDefault(),
- InstructionOperand()};
+ PrintableInstructionOperand poi = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ InstructionOperand()};
for (auto& op : pm.reference_operands_) {
if (!first) {
os << ";";
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index a0718f3c21..7ab2b90778 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -14,8 +14,9 @@
#include "src/compiler/frame.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/register-configuration.h"
#include "src/compiler/source-position.h"
+#include "src/macro-assembler.h"
+#include "src/register-configuration.h"
#include "src/zone-allocator.h"
namespace v8 {
@@ -30,7 +31,7 @@ class InstructionOperand {
// TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
// kInvalidVirtualRegister and some DCHECKS.
- enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, ALLOCATED };
+ enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, EXPLICIT, ALLOCATED };
InstructionOperand() : InstructionOperand(INVALID) {}
@@ -39,9 +40,25 @@ class InstructionOperand {
#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
bool Is##name() const { return kind() == type; }
INSTRUCTION_OPERAND_PREDICATE(Invalid, INVALID)
+ // UnallocatedOperands are place-holder operands created before register
+ // allocation. They are later assigned registers and become AllocatedOperands.
INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
+ // Constant operands participate in register allocation. They are allocated to
+ // registers but have a special "spilling" behavior. When a ConstantOperand
+ // value must be rematerialized, it is loaded from an immediate constant
+ // rather than from an unspilled slot.
INSTRUCTION_OPERAND_PREDICATE(Constant, CONSTANT)
+ // ImmediateOperands do not participate in register allocation and are only
+ // embedded directly in instructions, e.g. small integers and, on some
+ // platforms, Objects.
INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
+ // ExplicitOperands do not participate in register allocation. They are
+ // created by the instruction selector for direct access to registers and
+ // stack slots, completely bypassing the register allocator. They are never
+ // associated with a virtual register.
+ INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT)
+ // AllocatedOperands are registers or stack slots that are assigned by the
+ // register allocator and are always associated with a virtual register.
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef INSTRUCTION_OPERAND_PREDICATE
@@ -69,18 +86,18 @@ class InstructionOperand {
return this->value_ < that.value_;
}
- bool EqualsModuloType(const InstructionOperand& that) const {
- return this->GetValueModuloType() == that.GetValueModuloType();
+ bool EqualsCanonicalized(const InstructionOperand& that) const {
+ return this->GetCanonicalizedValue() == that.GetCanonicalizedValue();
}
- bool CompareModuloType(const InstructionOperand& that) const {
- return this->GetValueModuloType() < that.GetValueModuloType();
+ bool CompareCanonicalized(const InstructionOperand& that) const {
+ return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
}
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
- inline uint64_t GetValueModuloType() const;
+ inline uint64_t GetCanonicalizedValue() const;
class KindField : public BitField64<Kind, 0, 3> {};
@@ -352,42 +369,44 @@ class ImmediateOperand : public InstructionOperand {
};
-class AllocatedOperand : public InstructionOperand {
+class LocationOperand : public InstructionOperand {
public:
- // TODO(dcarney): machine_type makes this now redundant. Just need to know is
- // the operand is a slot or a register.
- enum AllocatedKind {
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER
- };
+ enum LocationKind { REGISTER, STACK_SLOT };
- AllocatedOperand(AllocatedKind kind, MachineType machine_type, int index)
- : InstructionOperand(ALLOCATED) {
- DCHECK_IMPLIES(kind == REGISTER || kind == DOUBLE_REGISTER, index >= 0);
+ LocationOperand(InstructionOperand::Kind operand_kind,
+ LocationOperand::LocationKind location_kind,
+ MachineType machine_type, int index)
+ : InstructionOperand(operand_kind) {
+ DCHECK_IMPLIES(location_kind == REGISTER, index >= 0);
DCHECK(IsSupportedMachineType(machine_type));
- value_ |= AllocatedKindField::encode(kind);
+ value_ |= LocationKindField::encode(location_kind);
value_ |= MachineTypeField::encode(machine_type);
value_ |= static_cast<int64_t>(index) << IndexField::kShift;
}
int index() const {
+ DCHECK(IsStackSlot() || IsDoubleStackSlot());
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
- AllocatedKind allocated_kind() const {
- return AllocatedKindField::decode(value_);
+ Register GetRegister() const {
+ DCHECK(IsRegister());
+ return Register::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
}
- MachineType machine_type() const { return MachineTypeField::decode(value_); }
+ DoubleRegister GetDoubleRegister() const {
+ DCHECK(IsDoubleRegister());
+ return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
+ }
- static AllocatedOperand* New(Zone* zone, AllocatedKind kind,
- MachineType machine_type, int index) {
- return InstructionOperand::New(zone,
- AllocatedOperand(kind, machine_type, index));
+ LocationKind location_kind() const {
+ return LocationKindField::decode(value_);
}
+ MachineType machine_type() const { return MachineTypeField::decode(value_); }
+
static bool IsSupportedMachineType(MachineType machine_type) {
if (RepresentationOf(machine_type) != machine_type) return false;
switch (machine_type) {
@@ -402,71 +421,99 @@ class AllocatedOperand : public InstructionOperand {
}
}
- INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
+ static LocationOperand* cast(InstructionOperand* op) {
+ DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ return static_cast<LocationOperand*>(op);
+ }
+
+ static const LocationOperand* cast(const InstructionOperand* op) {
+ DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ return static_cast<const LocationOperand*>(op);
+ }
+
+ static LocationOperand cast(const InstructionOperand& op) {
+ DCHECK(ALLOCATED == op.kind() || EXPLICIT == op.kind());
+ return *static_cast<const LocationOperand*>(&op);
+ }
STATIC_ASSERT(KindField::kSize == 3);
- class AllocatedKindField : public BitField64<AllocatedKind, 3, 2> {};
+ class LocationKindField : public BitField64<LocationKind, 3, 2> {};
class MachineTypeField : public BitField64<MachineType, 5, 16> {};
class IndexField : public BitField64<int32_t, 35, 29> {};
};
+class ExplicitOperand : public LocationOperand {
+ public:
+ ExplicitOperand(LocationKind kind, MachineType machine_type, int index);
+
+ static ExplicitOperand* New(Zone* zone, LocationKind kind,
+ MachineType machine_type, int index) {
+ return InstructionOperand::New(zone,
+ ExplicitOperand(kind, machine_type, index));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT);
+};
+
+
+class AllocatedOperand : public LocationOperand {
+ public:
+ AllocatedOperand(LocationKind kind, MachineType machine_type, int index)
+ : LocationOperand(ALLOCATED, kind, machine_type, index) {}
+
+ static AllocatedOperand* New(Zone* zone, LocationKind kind,
+ MachineType machine_type, int index) {
+ return InstructionOperand::New(zone,
+ AllocatedOperand(kind, machine_type, index));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
+};
+
+
#undef INSTRUCTION_OPERAND_CASTS
-#define ALLOCATED_OPERAND_LIST(V) \
- V(StackSlot, STACK_SLOT) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
- V(Register, REGISTER) \
- V(DoubleRegister, DOUBLE_REGISTER)
-
-
-#define ALLOCATED_OPERAND_IS(SubKind, kOperandKind) \
- bool InstructionOperand::Is##SubKind() const { \
- return IsAllocated() && \
- AllocatedOperand::cast(this)->allocated_kind() == \
- AllocatedOperand::kOperandKind; \
- }
-ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_IS)
-#undef ALLOCATED_OPERAND_IS
-
-
-// TODO(dcarney): these subkinds are now pretty useless, nuke.
-#define ALLOCATED_OPERAND_CLASS(SubKind, kOperandKind) \
- class SubKind##Operand final : public AllocatedOperand { \
- public: \
- explicit SubKind##Operand(MachineType machine_type, int index) \
- : AllocatedOperand(kOperandKind, machine_type, index) {} \
- \
- static SubKind##Operand* New(Zone* zone, MachineType machine_type, \
- int index) { \
- return InstructionOperand::New(zone, \
- SubKind##Operand(machine_type, index)); \
- } \
- \
- static SubKind##Operand* cast(InstructionOperand* op) { \
- DCHECK_EQ(kOperandKind, AllocatedOperand::cast(op)->allocated_kind()); \
- return reinterpret_cast<SubKind##Operand*>(op); \
- } \
- \
- static const SubKind##Operand* cast(const InstructionOperand* op) { \
- DCHECK_EQ(kOperandKind, AllocatedOperand::cast(op)->allocated_kind()); \
- return reinterpret_cast<const SubKind##Operand*>(op); \
- } \
- \
- static SubKind##Operand cast(const InstructionOperand& op) { \
- DCHECK_EQ(kOperandKind, AllocatedOperand::cast(op).allocated_kind()); \
- return *static_cast<const SubKind##Operand*>(&op); \
- } \
- };
-ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
-#undef ALLOCATED_OPERAND_CLASS
+bool InstructionOperand::IsRegister() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::REGISTER &&
+ !IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+}
+bool InstructionOperand::IsDoubleRegister() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::REGISTER &&
+ IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+}
+
+bool InstructionOperand::IsStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ !IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+}
+
+bool InstructionOperand::IsDoubleStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+}
-uint64_t InstructionOperand::GetValueModuloType() const {
- if (IsAllocated()) {
+uint64_t InstructionOperand::GetCanonicalizedValue() const {
+ if (IsAllocated() || IsExplicit()) {
// TODO(dcarney): put machine type last and mask.
- return AllocatedOperand::MachineTypeField::update(this->value_, kMachNone);
+ MachineType canonicalized_machine_type =
+ IsFloatingPoint(LocationOperand::cast(this)->machine_type())
+ ? kMachFloat64
+ : kMachNone;
+ return InstructionOperand::KindField::update(
+ LocationOperand::MachineTypeField::update(this->value_,
+ canonicalized_machine_type),
+ LocationOperand::EXPLICIT);
}
return this->value_;
}
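GetCanonicalizedValue makes two deliberate erasures before operands are compared: every floating-point machine type collapses to kMachFloat64 (so a float32 and a float64 view of the same register compare equal, matching how they alias one double register), and the kind collapses to EXPLICIT (so an AllocatedOperand and an ExplicitOperand naming the same location compare equal). A simplified model of that comparison with made-up encodings:

    #include <cassert>
    #include <tuple>

    enum Kind { EXPLICIT = 0, ALLOCATED = 1 };
    enum MType { kMachNone, kMachFloat32, kMachFloat64, kMachInt32 };

    struct Op {
      Kind kind;
      MType type;
      int index;  // register or slot number
    };

    // Mirrors the idea: drop the kind, fold all FP types to Float64 and
    // everything else to kMachNone before comparing.
    bool EqualsCanonicalized(Op a, Op b) {
      auto canon = [](Op o) {
        MType t = (o.type == kMachFloat32 || o.type == kMachFloat64)
                      ? kMachFloat64
                      : kMachNone;
        return std::make_tuple(t, o.index);  // kind intentionally absent
      };
      return canon(a) == canon(b);
    }

    int main() {
      Op alloc_f32{ALLOCATED, kMachFloat32, 3};
      Op expl_f64{EXPLICIT, kMachFloat64, 3};
      assert(EqualsCanonicalized(alloc_f32, expl_f64));  // same double register
    }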
@@ -476,7 +523,7 @@ uint64_t InstructionOperand::GetValueModuloType() const {
struct CompareOperandModuloType {
bool operator()(const InstructionOperand& a,
const InstructionOperand& b) const {
- return a.CompareModuloType(b);
+ return a.CompareCanonicalized(b);
}
};
@@ -508,14 +555,14 @@ class MoveOperands final : public ZoneObject {
// True if this move a move into the given destination operand.
bool Blocks(const InstructionOperand& operand) const {
- return !IsEliminated() && source().EqualsModuloType(operand);
+ return !IsEliminated() && source().EqualsCanonicalized(operand);
}
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
- return IsEliminated() || source_.EqualsModuloType(destination_);
+ return IsEliminated() || source_.EqualsCanonicalized(destination_);
}
// We clear both operands to indicate move that's been eliminated.
diff --git a/deps/v8/src/compiler/interpreter-assembler.cc b/deps/v8/src/compiler/interpreter-assembler.cc
index 1f5c0a26a5..ed056cfe56 100644
--- a/deps/v8/src/compiler/interpreter-assembler.cc
+++ b/deps/v8/src/compiler/interpreter-assembler.cc
@@ -35,6 +35,8 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
end_nodes_(zone),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
+ context_(
+ raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
code_generated_(false) {}
@@ -66,19 +68,16 @@ Handle<Code> InterpreterAssembler::GenerateCode() {
}
-Node* InterpreterAssembler::GetAccumulator() {
- return accumulator_;
-}
+Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
-void InterpreterAssembler::SetAccumulator(Node* value) {
- accumulator_ = value;
-}
+void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
-Node* InterpreterAssembler::ContextTaggedPointer() {
- return raw_assembler_->Parameter(Linkage::kInterpreterContextParameter);
-}
+Node* InterpreterAssembler::GetContext() { return context_; }
+
+
+void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
Node* InterpreterAssembler::RegisterFileRawPointer() {
@@ -112,6 +111,13 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
}
+Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
+ return raw_assembler_->Load(
+ kMachAnyTagged, RegisterFileRawPointer(),
+ RegisterFrameOffset(Int32Constant(reg.ToOperand())));
+}
+
+
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
return raw_assembler_->Load(kMachAnyTagged, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
@@ -120,23 +126,32 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return raw_assembler_->Store(kMachAnyTagged, RegisterFileRawPointer(),
- RegisterFrameOffset(reg_index), value);
+ RegisterFrameOffset(reg_index), value,
+ kNoWriteBarrier);
}
Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
return raw_assembler_->Load(
kMachUint8, BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(1 + operand_index)));
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
}
Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
Node* load = raw_assembler_->Load(
kMachInt8, BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(1 + operand_index)));
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
// Ensure that we sign extend to full pointer size
if (kPointerSize == 8) {
load = raw_assembler_->ChangeInt32ToInt64(load);
@@ -145,14 +160,46 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
}
+Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ if (TargetSupportsUnalignedAccess()) {
+ return raw_assembler_->Load(
+ kMachUint16, BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+ } else {
+ int offset =
+ interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* first_byte = raw_assembler_->Load(
+ kMachUint8, BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+ Node* second_byte = raw_assembler_->Load(
+ kMachUint8, BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+ return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
+ first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+ return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
+ second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+ }
+}
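When the target cannot do unaligned 16-bit loads, BytecodeOperandShort loads the two operand bytes separately and recombines them; on little-endian targets the second byte is the high byte, on big-endian the first. A self-contained check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    uint16_t CombineLE(uint8_t first, uint8_t second) {
      return static_cast<uint16_t>(first | (second << 8));  // second is high
    }
    uint16_t CombineBE(uint8_t first, uint8_t second) {
      return static_cast<uint16_t>((first << 8) | second);  // first is high
    }

    int main() {
      // The bytecode stream contains 0x34 0x12 at the operand offset.
      assert(CombineLE(0x34, 0x12) == 0x1234);  // little-endian reading
      assert(CombineBE(0x34, 0x12) == 0x3412);  // big-endian reading
    }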
+
+
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kCount,
+ DCHECK_EQ(interpreter::OperandType::kCount8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperand(operand_index);
}
-Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(interpreter::OperandType::kImm8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandSignExtended(operand_index);
@@ -160,15 +207,31 @@ Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kIdx,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
+ switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case interpreter::OperandSize::kByte:
+ DCHECK_EQ(
+ interpreter::OperandType::kIdx8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case interpreter::OperandSize::kShort:
+ DCHECK_EQ(
+ interpreter::OperandType::kIdx16,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
}
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kReg,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+#ifdef DEBUG
+ interpreter::OperandType operand_type =
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index);
+ DCHECK(operand_type == interpreter::OperandType::kReg8 ||
+ operand_type == interpreter::OperandType::kMaybeReg8);
+#endif
return BytecodeOperandSignExtended(operand_index);
}
@@ -238,6 +301,15 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
}
+Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
+ int index) {
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(Int32Constant(index), kPointerSizeLog2));
+ return raw_assembler_->Load(kMachAnyTagged, fixed_array, entry_offset);
+}
+
+
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
return raw_assembler_->Load(kMachAnyTagged, object,
IntPtrConstant(offset - kHeapObjectTag));
@@ -250,8 +322,21 @@ Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
}
-Node* InterpreterAssembler::LoadContextSlot(int slot_index) {
- return LoadContextSlot(ContextTaggedPointer(), slot_index);
+Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return raw_assembler_->Load(kMachAnyTagged, context, offset);
+}
+
+
+Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return raw_assembler_->Store(kMachAnyTagged, context, offset, value,
+ kFullWriteBarrier);
}
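Both context-slot helpers compute an untagged offset as (slot_index << kPointerSizeLog2) + Context::kHeaderSize - kHeapObjectTag; the tag subtraction compensates for the context pointer itself being tagged. Worked arithmetic for a 64-bit target, with the constants assumed for the demo (8-byte pointers, tag of 1, 16-byte header):

    #include <cassert>

    int main() {
      const int kPointerSizeLog2 = 3;     // 8-byte pointers (assumed)
      const int kHeapObjectTag = 1;       // tagged pointers are off by one
      const int kContextHeaderSize = 16;  // assumed map + length words

      int slot_index = 2;
      int offset = (slot_index << kPointerSizeLog2) + kContextHeaderSize -
                   kHeapObjectTag;
      assert(offset == 31);  // raw load address = tagged context pointer + 31
    }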
@@ -267,21 +352,57 @@ Node* InterpreterAssembler::LoadTypeFeedbackVector() {
}
+Node* InterpreterAssembler::CallConstruct(Node* original_constructor,
+ Node* constructor, Node* first_arg,
+ Node* arg_count) {
+ Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
+
+ Node* code_target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg_count;
+ args[1] = original_constructor;
+ args[2] = constructor;
+ args[3] = first_arg;
+ args[4] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
+
+
+Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ Node* stack_pointer_before_call = nullptr;
+ if (FLAG_debug_code) {
+ stack_pointer_before_call = raw_assembler_->LoadStackPointer();
+ }
+ Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
+ if (FLAG_debug_code) {
+ Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
+ AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
+ kUnexpectedStackPointer);
+ }
+ return return_val;
+}
+
+
Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
Node* arg_count) {
- Callable builtin = CodeFactory::PushArgsAndCall(isolate());
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), builtin.descriptor(), 0, CallDescriptor::kNoFlags);
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
- Node* code_target = HeapConstant(builtin.code());
+ Node* code_target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg_count;
args[1] = first_arg;
args[2] = function;
- args[3] = ContextTaggedPointer();
+ args[3] = GetContext();
- return raw_assembler_->CallN(descriptor, code_target, args);
+ return CallN(descriptor, code_target, args);
}
@@ -289,7 +410,19 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* target, Node** args) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
- return raw_assembler_->CallN(call_descriptor, target, args);
+ return CallN(call_descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3) {
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = GetContext();
+ return CallIC(descriptor, target, args);
}
@@ -301,7 +434,7 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
args[1] = arg2;
args[2] = arg3;
args[3] = arg4;
- args[4] = ContextTaggedPointer();
+ args[4] = GetContext();
return CallIC(descriptor, target, args);
}
@@ -315,22 +448,55 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
args[2] = arg3;
args[3] = arg4;
args[4] = arg5;
- args[5] = ContextTaggedPointer();
+ args[5] = GetContext();
return CallIC(descriptor, target, args);
}
+Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
+ Node* arg_count) {
+ Callable callable = CodeFactory::InterpreterCEntry(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
+
+ Node* code_target = HeapConstant(callable.code());
+
+ // Get the function entry from the function id.
+ Node* function_table = raw_assembler_->ExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate()));
+ Node* function_offset = raw_assembler_->Int32Mul(
+ function_id, Int32Constant(sizeof(Runtime::Function)));
+ Node* function = IntPtrAdd(function_table, function_offset);
+ Node* function_entry = raw_assembler_->Load(
+ kMachPtr, function, Int32Constant(offsetof(Runtime::Function, entry)));
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg_count;
+ args[1] = first_arg;
+ args[2] = function_entry;
+ args[3] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
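The dynamic CallRuntime resolves its entry point by treating the runtime function table as an array: the function record lives at table + id * sizeof(Runtime::Function), and the C entry address is a field inside that record, loaded at offsetof(Runtime::Function, entry). A stand-in version of the same pointer arithmetic:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Stand-in for Runtime::Function; only the entry field matters here.
    struct RuntimeFunction {
      int id;
      const char* name;
      uintptr_t entry;  // address of the C++ implementation
    };

    int main() {
      RuntimeFunction table[] = {{0, "kAbort", 0x1000},
                                 {1, "kConvertReceiver", 0x2000}};
      int function_id = 1;
      // function = table base + id * sizeof(Runtime::Function)
      const RuntimeFunction* function = table + function_id;
      // entry = load at offsetof(Runtime::Function, entry)
      uintptr_t entry = *reinterpret_cast<const uintptr_t*>(
          reinterpret_cast<const uint8_t*>(function) +
          offsetof(RuntimeFunction, entry));
      assert(entry == 0x2000);
    }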
+
+
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1) {
- return raw_assembler_->CallRuntime1(function_id, arg1,
- ContextTaggedPointer());
+ return raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2) {
- return raw_assembler_->CallRuntime2(function_id, arg1, arg2,
- ContextTaggedPointer());
+ return raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ return raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3, arg4,
+ GetContext());
}
@@ -349,7 +515,7 @@ void InterpreterAssembler::Return() {
BytecodeOffset(),
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
- ContextTaggedPointer() };
+ GetContext() };
Node* tail_call = raw_assembler_->TailCallN(
call_descriptor(), exit_trampoline_code_object, args);
// This should always be the end node.
@@ -409,7 +575,7 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
new_bytecode_offset,
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
- ContextTaggedPointer() };
+ GetContext() };
Node* tail_call =
raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
// This should always be the end node.
@@ -417,6 +583,24 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
}
+void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+ Node* abort_id = SmiTag(Int32Constant(bailout_reason));
+ CallRuntime(Runtime::kAbort, abort_id);
+ Return();
+}
+
+
+void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+ BailoutReason bailout_reason) {
+ RawMachineAssembler::Label match, no_match;
+ Node* condition = raw_assembler_->WordEqual(lhs, rhs);
+ raw_assembler_->Branch(condition, &match, &no_match);
+ raw_assembler_->Bind(&no_match);
+ Abort(bailout_reason);
+ raw_assembler_->Bind(&match);
+}
+
+
void InterpreterAssembler::AddEndInput(Node* input) {
DCHECK_NOT_NULL(input);
end_nodes_.push_back(input);
@@ -432,6 +616,20 @@ void InterpreterAssembler::End() {
}
+// static
+bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ return false;
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+ return true;
+#else
+#error "Unknown Architecture"
+#endif
+}
+
+
// RawMachineAssembler delegate helpers:
Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
@@ -452,6 +650,6 @@ Schedule* InterpreterAssembler::schedule() {
Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
-} // namespace interpreter
+} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/interpreter-assembler.h b/deps/v8/src/compiler/interpreter-assembler.h
index 67ab9cc2a9..65955a92ba 100644
--- a/deps/v8/src/compiler/interpreter-assembler.h
+++ b/deps/v8/src/compiler/interpreter-assembler.h
@@ -47,7 +47,7 @@ class InterpreterAssembler {
Node* BytecodeOperandIdx(int operand_index);
// Returns the Imm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
- Node* BytecodeOperandImm8(int operand_index);
+ Node* BytecodeOperandImm(int operand_index);
// Returns the register index for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandReg(int operand_index);
@@ -56,7 +56,12 @@ class InterpreterAssembler {
Node* GetAccumulator();
void SetAccumulator(Node* value);
+ // Context.
+ Node* GetContext();
+ void SetContext(Node* value);
+
// Loads from and stores to the interpreter register file.
+ Node* LoadRegister(interpreter::Register reg);
Node* LoadRegister(Node* reg_index);
Node* StoreRegister(Node* value, Node* reg_index);
@@ -83,31 +88,48 @@ class InterpreterAssembler {
// Load constant at |index| in the constant pool.
Node* LoadConstantPoolEntry(Node* index);
+ // Load an element from a fixed array on the heap.
+ Node* LoadFixedArrayElement(Node* fixed_array, int index);
+
// Load a field from an object on the heap.
Node* LoadObjectField(Node* object, int offset);
- // Load |slot_index| from a context.
+ // Load |slot_index| from |context|.
Node* LoadContextSlot(Node* context, int slot_index);
-
- // Load |slot_index| from the current context.
- Node* LoadContextSlot(int slot_index);
+ Node* LoadContextSlot(Node* context, Node* slot_index);
+ // Stores |value| into |slot_index| of |context|.
+ Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
// Load the TypeFeedbackVector for the current function.
Node* LoadTypeFeedbackVector();
- // Call JSFunction or Callable |function| with |arg_count| (not including
- // receiver) and the first argument located at |first_arg|.
+ // Call constructor |constructor| with |arg_count| arguments (not
+ // including receiver) and the first argument located at
+ // |first_arg|. The |original_constructor| is the same as the
+ // |constructor| for the new keyword, but differs for the super
+ // keyword.
+ Node* CallConstruct(Node* original_constructor, Node* constructor,
+ Node* first_arg, Node* arg_count);
+
+ // Call JSFunction or Callable |function| with |arg_count|
+ // arguments (not including receiver) and the first argument
+ // located at |first_arg|.
Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
// Call an IC code stub.
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3, Node* arg4);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
// Call runtime function.
+ Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4);
// Jump relative to the current bytecode by |jump_offset|.
void Jump(Node* jump_offset);
@@ -122,10 +144,15 @@ class InterpreterAssembler {
// Dispatch to the bytecode.
void Dispatch();
+ // Abort with the given bailout reason.
+ void Abort(BailoutReason bailout_reason);
+
protected:
// Close the graph.
void End();
+ static bool TargetSupportsUnalignedAccess();
+
// Protected helpers (for testing) which delegate to RawMachineAssembler.
CallDescriptor* call_descriptor() const;
Graph* graph();
@@ -139,8 +166,6 @@ class InterpreterAssembler {
Node* BytecodeOffset();
// Returns a raw pointer to first entry in the interpreter dispatch table.
Node* DispatchTableRawPointer();
- // Returns a tagged pointer to the current context.
- Node* ContextTaggedPointer();
// Returns the offset of register |index| relative to RegisterFilePointer().
Node* RegisterFrameOffset(Node* index);
@@ -148,7 +173,9 @@ class InterpreterAssembler {
Node* SmiShiftBitsConstant();
Node* BytecodeOperand(int operand_index);
Node* BytecodeOperandSignExtended(int operand_index);
+ Node* BytecodeOperandShort(int operand_index);
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
Node* CallJSBuiltin(int context_index, Node* receiver, Node** js_args,
int js_arg_count);
@@ -161,6 +188,9 @@ class InterpreterAssembler {
// Starts next instruction dispatch at |new_bytecode_offset|.
void DispatchTo(Node* new_bytecode_offset);
+ // Abort operations for debug code.
+ void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
+
// Adds an end node of the graph.
void AddEndInput(Node* input);
@@ -173,12 +203,13 @@ class InterpreterAssembler {
base::SmartPointer<RawMachineAssembler> raw_assembler_;
ZoneVector<Node*> end_nodes_;
Node* accumulator_;
+ Node* context_;
bool code_generated_;
DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};
-} // namespace interpreter
+} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 49ed031182..6db5f99e3b 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
#include "src/types.h"
@@ -87,9 +88,42 @@ class JSCallReduction {
JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- simplified_(jsgraph->zone()) {}
+ : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+
+
+// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
+Reduction JSBuiltinReducer::ReduceFunctionCall(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> apply = Handle<JSFunction>::cast(
+ HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
+ // Change context of {node} to the Function.prototype.call context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
+ // Remove the target from {node} and use the receiver as target instead, and
+ // the thisArg becomes the new receiver. If thisArg was not provided, insert
+ // undefined instead.
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode;
+ if (arity == 2) {
+ // The thisArg was not provided, use undefined as receiver.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else {
+ // Just remove the target, which is the first value input.
+ convert_mode = ConvertReceiverMode::kAny;
+ node->RemoveInput(0);
+ --arity;
+ }
+ // TODO(turbofan): Migrate the call count to the new operator?
+ NodeProperties::ChangeOp(node, javascript()->CallFunction(
+ arity, p.language_mode(), VectorSlotPair(),
+ convert_mode, p.tail_call_mode()));
+ return Changed(node);
+}
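What ReduceFunctionCall does to the node's value inputs is easiest to see on a concrete call. For f.call(thisArg, a, b) the inputs arrive as [call-builtin, f, thisArg, a, b]; dropping the builtin makes f the target and thisArg the receiver. For a bare f.call() they arrive as [call-builtin, f], and undefined is spliced in as the receiver. A small vector-based illustration of both branches:

    #include <cassert>
    #include <string>
    #include <vector>

    using Inputs = std::vector<std::string>;

    Inputs ReduceCall(Inputs in) {
      if (in.size() == 2) {
        in[0] = in[1];        // receiver f becomes the target...
        in[1] = "undefined";  // ...and undefined becomes the receiver
      } else {
        in.erase(in.begin());  // drop the builtin; f and thisArg shift down
      }
      return in;
    }

    int main() {
      assert((ReduceCall({"call", "f", "thisArg", "a", "b"}) ==
              Inputs{"f", "thisArg", "a", "b"}));
      assert((ReduceCall({"call", "f"}) == Inputs{"f", "undefined"}));
    }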
// ECMA-262, section 15.8.2.11.
@@ -151,6 +185,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
+ case kFunctionCall:
+ return ReduceFunctionCall(node);
case kMathMax:
reduction = ReduceMathMax(node);
break;
@@ -175,6 +211,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
+Isolate* JSBuiltinReducer::isolate() const { return jsgraph()->isolate(); }
+
+
CommonOperatorBuilder* JSBuiltinReducer::common() const {
return jsgraph()->common();
}
@@ -184,6 +223,16 @@ MachineOperatorBuilder* JSBuiltinReducer::machine() const {
return jsgraph()->machine();
}
+
+SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
+ return jsgraph()->simplified();
+}
+
+
+JSOperatorBuilder* JSBuiltinReducer::javascript() const {
+ return jsgraph()->javascript();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 66b5723246..772cbdbf25 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -15,7 +14,9 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class JSGraph;
+class JSOperatorBuilder;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
class JSBuiltinReducer final : public AdvancedReducer {
@@ -26,18 +27,20 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceFunctionCall(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
- JSGraph* jsgraph() const { return jsgraph_; }
Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
+ JSOperatorBuilder* javascript() const;
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 0ad25e179d..a4f3ff4986 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -95,6 +95,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Success. The context load can be replaced with the constant.
// TODO(titzer): record the specialization for sharing code across multiple
// contexts that have the same value in the corresponding context slot.
+ if (value->IsConsString()) {
+ value = String::Flatten(Handle<String>::cast(value), TENURED);
+ }
Node* constant = jsgraph_->Constant(value);
ReplaceWithValue(node, constant);
return Replace(constant);
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 98b1827492..6e4b0def56 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -52,7 +52,10 @@ Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
if (index == Linkage::kJSFunctionCallClosureParamIndex) {
object = frame()->function();
} else if (index == parameters_count) {
- // The Parameter index (arity + 1) is the context.
+ // The Parameter index (arity + 1) is the parameter count.
+ object = Smi::FromInt(parameters_count - 1);
+ } else if (index == parameters_count + 1) {
+ // The Parameter index (arity + 2) is the context.
object = frame()->context();
} else {
// The Parameter index 0 is the receiver.
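After this change the specialized frame exposes two synthetic trailing parameters instead of one: index arity+1 now yields the argument count as a Smi and arity+2 yields the context, while index 0 is still the receiver and the closure uses its own sentinel index. A lookup-table restatement of the dispatch above, with the sentinel value assumed:

    #include <cassert>
    #include <string>

    std::string ParameterObject(int index, int parameters_count) {
      const int kClosureParamIndex = -1;  // sentinel; exact value is assumed
      if (index == kClosureParamIndex) return "function";
      if (index == parameters_count) return "argument count (Smi)";
      if (index == parameters_count + 1) return "context";
      return "receiver or argument";  // index 0 is the receiver
    }

    int main() {
      int parameters_count = 3;  // receiver plus two arguments
      assert(ParameterObject(-1, parameters_count) == "function");
      assert(ParameterObject(3, parameters_count) == "argument count (Smi)");
      assert(ParameterObject(4, parameters_count) == "context");
      assert(ParameterObject(0, parameters_count) == "receiver or argument");
    }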
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index eac0565786..8c363d3e8b 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -117,6 +117,7 @@ REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
+REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
#undef REPLACE_RUNTIME
@@ -286,7 +287,7 @@ void JSGenericLowering::LowerJSToObject(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
+ const PropertyAccess& p = PropertyAccessOf(node->op());
Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -296,7 +297,7 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
+ NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -306,40 +307,32 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
- if (p.slot_index() >= 0) {
- Callable callable = CodeFactory::LoadGlobalViaContext(isolate(), 0);
- Node* script_context = node->InputAt(0);
- node->ReplaceInput(0, jsgraph()->Int32Constant(p.slot_index()));
- node->ReplaceInput(1, script_context); // Set new context...
- node->RemoveInput(2);
- node->RemoveInput(2); // ...instead of old one.
- ReplaceWithStubCall(node, callable, flags);
-
- } else {
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
- node->RemoveInput(0); // script context
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithStubCall(node, callable, flags);
- }
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+ // Load global object from the context.
+ Node* global = graph()->NewNode(machine()->Load(kMachAnyTagged), context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(
+ Context::GLOBAL_OBJECT_INDEX)),
+ effect, graph()->start());
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const StorePropertyParameters& p = StorePropertyParametersOf(node->op());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
- // We have a special case where we do keyed stores but don't have a type
- // feedback vector slot allocated to support it. In this case, install
- // the megamorphic keyed store stub which needs neither vector nor slot.
- bool use_vector_slot = FLAG_vector_stores && p.feedback().index() != -1;
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), language_mode,
- (use_vector_slot || !FLAG_vector_stores) ? UNINITIALIZED : MEGAMORPHIC);
- if (use_vector_slot) {
+ isolate(), language_mode, UNINITIALIZED);
+ if (FLAG_vector_stores) {
+ DCHECK(p.feedback().index() != -1);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
} else {
node->RemoveInput(3);
@@ -351,7 +344,7 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
+ NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -367,35 +360,27 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
- if (p.slot_index() >= 0) {
- Callable callable =
- CodeFactory::StoreGlobalViaContext(isolate(), 0, p.language_mode());
- Node* script_context = node->InputAt(0);
- Node* value = node->InputAt(2);
- node->ReplaceInput(0, jsgraph()->Int32Constant(p.slot_index()));
- node->ReplaceInput(1, value);
- node->ReplaceInput(2, script_context); // Set new context...
- node->RemoveInput(3);
- node->RemoveInput(3); // ...instead of old one.
- ReplaceWithStubCall(node, callable, flags);
-
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ // Load global object from the context.
+ Node* global = graph()->NewNode(machine()->Load(kMachAnyTagged), context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(
+ Context::GLOBAL_OBJECT_INDEX)),
+ effect, graph()->start());
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ if (FLAG_vector_stores) {
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
} else {
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
- node->RemoveInput(0); // script context
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3,
- jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
+ node->RemoveInput(3);
}
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
@@ -456,15 +441,14 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
}
-void JSGenericLowering::LowerJSLoadDynamicGlobal(Node* node) {
- const DynamicGlobalAccess& access = DynamicGlobalAccessOf(node->op());
+void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
+ const DynamicAccess& access = DynamicAccessOf(node->op());
Runtime::FunctionId function_id =
(access.typeof_mode() == NOT_INSIDE_TYPEOF)
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
Node* projection = graph()->NewNode(common()->Projection(0), node);
NodeProperties::ReplaceUses(node, projection, node, node, node);
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
node->RemoveInput(NodeProperties::FirstValueIndex(node));
node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
ReplaceWithRuntimeCall(node, function_id);
@@ -472,16 +456,6 @@ void JSGenericLowering::LowerJSLoadDynamicGlobal(Node* node) {
}
-void JSGenericLowering::LowerJSLoadDynamicContext(Node* node) {
- const DynamicContextAccess& access = DynamicContextAccessOf(node->op());
- Node* projection = graph()->NewNode(common()->Projection(0), node);
- NodeProperties::ReplaceUses(node, projection, node, node, node);
- node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
- ReplaceWithRuntimeCall(node, Runtime::kLoadLookupSlot);
- projection->ReplaceInput(0, node);
-}
-
-
void JSGenericLowering::LowerJSCreate(Node* node) { UNIMPLEMENTED(); }
@@ -546,6 +520,7 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
+ // TODO(bmeurer): Use the Construct builtin here.
int arity = OpParameter<int>(node);
CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
@@ -566,24 +541,27 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
void JSGenericLowering::LowerJSCallFunction(Node* node) {
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- int arg_count = static_cast<int>(p.arity() - 2);
- CallFunctionStub stub(isolate(), arg_count, p.flags());
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ ConvertReceiverMode const mode = p.convert_mode();
+ Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- if (p.AllowTailCalls()) {
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
flags |= CallDescriptor::kSupportsTailCalls;
}
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), d, static_cast<int>(p.arity() - 1), flags);
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
+ AdjustFrameStatesForCall(node);
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
@@ -794,6 +772,27 @@ void JSGenericLowering::LowerJSForInStep(Node* node) {
}
+void JSGenericLowering::LowerJSLoadMessage(Node* node) {
+ ExternalReference message_address =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+ node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
+ node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
+ NodeProperties::ChangeOp(node, machine()->Load(kMachAnyTagged));
+}
+
+
+void JSGenericLowering::LowerJSStoreMessage(Node* node) {
+ ExternalReference message_address =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+ node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
+ node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
+ StoreRepresentation representation(kMachAnyTagged, kNoWriteBarrier);
+ NodeProperties::ChangeOp(node, machine()->Store(representation));
+}
+
+
void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
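
The two new message lowerings above turn JSLoadMessage/JSStoreMessage into raw tagged
loads and stores at a fixed external address (the isolate's pending-message slot),
with no write barrier since the slot lives off the JS heap. A minimal stand-in
illustration, not V8 code:

    #include <cstdint>
    #include <cstdio>

    // Stands in for ExternalReference::address_of_pending_message_obj(): a
    // fixed off-heap slot, so a plain pointer access suffices.
    static intptr_t pending_message_slot = 0;

    intptr_t LoadMessage() { return pending_message_slot; }  // JSLoadMessage
    void StoreMessage(intptr_t value) {                      // JSStoreMessage
      pending_message_slot = value;  // no write barrier: slot is off-heap
    }

    int main() {
      StoreMessage(42);
      std::printf("%ld\n", static_cast<long>(LoadMessage()));
    }
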
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
new file mode 100644
index 0000000000..497f098baf
--- /dev/null
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -0,0 +1,300 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-global-object-specialization.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
+ Handle<Context> context;
+ bool immutable;
+ int index;
+};
+
+
+JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
+ Editor* editor, JSGraph* jsgraph, Flags flags,
+ Handle<JSGlobalObject> global_object, CompilationDependencies* dependencies)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ global_object_(global_object),
+ script_context_table_(
+ global_object->native_context()->script_context_table(), isolate()),
+ dependencies_(dependencies),
+ type_cache_(TypeCache::Get()) {}
+
+
+Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadGlobal:
+ return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSStoreGlobal:
+ return ReduceJSStoreGlobal(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
+ Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(name, &result)) {
+ if (result.context->is_the_hole(result.index)) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ Node* value = effect = graph()->NewNode(
+ javascript()->LoadContext(0, result.index, result.immutable), context,
+ context, effect);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+ // Look it up on the global object instead. We only deal with own data
+ // properties of the global object here (represented as PropertyCell).
+ LookupIterator it(global_object(), name, LookupIterator::OWN);
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+
+ // A load from a non-configurable, read-only data property on the global
+ // object can be constant-folded, even without deoptimization support.
+ if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+ // A load from a non-configurable data property on the global can be lowered to
+ // a field load, even without deoptimization, because the property cannot be
+ // deleted or reconfigured to an accessor/interceptor property. Yet, if
+ // deoptimization support is available, we can constant-fold certain global
+ // properties or at least lower them to field loads annotated with more
+ // precise type feedback.
+ Type* property_cell_value_type = Type::Tagged();
+ if (flags() & kDeoptimizationEnabled) {
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or the global property is configurable (i.e.
+ // can be deleted or reconfigured to an accessor property).
+ if (property_details.cell_type() != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+
+ // A load from a constant/undefined global property can be constant-folded.
+ if ((property_details.cell_type() == PropertyCellType::kConstant ||
+ property_details.cell_type() == PropertyCellType::kUndefined)) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+ // A load from a constant-type cell can benefit from type feedback.
+ if (property_details.cell_type() == PropertyCellType::kConstantType) {
+ // Compute proper type based on the current value in the cell.
+ if (property_cell_value->IsSmi()) {
+ property_cell_value_type = type_cache_.kSmi;
+ } else if (property_cell_value->IsNumber()) {
+ property_cell_value_type = type_cache_.kHeapNumber;
+ } else {
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ property_cell_value_type =
+ Type::Class(property_cell_value_map, graph()->zone());
+ }
+ }
+ } else if (property_details.IsConfigurable()) {
+ // Access to configurable global properties requires deoptimization support.
+ return NoChange();
+ }
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+
+Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
+ Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(name, &result)) {
+ if (result.context->is_the_hole(result.index)) return NoChange();
+ if (result.immutable) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+ context, value, context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ // Look it up on the global object instead. We only deal with own data
+ // properties of the global object here (represented as PropertyCell).
+ LookupIterator it(global_object(), name, LookupIterator::OWN);
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+
+ // Don't even bother trying to lower stores to read-only data properties.
+ if (property_details.IsReadOnly()) return NoChange();
+ switch (property_details.cell_type()) {
+ case PropertyCellType::kUndefined: {
+ return NoChange();
+ }
+ case PropertyCellType::kConstant: {
+ // A store to a constant property cell requires deoptimization support,
+ // because we might even need to eagerly deoptimize on a mismatch.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
+ jsgraph()->Constant(property_cell_value));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ break;
+ }
+ case PropertyCellType::kConstantType: {
+ // A store to a constant-type property cell requires deoptimization support,
+ // because we might even need to eagerly deoptimize on a mismatch.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Type* property_cell_value_type = Type::TaggedSigned();
+ if (property_cell_value->IsHeapObject()) {
+ // Deoptimize if the {value} is a Smi.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ effect, if_true);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+
+ // Load the {value} map and check it against the {property_cell} map.
+ Node* value_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, control);
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), value_map,
+ jsgraph()->HeapConstant(property_cell_value_map));
+ property_cell_value_type = Type::TaggedPointer();
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ case PropertyCellType::kMutable: {
+ // A store to a non-configurable data property on the global can be lowered
+ // to a field store, even without deoptimization, because the property
+ // cannot be deleted or reconfigured to an accessor/interceptor property.
+ if (property_details.IsConfigurable()) {
+ // With deoptimization support, we can lower stores even to configurable
+ // data properties on the global object, by adding a code dependency on
+ // the cell.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForPropertyCellValue()),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+
+bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
+ Handle<Name> name, ScriptContextTableLookupResult* result) {
+ if (!name->IsString()) return false;
+ ScriptContextTable::LookupResult lookup_result;
+ if (!ScriptContextTable::Lookup(script_context_table(),
+ Handle<String>::cast(name), &lookup_result)) {
+ return false;
+ }
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_context_table(), lookup_result.context_index);
+ result->context = script_context;
+ result->immutable = IsImmutableVariableMode(lookup_result.mode);
+ result->index = lookup_result.slot_index;
+ return true;
+}
+
+
+Graph* JSGlobalObjectSpecialization::graph() const {
+ return jsgraph()->graph();
+}
+
+
+Isolate* JSGlobalObjectSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
+
+SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
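
To summarize the store path just added: the lowering chosen depends on the property
cell type and on whether deoptimization support is available. A compact,
self-contained restatement of that decision table (illustration only, not V8 code):

    #include <cstdio>

    enum class CellType { kUndefined, kConstant, kConstantType, kMutable };

    // Mirrors the switch in ReduceJSStoreGlobal above, minus the graph
    // construction details.
    const char* StoreLowering(CellType type, bool deopt, bool configurable) {
      switch (type) {
        case CellType::kUndefined:
          return "no change";
        case CellType::kConstant:
          return deopt ? "deopt unless value == cell value" : "no change";
        case CellType::kConstantType:
          return deopt ? "type check + deopt, then field store" : "no change";
        case CellType::kMutable:
          return (!configurable || deopt) ? "plain field store" : "no change";
      }
      return "unreachable";
    }

    int main() {
      std::printf("%s\n", StoreLowering(CellType::kMutable, false, false));
      std::printf("%s\n", StoreLowering(CellType::kConstant, true, false));
    }
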
diff --git a/deps/v8/src/compiler/js-global-object-specialization.h b/deps/v8/src/compiler/js-global-object-specialization.h
new file mode 100644
index 0000000000..49b4114676
--- /dev/null
+++ b/deps/v8/src/compiler/js-global-object-specialization.h
@@ -0,0 +1,84 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class ScriptContextTable;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Specializes a given JSGraph to a given global object, potentially constant
+// folding some {JSLoadGlobal} nodes or strength reducing some {JSStoreGlobal}
+// nodes.
+class JSGlobalObjectSpecialization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ Handle<JSGlobalObject> global_object,
+ CompilationDependencies* dependencies);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSLoadGlobal(Node* node);
+ Reduction ReduceJSStoreGlobal(Node* node);
+
+ struct ScriptContextTableLookupResult;
+ bool LookupInScriptContextTable(Handle<Name> name,
+ ScriptContextTableLookupResult* result);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ Flags flags() const { return flags_; }
+ Handle<JSGlobalObject> global_object() const { return global_object_; }
+ Handle<ScriptContextTable> script_context_table() const {
+ return script_context_table_;
+ }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ Handle<JSGlobalObject> global_object_;
+ Handle<ScriptContextTable> script_context_table_;
+ CompilationDependencies* const dependencies_;
+ TypeCache const& type_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSGlobalObjectSpecialization::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 9b6b187c6a..782236fe0c 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -11,8 +11,9 @@ namespace v8 {
namespace internal {
namespace compiler {
-Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
- return graph()->NewNode(common()->HeapConstant(object));
+Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> value) {
+ // TODO(bmeurer): Flatten cons strings here before we canonicalize them?
+ return graph()->NewNode(common()->HeapConstant(value));
}
@@ -29,6 +30,12 @@ Node* JSGraph::CEntryStubConstant(int result_size) {
}
+Node* JSGraph::EmptyFixedArrayConstant() {
+ return CACHED(kEmptyFixedArrayConstant,
+ ImmovableHeapConstant(factory()->empty_fixed_array()));
+}
+
+
Node* JSGraph::UndefinedConstant() {
return CACHED(kUndefinedConstant,
ImmovableHeapConstant(factory()->undefined_value()));
@@ -78,7 +85,7 @@ Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
// TODO(titzer): We could also match against the addresses of immortal
// immovables here, even without access to the heap, thus always
// canonicalizing references to them.
- return graph()->NewNode(common()->HeapConstant(value));
+ return ImmovableHeapConstant(value);
}
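
The CACHED pattern used for EmptyFixedArrayConstant above memoizes one node per
constant so that every use of the constant is reference-equal. A minimal sketch of
that memoization, with hypothetical names:

    #include <cstdio>

    struct Node { int id; };

    class ConstantCache {
     public:
      // Allocates the node on first use and returns the same node afterwards,
      // keeping uses of the constant canonical (reference-equal).
      Node* EmptyFixedArrayConstant() {
        if (empty_fixed_array_ == nullptr) {
          empty_fixed_array_ = new Node{next_id_++};  // leaked for brevity
        }
        return empty_fixed_array_;
      }

     private:
      Node* empty_fixed_array_ = nullptr;
      int next_id_ = 0;
    };

    int main() {
      ConstantCache cache;
      std::printf("%d\n", cache.EmptyFixedArrayConstant() ==
                              cache.EmptyFixedArrayConstant());  // prints 1
    }
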
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 4f23773259..16760a5a9d 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -17,19 +17,22 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SimplifiedOperatorBuilder;
class Typer;
// Implements a facade on a Graph, enhancing the graph with JS-specific
-// notions, including a builder for for JS* operators, canonicalized global
+// notions, including various builders for operators, canonicalized global
// constants, and various helper methods.
class JSGraph : public ZoneObject {
public:
JSGraph(Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
- JSOperatorBuilder* javascript, MachineOperatorBuilder* machine)
+ JSOperatorBuilder* javascript, SimplifiedOperatorBuilder* simplified,
+ MachineOperatorBuilder* machine)
: isolate_(isolate),
graph_(graph),
common_(common),
javascript_(javascript),
+ simplified_(simplified),
machine_(machine),
cache_(zone()) {
for (int i = 0; i < kNumCachedNodes; i++) cached_nodes_[i] = nullptr;
@@ -37,6 +40,7 @@ class JSGraph : public ZoneObject {
// Canonicalized global constants.
Node* CEntryStubConstant(int result_size);
+ Node* EmptyFixedArrayConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -117,8 +121,9 @@ class JSGraph : public ZoneObject {
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
- JSOperatorBuilder* javascript() const { return javascript_; }
CommonOperatorBuilder* common() const { return common_; }
+ JSOperatorBuilder* javascript() const { return javascript_; }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
MachineOperatorBuilder* machine() const { return machine_; }
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
@@ -130,6 +135,7 @@ class JSGraph : public ZoneObject {
private:
enum CachedNode {
kCEntryStubConstant,
+ kEmptyFixedArrayConstant,
kUndefinedConstant,
kTheHoleConstant,
kTrueConstant,
@@ -147,6 +153,7 @@ class JSGraph : public ZoneObject {
Graph* graph_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
+ SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
CommonNodeCache cache_;
Node* cached_nodes_[kNumCachedNodes];
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
new file mode 100644
index 0000000000..ec00e9bde4
--- /dev/null
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-inlining-heuristic.h"
+
+#include "src/compiler.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSInliningHeuristic::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
+
+ // Check if we already saw that {node} before, and if so, just skip it.
+ if (seen_.find(node->id()) != seen_.end()) return NoChange();
+ seen_.insert(node->id());
+
+ Node* callee = node->InputAt(0);
+ HeapObjectMatcher match(callee);
+ if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+ // Functions marked with %SetForceInlineFlag are immediately inlined.
+ if (function->shared()->force_inline()) {
+ return inliner_.ReduceJSCallFunction(node, function);
+ }
+
+ // Handle special inlining modes right away:
+ // - For restricted inlining: stop all handling at this point.
+ // - For stressing inlining: immediately handle all functions.
+ switch (mode_) {
+ case kRestrictedInlining:
+ return NoChange();
+ case kStressInlining:
+ return inliner_.ReduceJSCallFunction(node, function);
+ case kGeneralInlining:
+ break;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Everything below this line is part of the inlining heuristic.
+ // ---------------------------------------------------------------------------
+
+ // Built-in functions are handled by the JSBuiltinReducer.
+ if (function->shared()->HasBuiltinFunctionId()) return NoChange();
+
+ // Quick check on source code length to avoid parsing large candidates.
+ if (function->shared()->SourceSize() > FLAG_max_inlined_source_size) {
+ return NoChange();
+ }
+
+ // Quick check on the size of the AST to avoid parsing large candidates.
+ if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+ return NoChange();
+ }
+
+ // Avoid inlining within or across the boundary of asm.js code.
+ if (info_->shared_info()->asm_function()) return NoChange();
+ if (function->shared()->asm_function()) return NoChange();
+
+ // Stop inlining once the maximum allowed level is reached.
+ int level = 0;
+ for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ frame_state->opcode() == IrOpcode::kFrameState;
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0)) {
+ if (++level > FLAG_max_inlining_levels) return NoChange();
+ }
+
+ // Gather feedback on how often this call site has been hit before.
+ CallFunctionParameters p = CallFunctionParametersOf(node->op());
+ int calls = -1; // Same default as CallICNexus::ExtractCallCount.
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ calls = nexus.ExtractCallCount();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Everything above this line is part of the inlining heuristic.
+ // ---------------------------------------------------------------------------
+
+ // In the general case we remember the candidate for later.
+ candidates_.insert({function, node, calls});
+ return NoChange();
+}
+
+
+void JSInliningHeuristic::Finalize() {
+ if (candidates_.empty()) return; // Nothing to do without candidates.
+ if (FLAG_trace_turbo_inlining) PrintCandidates();
+
+ while (!candidates_.empty()) {
+ if (cumulative_count_ > FLAG_max_inlined_nodes_cumulative) break;
+ auto i = candidates_.begin();
+ Candidate const& candidate = *i;
+ inliner_.ReduceJSCallFunction(candidate.node, candidate.function);
+ cumulative_count_ += candidate.function->shared()->ast_node_count();
+ candidates_.erase(i);
+ }
+}
+
+
+bool JSInliningHeuristic::CandidateCompare::operator()(
+ const Candidate& left, const Candidate& right) const {
+ return left.node != right.node && left.calls >= right.calls;
+}
+
+
+void JSInliningHeuristic::PrintCandidates() {
+ PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
+ for (const Candidate& candidate : candidates_) {
+ PrintF(" id:%d, calls:%d, size[source]:%d, size[ast]:%d / %s\n",
+ candidate.node->id(), candidate.calls,
+ candidate.function->shared()->SourceSize(),
+ candidate.function->shared()->ast_node_count(),
+ candidate.function->shared()->DebugName()->ToCString().get());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
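
The comparator above orders candidates hottest-first and makes re-insertions of the
same call node compare equal, so duplicates are dropped. A self-contained
illustration of that behavior (not V8 code):

    #include <cstdio>
    #include <set>

    struct Candidate { int node_id; int calls; };

    // Same shape as CandidateCompare above: equal only when the node matches,
    // otherwise ordered by descending call count.
    struct Compare {
      bool operator()(const Candidate& l, const Candidate& r) const {
        return l.node_id != r.node_id && l.calls >= r.calls;
      }
    };

    int main() {
      std::set<Candidate, Compare> candidates;
      candidates.insert({1, 10});
      candidates.insert({2, 99});
      candidates.insert({1, 10});  // same node: compares equal, not inserted
      for (const Candidate& c : candidates) {
        std::printf("node %d, calls %d\n", c.node_id, c.calls);  // hottest first
      }
    }
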
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
new file mode 100644
index 0000000000..7f577475bf
--- /dev/null
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INLINING_HEURISTIC_H_
+#define V8_COMPILER_JS_INLINING_HEURISTIC_H_
+
+#include "src/compiler/js-inlining.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSInliningHeuristic final : public AdvancedReducer {
+ public:
+ enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
+ JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
+ CompilationInfo* info, JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ mode_(mode),
+ inliner_(editor, local_zone, info, jsgraph),
+ candidates_(local_zone),
+ seen_(local_zone),
+ info_(info) {}
+
+ Reduction Reduce(Node* node) final;
+
+ // Processes the list of candidates gathered while the reducer was running,
+ // and inlines call sites that the heuristic determines to be important.
+ void Finalize() final;
+
+ private:
+ struct Candidate {
+ Handle<JSFunction> function; // The call target being inlined.
+ Node* node; // The call site at which to inline.
+ int calls; // Number of times the call site was hit.
+ };
+
+ // Comparator for candidates.
+ struct CandidateCompare {
+ bool operator()(const Candidate& left, const Candidate& right) const;
+ };
+
+ // Candidates are kept in a sorted set of unique candidates.
+ typedef ZoneSet<Candidate, CandidateCompare> Candidates;
+
+ // Dumps candidates to console.
+ void PrintCandidates();
+
+ Mode const mode_;
+ JSInliner inliner_;
+ Candidates candidates_;
+ ZoneSet<NodeId> seen_;
+ CompilationInfo* info_;
+ int cumulative_count_ = 0;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_INLINING_HEURISTIC_H_
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 0b7c78979c..f041698ab9 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -10,11 +10,15 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/dead-code-elimination.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-global-object-specialization.h"
+#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/rewriter.h"
@@ -53,7 +57,12 @@ class JSCallFunctionAccessor {
return value_inputs - 2;
}
- Node* frame_state() { return NodeProperties::GetFrameStateInput(call_, 0); }
+ Node* frame_state_before() {
+ return NodeProperties::GetFrameStateInput(call_, 1);
+ }
+ Node* frame_state_after() {
+ return NodeProperties::GetFrameStateInput(call_, 0);
+ }
private:
Node* call_;
@@ -125,7 +134,9 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
- // Context is last argument.
+ int const inlinee_arity_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 2;
+ // Context is last parameter.
int const inlinee_context_index =
static_cast<int>(start->op()->ValueOutputCount()) - 1;
@@ -139,10 +150,13 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
case IrOpcode::kParameter: {
int index = 1 + ParameterIndexOf(use->op());
DCHECK_LE(index, inlinee_context_index);
- if (index < inliner_inputs && index < inlinee_context_index) {
+ if (index < inliner_inputs && index < inlinee_arity_index) {
// There is an input from the call, and the index is a value
// projection but not the context, so rewire the input.
Replace(use, call->InputAt(index));
+ } else if (index == inlinee_arity_index) {
+ // The projection is requesting the number of arguments.
+ Replace(use, jsgraph_->Int32Constant(inliner_inputs - 2));
} else if (index == inlinee_context_index) {
// The projection is requesting the inlinee function context.
Replace(use, context);
@@ -214,8 +228,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
Node* JSInliner::CreateArgumentsAdaptorFrameState(
- JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info,
- Zone* temp_zone) {
+ JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info) {
const FrameStateFunctionInfo* state_info =
jsgraph_->common()->CreateFrameStateFunctionInfo(
FrameStateType::kArgumentsAdaptor,
@@ -226,7 +239,7 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
const Operator* op0 = jsgraph_->common()->StateValues(0);
Node* node0 = jsgraph_->graph()->NewNode(op0);
- NodeVector params(temp_zone);
+ NodeVector params(local_zone_);
params.push_back(call->receiver());
for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
params.push_back(call->formal_argument(argument));
@@ -235,9 +248,9 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(
jsgraph_->common()->StateValues(static_cast<int>(params.size()));
Node* params_node = jsgraph_->graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
- return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
- jsgraph_->UndefinedConstant(),
- call->jsfunction(), call->frame_state());
+ return jsgraph_->graph()->NewNode(
+ op, params_node, node0, node0, jsgraph_->UndefinedConstant(),
+ call->jsfunction(), call->frame_state_after());
}
@@ -246,11 +259,32 @@ Reduction JSInliner::Reduce(Node* node) {
JSCallFunctionAccessor call(node);
HeapObjectMatcher match(call.jsfunction());
- if (!match.HasValue()) return NoChange();
-
- if (!match.Value()->IsJSFunction()) return NoChange();
+ if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- if (mode_ == kRestrictedInlining && !function->shared()->force_inline()) {
+
+ return ReduceJSCallFunction(node, function);
+}
+
+
+Reduction JSInliner::ReduceJSCallFunction(Node* node,
+ Handle<JSFunction> function) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ JSCallFunctionAccessor call(node);
+
+ if (!function->shared()->IsInlineable()) {
+ // Function must be inlineable.
+ TRACE("Not inlining %s into %s because callee is not inlineable\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
+ // Class constructors are callable, but [[Call]] will raise an exception.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+ if (IsClassConstructor(function->shared()->kind())) {
+ TRACE("Not inlining %s into %s because callee is classConstructor\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
@@ -281,7 +315,7 @@ Reduction JSInliner::Reduce(Node* node) {
// TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
// not inlining recursive functions. We might want to relax that at some
// point.
- for (Node* frame_state = call.frame_state();
+ for (Node* frame_state = call.frame_state_after();
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
@@ -295,10 +329,23 @@ Reduction JSInliner::Reduce(Node* node) {
}
}
+ // TODO(turbofan): Inlining into a try-block is not yet supported.
+ if (NodeProperties::IsExceptionalCall(node)) {
+ TRACE("Not inlining %s into %s because of surrounding try-block\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
- if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
+ if (info_->is_deoptimization_enabled()) {
+ info.MarkAsDeoptimizationEnabled();
+ }
+ if (info_->is_native_context_specializing()) {
+ info.MarkAsNativeContextSpecializing();
+ }
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
@@ -310,51 +357,111 @@ Reduction JSInliner::Reduce(Node* node) {
return NoChange();
}
+ // In strong mode, in case of too few arguments we need to throw a TypeError,
+ // so we must not inline this call.
+ size_t parameter_count = info.literal()->parameter_count();
+ if (is_strong(info.language_mode()) &&
+ call.formal_arguments() < parameter_count) {
+ TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
if (!Compiler::EnsureDeoptimizationSupport(&info)) {
TRACE("Not inlining %s into %s because deoptimization support failed\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+ // Remember that we inlined this function. This needs to be called right
+ // after we ensure deoptimization support so that the code flusher
+ // does not remove the code with the deoptimization support.
+ info_->AddInlinedFunction(info.shared_info());
+
+ // ----------------------------------------------------------------
+ // After this point, we've made a decision to inline this function.
+ // We shall not bail out of inlining if we got here.
TRACE("Inlining %s into %s\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- Graph graph(info.zone());
+ // TODO(mstarzinger): We could use the temporary zone for the graph because
+ // nodes are copied. This however leads to Zone-Types being allocated in the
+ // wrong zone and makes the engine explode at high speeds. Explosion bad!
+ Graph graph(jsgraph_->zone());
JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
- jsgraph_->javascript(), jsgraph_->machine());
+ jsgraph_->javascript(), jsgraph_->simplified(),
+ jsgraph_->machine());
AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
graph_builder.CreateGraph(false);
+ // TODO(mstarzinger): Unify this with the Pipeline once JSInliner refactoring
+ // starts.
+ if (info.is_native_context_specializing()) {
+ GraphReducer graph_reducer(local_zone_, &graph, jsgraph.Dead());
+ DeadCodeElimination dead_code_elimination(&graph_reducer, &graph,
+ jsgraph.common());
+ CommonOperatorReducer common_reducer(&graph_reducer, &graph,
+ jsgraph.common(), jsgraph.machine());
+ JSGlobalObjectSpecialization global_object_specialization(
+ &graph_reducer, &jsgraph,
+ info.is_deoptimization_enabled()
+ ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
+ : JSGlobalObjectSpecialization::kNoFlags,
+ handle(info.global_object(), info.isolate()), info_->dependencies());
+ JSNativeContextSpecialization native_context_specialization(
+ &graph_reducer, &jsgraph,
+ info.is_deoptimization_enabled()
+ ? JSNativeContextSpecialization::kDeoptimizationEnabled
+ : JSNativeContextSpecialization::kNoFlags,
+ handle(info.global_object()->native_context(), info.isolate()),
+ info_->dependencies(), local_zone_);
+ graph_reducer.AddReducer(&dead_code_elimination);
+ graph_reducer.AddReducer(&common_reducer);
+ graph_reducer.AddReducer(&global_object_specialization);
+ graph_reducer.AddReducer(&native_context_specialization);
+ graph_reducer.ReduceGraph();
+ }
+
// The inlinee specializes to the context from the JSFunction object.
// TODO(turbofan): We might want to load the context from the JSFunction at
// runtime in case we only know the SharedFunctionInfo once we have dynamic
// type feedback in the compiler.
Node* context = jsgraph_->Constant(handle(function->context()));
- CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
+ CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
visitor.CopyGraph();
Node* start = visitor.GetCopy(graph.start());
Node* end = visitor.GetCopy(graph.end());
-
- Node* frame_state = call.frame_state();
- size_t const inlinee_formal_parameters = start->op()->ValueOutputCount() - 3;
- // Insert argument adaptor frame if required.
- if (call.formal_arguments() != inlinee_formal_parameters) {
- // In strong mode, in case of too few arguments we need to throw a
- // TypeError so we must not inline this call.
- if (is_strong(info.language_mode()) &&
- call.formal_arguments() < inlinee_formal_parameters) {
- return NoChange();
- }
- frame_state = CreateArgumentsAdaptorFrameState(&call, info.shared_info(),
- info.zone());
+ Node* frame_state = call.frame_state_after();
+
+ // Insert a JSConvertReceiver node for sloppy callees. Note that the context
+ // passed into this node has to be the callee's context (loaded above). Note
+ // that the frame state passed to the JSConvertReceiver must be the frame
+ // state _before_ the call; it is not necessary to fiddle with the receiver
+ // in that frame state, though, as the conversion of the receiver can be
+ // repeated any number of times without being observable.
+ if (is_sloppy(info.language_mode()) && !function->shared()->native()) {
+ const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* convert = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
+ call.receiver(), context, call.frame_state_before(), effect, start);
+ NodeProperties::ReplaceValueInput(node, convert, 1);
+ NodeProperties::ReplaceEffectInput(node, convert);
}
- // Remember that we inlined this function.
- info_->AddInlinedFunction(info.shared_info());
+ // Insert an argument adaptor frame if required. The callee's formal parameter
+ // count (i.e. value outputs of the start node minus target, receiver, argument
+ // count and context) has to match the number of arguments passed to the call.
+ DCHECK_EQ(static_cast<int>(parameter_count),
+ start->op()->ValueOutputCount() - 4);
+ if (call.formal_arguments() != parameter_count) {
+ frame_state = CreateArgumentsAdaptorFrameState(&call, info.shared_info());
+ }
return InlineCall(node, context, frame_state, start, end);
}
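
The JSConvertReceiver insertion above implements the sloppy-mode receiver fix-up of
ES6 OrdinaryCallBindThis: null/undefined become the global proxy, primitives get
wrapped, objects pass through. A minimal stand-in sketch (not V8 code); because the
mapping is idempotent, repeating it is unobservable, which is why the frame state
before the call can be reused:

    #include <cstdio>

    enum class Kind {
      kUndefined, kNull, kNumber, kString, kObject, kGlobalProxy
    };

    // Idempotent receiver conversion for sloppy-mode callees.
    Kind ConvertReceiver(Kind receiver) {
      switch (receiver) {
        case Kind::kUndefined:
        case Kind::kNull:
          return Kind::kGlobalProxy;  // missing receiver -> global proxy
        case Kind::kNumber:
        case Kind::kString:
          return Kind::kObject;       // primitive receiver -> ToObject wrapper
        default:
          return receiver;            // already an object: unchanged
      }
    }

    int main() {
      Kind r = ConvertReceiver(ConvertReceiver(Kind::kUndefined));
      std::printf("%d\n", r == Kind::kGlobalProxy);  // prints 1: idempotent
    }
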
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 21057e61e4..2ad49842f5 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -19,29 +19,32 @@ namespace compiler {
// Forward declarations.
class JSCallFunctionAccessor;
+// The JSInliner provides the core graph inlining machinery. Note that this
+// class only deals with the mechanics of how to inline one graph into another;
+// heuristics that decide what and how much to inline are beyond its scope.
class JSInliner final : public AdvancedReducer {
public:
- enum Mode { kRestrictedInlining, kGeneralInlining };
-
- JSInliner(Editor* editor, Mode mode, Zone* local_zone, CompilationInfo* info,
+ JSInliner(Editor* editor, Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph)
: AdvancedReducer(editor),
- mode_(mode),
local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph) {}
+ // Reducer interface; eagerly inlines everything.
Reduction Reduce(Node* node) final;
+ // Can be used by inlining heuristics or by testing code directly, without
+ // using the above generic reducer interface of the inlining machinery.
+ Reduction ReduceJSCallFunction(Node* node, Handle<JSFunction> function);
+
private:
- Mode const mode_;
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
- Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
- Handle<SharedFunctionInfo> shared_info,
- Zone* temp_zone);
+ Node* CreateArgumentsAdaptorFrameState(
+ JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info);
Reduction InlineCall(Node* call, Node* context, Node* frame_state,
Node* start, Node* end);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 219a452a7d..379f8b7490 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -6,13 +6,16 @@
#include <stack>
+#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -23,7 +26,7 @@ JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
: AdvancedReducer(editor),
jsgraph_(jsgraph),
mode_(mode),
- simplified_(jsgraph->zone()) {}
+ type_cache_(TypeCache::Get()) {}
Reduction JSIntrinsicLowering::Reduce(Node* node) {
@@ -60,8 +63,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsSmi(node);
case Runtime::kInlineJSValueGetValue:
return ReduceJSValueGetValue(node);
- case Runtime::kInlineLikely:
- return ReduceUnLikely(node, BranchHint::kTrue);
case Runtime::kInlineMapGetInstanceType:
return ReduceMapGetInstanceType(node);
case Runtime::kInlineMathClz32:
@@ -80,8 +81,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceSeqStringGetChar(node, String::TWO_BYTE_ENCODING);
case Runtime::kInlineTwoByteSeqStringSetChar:
return ReduceSeqStringSetChar(node, String::TWO_BYTE_ENCODING);
- case Runtime::kInlineUnlikely:
- return ReduceUnLikely(node, BranchHint::kFalse);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
case Runtime::kInlineIsMinusZero:
@@ -94,12 +93,24 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGetTypeFeedbackVector(node);
case Runtime::kInlineGetCallerJSFunction:
return ReduceGetCallerJSFunction(node);
+ case Runtime::kInlineToInteger:
+ return ReduceToInteger(node);
+ case Runtime::kInlineToLength:
+ return ReduceToLength(node);
+ case Runtime::kInlineToName:
+ return ReduceToName(node);
+ case Runtime::kInlineToNumber:
+ return ReduceToNumber(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
+ case Runtime::kInlineToPrimitive:
+ return ReduceToPrimitive(node);
+ case Runtime::kInlineToString:
+ return ReduceToString(node);
case Runtime::kInlineThrowNotDateError:
return ReduceThrowNotDateError(node);
- case Runtime::kInlineCallFunction:
- return ReduceCallFunction(node);
+ case Runtime::kInlineCall:
+ return ReduceCall(node);
default:
break;
}
@@ -321,35 +332,11 @@ Reduction JSIntrinsicLowering::ReduceStringGetLength(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(
- AccessBuilder::ForStringLength(graph()->zone())),
+ return Change(node, simplified()->LoadField(AccessBuilder::ForStringLength()),
value, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceUnLikely(Node* node, BranchHint hint) {
- std::stack<Node*> nodes_to_visit;
- nodes_to_visit.push(node);
- while (!nodes_to_visit.empty()) {
- Node* current = nodes_to_visit.top();
- nodes_to_visit.pop();
- for (Node* use : current->uses()) {
- if (use->opcode() == IrOpcode::kJSToBoolean) {
- // We have to "look through" ToBoolean calls.
- nodes_to_visit.push(use);
- } else if (use->opcode() == IrOpcode::kBranch) {
- // Actually set the hint on any branch using the intrinsic node.
- NodeProperties::ChangeOp(use, common()->Branch(hint));
- }
- }
- }
- // Apart from adding hints to branchs nodes, this is the identity function.
- Node* value = NodeProperties::GetValueInput(node, 0);
- ReplaceWithValue(node, value);
- return Changed(value);
-}
-
-
Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;
@@ -527,25 +514,101 @@ Reduction JSIntrinsicLowering::ReduceThrowNotDateError(Node* node) {
}
+Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToName(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToName());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
+ if (value_type->Max() <= 0.0) {
+ value = jsgraph()->ZeroConstant();
+ } else if (value_type->Min() >= kMaxSafeInteger) {
+ value = jsgraph()->Constant(kMaxSafeInteger);
+ } else {
+ if (value_type->Min() <= 0.0) {
+ value = graph()->NewNode(
+ common()->Select(kMachAnyTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
+ jsgraph()->ZeroConstant()),
+ jsgraph()->ZeroConstant(), value);
+ value_type = Type::Range(0.0, value_type->Max(), graph()->zone());
+ NodeProperties::SetType(value, value_type);
+ }
+ if (value_type->Max() > kMaxSafeInteger) {
+ value = graph()->NewNode(
+ common()->Select(kMachAnyTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->Constant(kMaxSafeInteger), value),
+ jsgraph()->Constant(kMaxSafeInteger), value);
+ value_type =
+ Type::Range(value_type->Min(), kMaxSafeInteger, graph()->zone());
+ NodeProperties::SetType(value, value_type);
+ }
+ }
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Callable callable = CodeFactory::ToLength(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+}
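
For readers following the graph construction above: ReduceToLength folds the
ES6 ToLength conversion into the graph when the input is statically known to
be an integer. A minimal standalone sketch of the same numeric clamp, in
plain C++ rather than V8 code, assuming the input has already been truncated
to an integer:

    #include <algorithm>

    // ES6 ToLength clamps an integral value into [0, 2^53 - 1].
    double ToLengthClamp(double integral_value) {
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      return std::min(std::max(integral_value, 0.0), kMaxSafeInteger);
    }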
+
+
Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToObject());
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceCallFunction(Node* node) {
- CallRuntimeParameters params = OpParameter<CallRuntimeParameters>(node->op());
- size_t arity = params.arity();
- Node* function = node->InputAt(static_cast<int>(arity - 1));
- while (--arity != 0) {
- node->ReplaceInput(static_cast<int>(arity),
- node->InputAt(static_cast<int>(arity - 1)));
+Reduction JSIntrinsicLowering::ReduceToPrimitive(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(Type::Primitive())) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
- node->ReplaceInput(0, function);
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
+ size_t const arity = CallRuntimeParametersOf(node->op()).arity();
NodeProperties::ChangeOp(
- node,
- javascript()->CallFunction(params.arity(), NO_CALL_FUNCTION_FLAGS, STRICT,
- VectorSlotPair(), ALLOW_TAIL_CALLS));
+ node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kAllow));
return Changed(node);
}
@@ -595,6 +658,9 @@ Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
+Isolate* JSIntrinsicLowering::isolate() const { return jsgraph()->isolate(); }
+
+
CommonOperatorBuilder* JSIntrinsicLowering::common() const {
return jsgraph()->common();
}
@@ -608,6 +674,11 @@ MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
return jsgraph()->machine();
}
+
+SimplifiedOperatorBuilder* JSIntrinsicLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 15e9b4053e..8989ba19a1 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -7,10 +7,14 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
+
namespace compiler {
// Forward declarations.
@@ -18,6 +22,7 @@ class CommonOperatorBuilder;
class JSOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
// Lowers certain JS-level runtime calls.
@@ -50,15 +55,20 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceSeqStringGetChar(Node* node, String::Encoding encoding);
Reduction ReduceSeqStringSetChar(Node* node, String::Encoding encoding);
Reduction ReduceStringGetLength(Node* node);
- Reduction ReduceUnLikely(Node* node, BranchHint hint);
Reduction ReduceValueOf(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
Reduction ReduceGetTypeFeedbackVector(Node* node);
Reduction ReduceGetCallerJSFunction(Node* node);
Reduction ReduceThrowNotDateError(Node* node);
+ Reduction ReduceToInteger(Node* node);
+ Reduction ReduceToLength(Node* node);
+ Reduction ReduceToName(Node* node);
+ Reduction ReduceToNumber(Node* node);
Reduction ReduceToObject(Node* node);
- Reduction ReduceCallFunction(Node* node);
+ Reduction ReduceToPrimitive(Node* node);
+ Reduction ReduceToString(Node* node);
+ Reduction ReduceCall(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
@@ -69,15 +79,17 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
MachineOperatorBuilder* machine() const;
+ SimplifiedOperatorBuilder* simplified() const;
DeoptimizationMode mode() const { return mode_; }
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ TypeCache const& type_cache() const { return type_cache_; }
JSGraph* const jsgraph_;
DeoptimizationMode const mode_;
- SimplifiedOperatorBuilder simplified_;
+ TypeCache const& type_cache_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
new file mode 100644
index 0000000000..9e687bdc07
--- /dev/null
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -0,0 +1,898 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-native-context-specialization.h"
+
+#include "src/accessors.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+#include "src/type-feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSNativeContextSpecialization::JSNativeContextSpecialization(
+ Editor* editor, JSGraph* jsgraph, Flags flags,
+ Handle<Context> native_context, CompilationDependencies* dependencies,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context),
+ dependencies_(dependencies),
+ zone_(zone),
+ type_cache_(TypeCache::Get()),
+ access_info_factory_(dependencies, native_context, graph()->zone()) {}
+
+
+Reduction JSNativeContextSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallFunction:
+ return ReduceJSCallFunction(node);
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSStoreNamed:
+ return ReduceJSStoreNamed(node);
+ case IrOpcode::kJSLoadProperty:
+ return ReduceJSLoadProperty(node);
+ case IrOpcode::kJSStoreProperty:
+ return ReduceJSStoreProperty(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSCallFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Don't mess with JSCallFunction nodes that have a constant {target}.
+ if (HeapObjectMatcher(target).HasValue()) return NoChange();
+ if (!p.feedback().IsValid()) return NoChange();
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ // Avoid cross-context leaks, meaning don't embed references to functions
+ // in other native contexts.
+ Handle<JSFunction> function(JSFunction::cast(cell->value()), isolate());
+ if (function->context()->native_context() != *native_context()) {
+ return NoChange();
+ }
+
+ // Check that the {target} is still the {target_function}.
+ Node* target_function = jsgraph()->HeapConstant(function);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ target, target_function);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallFunction node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceControlInput(node, control);
+ return Changed(node);
+ }
+ // TODO(bmeurer): Also support optimizing bound functions and proxies here.
+ }
+ return NoChange();
+}
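
The speculation pattern above (compare the observed target against the
feedback, continue on the likely branch, and route the unlikely branch to a
Deoptimize node) recurs throughout this file. A self-contained analogy in
plain C++; the names below are illustrative, not V8 API:

    using Fn = int (*)(int);

    // Fast path when the runtime target matches the cached feedback
    // (the ReferenceEqual + Branch(kTrue) above); otherwise bail out,
    // which stands in for the Deoptimize node.
    int CallSpeculative(Fn target, Fn cached_target, Fn bailout, int arg) {
      if (target == cached_target) {
        return cached_target(arg);  // the call target is now a constant
      }
      return bailout(arg);
    }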
+
+
+Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+ Node* node, Node* value, MapHandleList const& receiver_maps,
+ Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
+ Node* index) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamed ||
+ node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Compute property access infos for the receiver maps.
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ if (!access_info_factory().ComputePropertyAccessInfos(
+ receiver_maps, name, access_mode, &access_infos)) {
+ return NoChange();
+ }
+
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) return NoChange();
+
+ // The final states for every polymorphic branch. We join them with
+  // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // The list of "exiting" controls, which currently go to a single deoptimize.
+ // TODO(bmeurer): Consider using an IC as fallback.
+ Node* const exit_effect = effect;
+ ZoneVector<Node*> exit_controls(zone());
+
+ // Ensure that {index} matches the specified {name} (if {index} is given).
+ if (index != nullptr) {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
+ index, jsgraph()->HeapConstant(name));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+
+ // Ensure that {receiver} is a heap object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ Node* receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* receiverissmi_effect = effect;
+
+ // Load the {receiver} map. The resulting effect is the dominating effect for
+ // all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different property access patterns.
+ Node* fallthrough_control = control;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ Node* this_value = value;
+ Node* this_receiver = receiver;
+ Node* this_effect = effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ Type* receiver_type = access_info.receiver_type();
+ if (receiver_type->Is(Type::String())) {
+ // Emit an instance type check for strings.
+ Node* receiver_instance_type = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, this_effect, fallthrough_control);
+ Node* check =
+ graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ } else {
+ // Emit a (sequence of) map checks for other {receiver}s.
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ for (auto i = access_info.receiver_type()->Classes(); !i.Done();
+ i.Advance()) {
+ Handle<Map> map = i.Current();
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
+ receiver_map, jsgraph()->Constant(map));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(this_effect);
+ }
+
+ // The Number case requires special treatment to also deal with Smis.
+ if (receiver_type->Is(Type::Number())) {
+ // Join this check with the "receiver is smi" check above, and mark the
+ // "receiver is smi" check as "consumed" so that we don't deoptimize if
+ // the {receiver} is actually a Smi.
+ if (receiverissmi_control != nullptr) {
+ this_controls.push_back(receiverissmi_control);
+ this_effects.push_back(receiverissmi_effect);
+ receiverissmi_control = receiverissmi_effect = nullptr;
+ }
+ }
+
+ // Create dominating Merge+EffectPhi for this {receiver} type.
+ int const this_control_count = static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ int const this_effect_count = static_cast<int>(this_effects.size());
+ this_effect =
+ (this_control_count == 1)
+ ? this_effects.front()
+ : graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_effect_count, &this_effects.front());
+ }
+
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(receiver_type, holder);
+ }
+
+ // Generate the actual property access.
+ if (access_info.IsNotFound()) {
+ DCHECK_EQ(AccessMode::kLoad, access_mode);
+ if (is_strong(language_mode)) {
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try
+ // blocks rewiring the IfException edge to a runtime call/throw.
+ exit_controls.push_back(this_control);
+ continue;
+ } else {
+ this_value = jsgraph()->UndefinedConstant();
+ }
+ } else if (access_info.IsDataConstant()) {
+ this_value = jsgraph()->Constant(access_info.constant());
+ if (access_mode == AccessMode::kStore) {
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+ } else {
+ DCHECK(access_info.IsDataField());
+ FieldIndex const field_index = access_info.field_index();
+ Type* const field_type = access_info.field_type();
+ if (access_mode == AccessMode::kLoad &&
+ access_info.holder().ToHandle(&holder)) {
+ this_receiver = jsgraph()->Constant(holder);
+ }
+ Node* this_storage = this_receiver;
+ if (!field_index.is_inobject()) {
+ this_storage = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+ this_storage, this_effect, this_control);
+ }
+ FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
+ field_type, kMachAnyTagged};
+ if (access_mode == AccessMode::kLoad) {
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ this_storage = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ }
+ field_access.machine_type = kMachFloat64;
+ }
+ this_value = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value, this_control);
+
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ Callable callable =
+ CodeFactory::AllocateMutableHeapNumber(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ Node* this_box = this_effect = graph()->NewNode(
+ common()->Call(desc),
+ jsgraph()->HeapConstant(callable.code()),
+ jsgraph()->NoContextConstant(), this_effect, this_control);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ this_box, this_value, this_effect, this_control);
+ this_value = this_box;
+
+ field_access.type = Type::TaggedPointer();
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ this_storage = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = kMachFloat64;
+ }
+ } else {
+ // Unboxed double field, we store directly to the field.
+ field_access.machine_type = kMachFloat64;
+ }
+ } else if (field_type->Is(Type::TaggedSigned())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ } else if (field_type->Is(Type::TaggedPointer())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ if (field_type->NumClasses() > 0) {
+ // Emit a (sequence of) map checks for the value.
+ ZoneVector<Node*> this_controls(zone());
+ Node* this_value_map = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), this_value,
+ this_effect, this_control);
+ for (auto i = field_type->Classes(); !i.Done(); i.Advance()) {
+ Handle<Map> field_map(i.Current());
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Internal()),
+ this_value_map, jsgraph()->Constant(field_map));
+ branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(
+ graph()->NewNode(common()->IfTrue(), branch));
+ }
+ exit_controls.push_back(this_control);
+ int const this_control_count =
+ static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count,
+ &this_controls.front());
+ }
+ } else {
+ DCHECK(field_type->Is(Type::Tagged()));
+ }
+ Handle<Map> transition_map;
+ if (access_info.transition_map().ToHandle(&transition_map)) {
+ this_effect = graph()->NewNode(common()->BeginRegion(), this_effect);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
+ jsgraph()->Constant(transition_map), this_effect, this_control);
+ }
+ this_effect = graph()->NewNode(simplified()->StoreField(field_access),
+ this_storage, this_value, this_effect,
+ this_control);
+ if (access_info.HasTransitionMap()) {
+ this_effect =
+ graph()->NewNode(common()->FinishRegion(),
+ jsgraph()->UndefinedConstant(), this_effect);
+ }
+ }
+ }
+
+ // Remember the final state for this property access.
+ values.push_back(this_value);
+ effects.push_back(this_effect);
+ controls.push_back(this_control);
+ }
+
+ // Collect the fallthrough control as final "exit" control.
+ if (fallthrough_control != control) {
+ // Mark the last fallthrough branch as deferred.
+ MarkAsDeferred(fallthrough_control);
+ }
+ exit_controls.push_back(fallthrough_control);
+
+ // Also collect the "receiver is smi" control if we didn't handle the case of
+ // Number primitives in the polymorphic branches above.
+ if (receiverissmi_control != nullptr) {
+ // Mark the "receiver is smi" case as deferred.
+ MarkAsDeferred(receiverissmi_control);
+ DCHECK_EQ(exit_effect, receiverissmi_effect);
+ exit_controls.push_back(receiverissmi_control);
+ }
+
+  // Generate the single "exit" point, which we reach if either all
+  // map/instance type checks failed, or one of the assumptions inside one of
+  // the cases fails (i.e. a failing prototype chain check).
+ // TODO(bmeurer): Consider falling back to IC here if deoptimization is
+ // disabled.
+ int const exit_control_count = static_cast<int>(exit_controls.size());
+ Node* exit_control =
+ (exit_control_count == 1)
+ ? exit_controls.front()
+ : graph()->NewNode(common()->Merge(exit_control_count),
+ exit_control_count, &exit_controls.front());
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ exit_effect, exit_control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(common()->Phi(kMachAnyTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
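
The ObjectIsSmi checks above rely on V8's pointer tagging: small integers
(Smis) carry a zero tag bit, heap object pointers a one. A minimal sketch of
the predicate; the function name is illustrative, the tag values are V8's:

    #include <cstdint>

    // kSmiTag == 0 and kHeapObjectTag == 1, so the low bit of a tagged
    // word distinguishes Smis from heap object pointers.
    inline bool IsSmiWord(intptr_t tagged_word) {
      return (tagged_word & 1) == 0;
    }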
+
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const value = jsgraph()->Dead();
+
+ // Extract receiver maps from the LOAD_IC using the LoadICNexus.
+ MapHandleList receiver_maps;
+ if (!p.feedback().IsValid()) return NoChange();
+ LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, p.name(),
+ AccessMode::kLoad, p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const value = NodeProperties::GetValueInput(node, 1);
+
+ // Extract receiver maps from the STORE_IC using the StoreICNexus.
+ MapHandleList receiver_maps;
+ if (!p.feedback().IsValid()) return NoChange();
+ StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, p.name(),
+ AccessMode::kStore, p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceElementAccess(
+ Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
+ AccessMode access_mode, LanguageMode language_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Compute element access infos for the receiver maps.
+ ZoneVector<ElementAccessInfo> access_infos(zone());
+ if (!access_info_factory().ComputeElementAccessInfos(
+ receiver_maps, access_mode, &access_infos)) {
+ return NoChange();
+ }
+
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) return NoChange();
+
+ // The final states for every polymorphic branch. We join them with
+ // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // The list of "exiting" controls, which currently go to a single deoptimize.
+ // TODO(bmeurer): Consider using an IC as fallback.
+ Node* const exit_effect = effect;
+ ZoneVector<Node*> exit_controls(zone());
+
+ // Ensure that {receiver} is a heap object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ control = graph()->NewNode(common()->IfFalse(), branch);
+
+ // Load the {receiver} map. The resulting effect is the dominating effect for
+ // all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different element access patterns.
+ Node* fallthrough_control = control;
+ for (ElementAccessInfo const& access_info : access_infos) {
+ Node* this_receiver = receiver;
+ Node* this_value = value;
+ Node* this_index = index;
+ Node* this_effect = effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ Type* receiver_type = access_info.receiver_type();
+ {
+ ZoneVector<Node*> this_controls(zone());
+ for (auto i = access_info.receiver_type()->Classes(); !i.Done();
+ i.Advance()) {
+ Handle<Map> map = i.Current();
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
+ receiver_map, jsgraph()->Constant(map));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ }
+ int const this_control_count = static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ }
+
+ // Certain stores need a prototype chain check because shape changes
+ // could allow callbacks on elements in the prototype chain that are
+ // not compatible with (monomorphic) keyed stores.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(receiver_type, holder);
+ }
+
+ // Check that the {index} is actually a Number.
+ if (!NumberMatcher(this_index).HasValue()) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
+ this_control);
+ }
+
+ // Convert the {index} to an unsigned32 value and check if the result is
+ // equal to the original {index}.
+ if (!NumberMatcher(this_index).IsInRange(0.0, kMaxUInt32)) {
+ Node* this_index32 =
+ graph()->NewNode(simplified()->NumberToUint32(), this_index);
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
+ this_index);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_index = this_index32;
+ }
+
+ // TODO(bmeurer): We currently specialize based on elements kind. We should
+ // also be able to properly support strings and other JSObjects here.
+ ElementsKind elements_kind = access_info.elements_kind();
+
+ // Load the elements for the {receiver}.
+ Node* this_elements = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ this_receiver, this_effect, this_control);
+
+ // Don't try to store to a copy-on-write backing store.
+ if (access_mode == AccessMode::kStore &&
+ IsFastSmiOrObjectElementsKind(elements_kind)) {
+ Node* this_elements_map = this_effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ this_elements, this_effect, this_control);
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), this_elements_map,
+ jsgraph()->HeapConstant(factory()->fixed_array_map()));
+ branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+
+ // Load the length of the {receiver}.
+ FieldAccess length_access = {
+        kTaggedBase, JSArray::kLengthOffset, factory()->length_string(),
+ type_cache_.kJSArrayLengthType, kMachAnyTagged};
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ length_access.type = type_cache_.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(elements_kind)) {
+ length_access.type = type_cache_.kFixedArrayLengthType;
+ }
+ Node* this_length = this_effect =
+ graph()->NewNode(simplified()->LoadField(length_access), this_receiver,
+ this_effect, this_control);
+
+ // Check that the {index} is in the valid range for the {receiver}.
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
+ this_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Compute the element access.
+ Type* element_type = Type::Any();
+ MachineType element_machine_type = kMachAnyTagged;
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ element_type = type_cache_.kFloat64;
+ element_machine_type = kMachFloat64;
+ } else if (IsFastSmiElementsKind(elements_kind)) {
+ element_type = type_cache_.kSmi;
+ }
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type};
+
+ // Access the actual element.
+ if (access_mode == AccessMode::kLoad) {
+ this_value = this_effect = graph()->NewNode(
+ simplified()->LoadElement(element_access), this_elements, this_index,
+ this_effect, this_control);
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (IsFastSmiElementsKind(elements_kind)) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value, this_control);
+ }
+ this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
+ this_elements, this_index, this_value,
+ this_effect, this_control);
+ }
+
+ // Remember the final state for this element access.
+ values.push_back(this_value);
+ effects.push_back(this_effect);
+ controls.push_back(this_control);
+ }
+
+ // Collect the fallthrough control as final "exit" control.
+ if (fallthrough_control != control) {
+ // Mark the last fallthrough branch as deferred.
+ MarkAsDeferred(fallthrough_control);
+ }
+ exit_controls.push_back(fallthrough_control);
+
+  // Generate the single "exit" point, which we reach if either all
+  // map/instance type checks failed, or one of the assumptions inside one of
+  // the cases fails (i.e. a failing prototype chain check).
+ // TODO(bmeurer): Consider falling back to IC here if deoptimization is
+ // disabled.
+ int const exit_control_count = static_cast<int>(exit_controls.size());
+ Node* exit_control =
+ (exit_control_count == 1)
+ ? exit_controls.front()
+ : graph()->NewNode(common()->Merge(exit_control_count),
+ exit_control_count, &exit_controls.front());
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ exit_effect, exit_control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(common()->Phi(kMachAnyTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
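
One guard above deserves spelling out: an element index is usable only if
converting it to uint32 round-trips exactly, which rejects fractions,
negative values, and out-of-range values with a single comparison. A hedged
standalone sketch in plain C++ (not V8 code), emulating JS ToUint32
semantics:

    #include <cmath>

    // Mirrors the NumberToUint32 + NumberEqual guard built above: wrap
    // the truncated index modulo 2^32 and require an exact round-trip.
    bool IsValidElementIndex(double index) {
      if (!std::isfinite(index)) return false;
      double wrapped = std::fmod(std::trunc(index), 4294967296.0);  // 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      return wrapped == index;
    }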
+
+
+Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
+ Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
+ AccessMode access_mode, LanguageMode language_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+
+ // Extract receiver maps from the {nexus}.
+ MapHandleList receiver_maps;
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Optimize access for constant {index}.
+ HeapObjectMatcher mindex(index);
+ if (mindex.HasValue() && mindex.Value()->IsPrimitive()) {
+    // Keyed access requires a ToPropertyKey on the {index} before looking up
+    // the property on the object (see ES6 section 12.3.2.1).
+ // We can only do this for non-observable ToPropertyKey invocations,
+ // so we limit the constant indices to primitives at this point.
+ Handle<Name> name;
+ if (Object::ToName(isolate(), mindex.Value()).ToHandle(&name)) {
+ uint32_t array_index;
+ if (name->AsArrayIndex(&array_index)) {
+ // Use the constant array index.
+ index = jsgraph()->Constant(static_cast<double>(array_index));
+ } else {
+ name = factory()->InternalizeName(name);
+ return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
+ language_mode);
+ }
+ }
+ }
+
+ // Check if we have feedback for a named access.
+ if (Name* name = nexus.FindFirstName()) {
+ return ReduceNamedAccess(node, value, receiver_maps,
+ handle(name, isolate()), access_mode,
+ language_mode, index);
+ }
+
+ // Try to lower the element access based on the {receiver_maps}.
+ return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
+ language_mode);
+}
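
The constant-key handling above splits primitive keys into element indices
and named properties: a string names an element only if it is the canonical
decimal spelling of an integer no larger than 2^32 - 2. A simplified sketch;
V8's actual Name::AsArrayIndex also consults cached hash bits:

    #include <cstdint>
    #include <string>

    // Canonical array index: non-empty, all digits, no leading zero
    // (except "0" itself), and at most 4294967294 (kMaxUInt32 - 1).
    bool AsArrayIndexSketch(const std::string& key, uint32_t* index_out) {
      if (key.empty() || key.size() > 10) return false;
      if (key.size() > 1 && key[0] == '0') return false;
      uint64_t value = 0;
      for (char c : key) {
        if (c < '0' || c > '9') return false;
        value = value * 10 + static_cast<uint64_t>(c - '0');
      }
      if (value > 4294967294ull) return false;
      *index_out = static_cast<uint32_t>(value);
      return true;
    }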
+
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = jsgraph()->Dead();
+
+ // Extract receiver maps from the KEYED_LOAD_IC using the KeyedLoadICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kLoad,
+ p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = NodeProperties::GetValueInput(node, 2);
+
+ // Extract receiver maps from the KEYED_STORE_IC using the KeyedStoreICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kStore,
+ p.language_mode());
+}
+
+
+void JSNativeContextSpecialization::AssumePrototypesStable(
+ Type* receiver_type, Handle<JSObject> holder) {
+ // Determine actual holder and perform prototype chain checks.
+ for (auto i = receiver_type->Classes(); !i.Done(); i.Advance()) {
+ Handle<Map> map = i.Current();
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context())
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate());
+ }
+ for (PrototypeIterator j(map); !j.IsAtEnd(); j.Advance()) {
+ // Check that the {prototype} still has the same map. All prototype
+ // maps are guaranteed to be stable, so it's sufficient to add a
+ // stability dependency here.
+ Handle<JSReceiver> const prototype =
+ PrototypeIterator::GetCurrent<JSReceiver>(j);
+ dependencies()->AssumeMapStable(handle(prototype->map(), isolate()));
+ // Stop once we get to the holder.
+ if (prototype.is_identical_to(holder)) break;
+ }
+ }
+}
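
Read the walk above as: collect every map on the prototype chain between the
receiver's class and the holder and register each as a compilation
dependency. A plain-C++ analogy, ignoring the primitive-receiver constructor
lookup; the types are illustrative, not V8's:

    #include <vector>

    struct Obj {
      const Obj* prototype;
      int map_id;  // stands in for Handle<Map>
    };

    // Gather the maps the optimized code depends on; in V8 each one is
    // passed to CompilationDependencies::AssumeMapStable instead.
    std::vector<int> CollectPrototypeMaps(const Obj* start, const Obj* holder) {
      std::vector<int> maps;
      for (const Obj* p = start->prototype; p != nullptr; p = p->prototype) {
        maps.push_back(p->map_id);
        if (p == holder) break;  // stop once we reach the holder
      }
      return maps;
    }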
+
+
+void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
+ Node* branch = NodeProperties::GetControlInput(if_projection);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ if (if_projection->opcode() == IrOpcode::kIfTrue) {
+ NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kFalse));
+ } else {
+ DCHECK_EQ(IrOpcode::kIfFalse, if_projection->opcode());
+ NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kTrue));
+ }
+}
+
+
+Graph* JSNativeContextSpecialization::graph() const {
+ return jsgraph()->graph();
+}
+
+
+Isolate* JSNativeContextSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+Factory* JSNativeContextSpecialization::factory() const {
+ return isolate()->factory();
+}
+
+
+MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
+ return jsgraph()->machine();
+}
+
+
+CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSNativeContextSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
+
+SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
new file mode 100644
index 0000000000..89adcce601
--- /dev/null
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -0,0 +1,112 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class FeedbackNexus;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Specializes a given JSGraph to a given native context, potentially constant
+// folding some {LoadGlobal} nodes or strength reducing some {StoreGlobal}
+// nodes, and also specializes {LoadNamed} and {StoreNamed} nodes according
+// to type feedback (if available).
+class JSNativeContextSpecialization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ Handle<Context> native_context,
+ CompilationDependencies* dependencies,
+ Zone* zone);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSCallFunction(Node* node);
+ Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSStoreNamed(Node* node);
+ Reduction ReduceJSLoadProperty(Node* node);
+ Reduction ReduceJSStoreProperty(Node* node);
+
+ Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
+ MapHandleList const& receiver_maps,
+ AccessMode access_mode,
+ LanguageMode language_mode);
+ Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
+ FeedbackNexus const& nexus,
+ AccessMode access_mode,
+ LanguageMode language_mode);
+ Reduction ReduceNamedAccess(Node* node, Node* value,
+ MapHandleList const& receiver_maps,
+ Handle<Name> name, AccessMode access_mode,
+ LanguageMode language_mode,
+ Node* index = nullptr);
+
+ // Adds stability dependencies on all prototypes of every class in
+ // {receiver_type} up to (and including) the {holder}.
+ void AssumePrototypesStable(Type* receiver_type, Handle<JSObject> holder);
+
+ // Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
+ // the dominating Branch that {if_projection} is the unlikely (deferred) case.
+ void MarkAsDeferred(Node* if_projection);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ Factory* factory() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ MachineOperatorBuilder* machine() const;
+ Flags flags() const { return flags_; }
+ Handle<Context> native_context() const { return native_context_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+ AccessInfoFactory& access_info_factory() { return access_info_factory_; }
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ Handle<Context> native_context_;
+ CompilationDependencies* const dependencies_;
+ Zone* const zone_;
+ TypeCache const& type_cache_;
+ AccessInfoFactory access_info_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSNativeContextSpecialization::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
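
For orientation, a hedged sketch of how this reducer gets wired into a
reduction pipeline. Only the constructor declared above is used as-is; the
GraphReducer construction is elided because its exact signature varies
between V8 versions:

    // Assumes graph_reducer, jsgraph, native_context, dependencies and
    // zone are already set up by the surrounding compilation pipeline.
    JSNativeContextSpecialization specialization(
        &graph_reducer, jsgraph,
        JSNativeContextSpecialization::kDeoptimizationEnabled,
        native_context, dependencies, zone);
    graph_reducer.AddReducer(&specialization);
    graph_reducer.ReduceGraph();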
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 37369f6970..c4500a50bb 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-VectorSlotPair::VectorSlotPair() : slot_(FeedbackVectorICSlot::Invalid()) {}
+VectorSlotPair::VectorSlotPair() {}
int VectorSlotPair::index() const {
@@ -40,11 +40,32 @@ size_t hash_value(VectorSlotPair const& p) {
}
-std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
- os << p.arity() << ", " << p.flags() << ", " << p.language_mode();
- if (p.AllowTailCalls()) {
- os << ", ALLOW_TAIL_CALLS";
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSConvertReceiver, op->opcode());
+ return OpParameter<ConvertReceiverMode>(op);
+}
+
+
+size_t hash_value(TailCallMode mode) {
+ return base::hash_value(static_cast<unsigned>(mode));
+}
+
+
+std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
+ switch (mode) {
+ case TailCallMode::kAllow:
+ return os << "ALLOW_TAIL_CALLS";
+ case TailCallMode::kDisallow:
+ return os << "DISALLOW_TAIL_CALLS";
}
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
+ os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
+ << ", " << p.tail_call_mode();
return os;
}
@@ -121,156 +142,101 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
}
-DynamicGlobalAccess::DynamicGlobalAccess(const Handle<String>& name,
- uint32_t check_bitset,
- const VectorSlotPair& feedback,
- TypeofMode typeof_mode)
- : name_(name),
- check_bitset_(check_bitset),
- feedback_(feedback),
- typeof_mode_(typeof_mode) {
- DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
-}
-
-
-bool operator==(DynamicGlobalAccess const& lhs,
- DynamicGlobalAccess const& rhs) {
- UNIMPLEMENTED();
- return true;
-}
-
-
-bool operator!=(DynamicGlobalAccess const& lhs,
- DynamicGlobalAccess const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(DynamicGlobalAccess const& access) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-std::ostream& operator<<(std::ostream& os, DynamicGlobalAccess const& access) {
- return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
- << access.typeof_mode();
-}
-
+DynamicAccess::DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode)
+ : name_(name), typeof_mode_(typeof_mode) {}
-DynamicGlobalAccess const& DynamicGlobalAccessOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicGlobal, op->opcode());
- return OpParameter<DynamicGlobalAccess>(op);
-}
-
-DynamicContextAccess::DynamicContextAccess(const Handle<String>& name,
- uint32_t check_bitset,
- const ContextAccess& context_access)
- : name_(name),
- check_bitset_(check_bitset),
- context_access_(context_access) {
- DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
-}
-
-
-bool operator==(DynamicContextAccess const& lhs,
- DynamicContextAccess const& rhs) {
+bool operator==(DynamicAccess const& lhs, DynamicAccess const& rhs) {
UNIMPLEMENTED();
return true;
}
-bool operator!=(DynamicContextAccess const& lhs,
- DynamicContextAccess const& rhs) {
+bool operator!=(DynamicAccess const& lhs, DynamicAccess const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(DynamicContextAccess const& access) {
+size_t hash_value(DynamicAccess const& access) {
UNIMPLEMENTED();
return 0;
}
-std::ostream& operator<<(std::ostream& os, DynamicContextAccess const& access) {
- return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
- << access.context_access();
+std::ostream& operator<<(std::ostream& os, DynamicAccess const& access) {
+ return os << Brief(*access.name()) << ", " << access.typeof_mode();
}
-DynamicContextAccess const& DynamicContextAccessOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicContext, op->opcode());
- return OpParameter<DynamicContextAccess>(op);
+DynamicAccess const& DynamicAccessOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamic, op->opcode());
+ return OpParameter<DynamicAccess>(op);
}
-bool operator==(LoadNamedParameters const& lhs,
- LoadNamedParameters const& rhs) {
+bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
-bool operator!=(LoadNamedParameters const& lhs,
- LoadNamedParameters const& rhs) {
+bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(LoadNamedParameters const& p) {
+size_t hash_value(NamedAccess const& p) {
return base::hash_combine(p.name().location(), p.language_mode(),
p.feedback());
}
-std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
+std::ostream& operator<<(std::ostream& os, NamedAccess const& p) {
return os << Brief(*p.name()) << ", " << p.language_mode();
}
-std::ostream& operator<<(std::ostream& os, LoadPropertyParameters const& p) {
+NamedAccess const& NamedAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+ op->opcode() == IrOpcode::kJSStoreNamed);
+ return OpParameter<NamedAccess>(op);
+}
+
+
+std::ostream& operator<<(std::ostream& os, PropertyAccess const& p) {
return os << p.language_mode();
}
-bool operator==(LoadPropertyParameters const& lhs,
- LoadPropertyParameters const& rhs) {
+bool operator==(PropertyAccess const& lhs, PropertyAccess const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
-bool operator!=(LoadPropertyParameters const& lhs,
- LoadPropertyParameters const& rhs) {
+bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
return !(lhs == rhs);
}
-const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
- return OpParameter<LoadPropertyParameters>(op);
+PropertyAccess const& PropertyAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadProperty ||
+ op->opcode() == IrOpcode::kJSStoreProperty);
+ return OpParameter<PropertyAccess>(op);
}
-size_t hash_value(LoadPropertyParameters const& p) {
+size_t hash_value(PropertyAccess const& p) {
return base::hash_combine(p.language_mode(), p.feedback());
}
-const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
- return OpParameter<LoadNamedParameters>(op);
-}
-
-
bool operator==(LoadGlobalParameters const& lhs,
LoadGlobalParameters const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.feedback() == rhs.feedback() &&
- lhs.typeof_mode() == rhs.typeof_mode() &&
- lhs.slot_index() == rhs.slot_index();
+ lhs.typeof_mode() == rhs.typeof_mode();
}
@@ -281,14 +247,12 @@ bool operator!=(LoadGlobalParameters const& lhs,
size_t hash_value(LoadGlobalParameters const& p) {
- return base::hash_combine(p.name().location(), p.typeof_mode(),
- p.slot_index());
+ return base::hash_combine(p.name().location(), p.typeof_mode());
}
std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
- return os << Brief(*p.name()) << ", " << p.typeof_mode()
- << ", slot: " << p.slot_index();
+ return os << Brief(*p.name()) << ", " << p.typeof_mode();
}
@@ -302,8 +266,7 @@ bool operator==(StoreGlobalParameters const& lhs,
StoreGlobalParameters const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
lhs.name().location() == rhs.name().location() &&
- lhs.feedback() == rhs.feedback() &&
- lhs.slot_index() == rhs.slot_index();
+ lhs.feedback() == rhs.feedback();
}
@@ -315,13 +278,12 @@ bool operator!=(StoreGlobalParameters const& lhs,
size_t hash_value(StoreGlobalParameters const& p) {
return base::hash_combine(p.language_mode(), p.name().location(),
- p.feedback(), p.slot_index());
+ p.feedback());
}
std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name())
- << ", slot: " << p.slot_index();
+ return os << p.language_mode() << ", " << Brief(*p.name());
}
@@ -331,66 +293,6 @@ const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op) {
}
-bool operator==(StoreNamedParameters const& lhs,
- StoreNamedParameters const& rhs) {
- return lhs.language_mode() == rhs.language_mode() &&
- lhs.name().location() == rhs.name().location() &&
- lhs.feedback() == rhs.feedback();
-}
-
-
-bool operator!=(StoreNamedParameters const& lhs,
- StoreNamedParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(StoreNamedParameters const& p) {
- return base::hash_combine(p.language_mode(), p.name().location(),
- p.feedback());
-}
-
-
-std::ostream& operator<<(std::ostream& os, StoreNamedParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name());
-}
-
-
-const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreNamed, op->opcode());
- return OpParameter<StoreNamedParameters>(op);
-}
-
-
-bool operator==(StorePropertyParameters const& lhs,
- StorePropertyParameters const& rhs) {
- return lhs.language_mode() == rhs.language_mode() &&
- lhs.feedback() == rhs.feedback();
-}
-
-
-bool operator!=(StorePropertyParameters const& lhs,
- StorePropertyParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(StorePropertyParameters const& p) {
- return base::hash_combine(p.language_mode(), p.feedback());
-}
-
-
-std::ostream& operator<<(std::ostream& os, StorePropertyParameters const& p) {
- return os << p.language_mode();
-}
-
-
-const StorePropertyParameters& StorePropertyParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
- return OpParameter<StorePropertyParameters>(op);
-}
-
-
bool operator==(CreateArgumentsParameters const& lhs,
CreateArgumentsParameters const& rhs) {
return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
@@ -423,7 +325,7 @@ const CreateArgumentsParameters& CreateArgumentsParametersOf(
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
- lhs.shared_info().is_identical_to(rhs.shared_info());
+ lhs.shared_info().location() == rhs.shared_info().location();
}
@@ -434,9 +336,7 @@ bool operator!=(CreateClosureParameters const& lhs,
size_t hash_value(CreateClosureParameters const& p) {
- // TODO(mstarzinger): Include hash of the SharedFunctionInfo here.
- base::hash<PretenureFlag> h;
- return h(p.pretenure());
+ return base::hash_combine(p.pretenure(), p.shared_info().location());
}
@@ -451,29 +351,30 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
}
-#define CACHED_OP_LIST(V) \
- V(Equal, Operator::kNoProperties, 2, 1) \
- V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kNoThrow, 2, 1) \
- V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
- V(UnaryNot, Operator::kEliminatable, 1, 1) \
- V(ToBoolean, Operator::kEliminatable, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kNoProperties, 1, 1) \
- V(Yield, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 0, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kEliminatable, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(ForInDone, Operator::kPure, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(ForInStep, Operator::kPure, 1, 1) \
- V(StackCheck, Operator::kNoProperties, 0, 0) \
- V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1) \
+#define CACHED_OP_LIST(V) \
+ V(Equal, Operator::kNoProperties, 2, 1) \
+ V(NotEqual, Operator::kNoProperties, 2, 1) \
+ V(StrictEqual, Operator::kNoThrow, 2, 1) \
+ V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(UnaryNot, Operator::kEliminatable, 1, 1) \
+ V(ToBoolean, Operator::kEliminatable, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kNoProperties, 1, 1) \
+ V(Yield, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 0, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kEliminatable, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow, 0, 1) \
+ V(StoreMessage, Operator::kNoThrow, 1, 0) \
+ V(StackCheck, Operator::kNoProperties, 0, 0) \
+ V(CreateWithContext, Operator::kNoProperties, 2, 1) \
V(CreateModuleContext, Operator::kNoProperties, 2, 1)
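The CACHED_OP_LIST above is an X-macro: each V(...) row names a parameterless operator together with its properties and value input/output counts, and the list is expanded several times to stamp out the shared operator instances, their cache slots, and the builder accessors. A minimal sketch of one such expansion, assuming the usual k##Name##Operator cache members (not shown in this hunk):

#define CACHED(Name, properties, value_input_count, value_output_count) \
  const Operator* JSOperatorBuilder::Name() {                           \
    return &cache_.k##Name##Operator; /* shared, zone-allocated */      \
  }
CACHED_OP_LIST(CACHED)
#undef CACHED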
@@ -566,13 +467,11 @@ CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
#undef CACHED_WITH_LANGUAGE_MODE
-const Operator* JSOperatorBuilder::CallFunction(size_t arity,
- CallFunctionFlags flags,
- LanguageMode language_mode,
- VectorSlotPair const& feedback,
- TailCallMode tail_call_mode) {
- CallFunctionParameters parameters(arity, flags, language_mode, feedback,
- tail_call_mode);
+const Operator* JSOperatorBuilder::CallFunction(
+ size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
+ CallFunctionParameters parameters(arity, language_mode, feedback,
+ tail_call_mode, convert_mode);
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
"JSCallFunction", // name
@@ -603,49 +502,59 @@ const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
}
-const Operator* JSOperatorBuilder::LoadNamed(const Handle<Name>& name,
- const VectorSlotPair& feedback,
- LanguageMode language_mode) {
- LoadNamedParameters parameters(name, feedback, language_mode);
- return new (zone()) Operator1<LoadNamedParameters>( // --
+const Operator* JSOperatorBuilder::ConvertReceiver(
+ ConvertReceiverMode convert_mode) {
+ return new (zone()) Operator1<ConvertReceiverMode>( // --
+ IrOpcode::kJSConvertReceiver, Operator::kNoThrow, // opcode
+ "JSConvertReceiver", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ convert_mode); // parameter
+}
+
+
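JSConvertReceiver is new in this patch: it reifies the receiver conversion that sloppy-mode calls need (wrapping primitive receivers, substituting the global proxy for null/undefined) as its own operator instead of burying it inside the call. A hedged sketch of reading the parameter back in a reducer, using the ConvertReceiverModeOf accessor declared in the header below:

  // Sketch only; assumes a JSConvertReceiver node from this builder.
  ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
  if (mode == ConvertReceiverMode::kNullOrUndefined) {
    // Receiver is statically null/undefined, so the conversion always
    // yields the global proxy and could be constant-folded.
  }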
+const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
+ Handle<Name> name,
+ const VectorSlotPair& feedback) {
+ NamedAccess access(language_mode, name, feedback);
+ return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
2, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
-const Operator* JSOperatorBuilder::LoadProperty(const VectorSlotPair& feedback,
- LanguageMode language_mode) {
- LoadPropertyParameters parameters(feedback, language_mode);
- return new (zone()) Operator1<LoadPropertyParameters>( // --
+const Operator* JSOperatorBuilder::LoadProperty(
+ LanguageMode language_mode, VectorSlotPair const& feedback) {
+ PropertyAccess access(language_mode, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
3, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
- const Handle<Name>& name,
- const VectorSlotPair& feedback) {
- StoreNamedParameters parameters(language_mode, feedback, name);
- return new (zone()) Operator1<StoreNamedParameters>( // --
+ Handle<Name> name,
+ VectorSlotPair const& feedback) {
+ NamedAccess access(language_mode, name, feedback);
+ return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
3, 1, 1, 0, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
const Operator* JSOperatorBuilder::StoreProperty(
- LanguageMode language_mode, const VectorSlotPair& feedback) {
- StorePropertyParameters parameters(language_mode, feedback);
- return new (zone()) Operator1<StorePropertyParameters>( // --
+ LanguageMode language_mode, VectorSlotPair const& feedback) {
+ PropertyAccess access(language_mode, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
"JSStoreProperty", // name
4, 1, 1, 0, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
@@ -660,26 +569,24 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
- TypeofMode typeof_mode,
- int slot_index) {
- LoadGlobalParameters parameters(name, feedback, typeof_mode, slot_index);
+ TypeofMode typeof_mode) {
+ LoadGlobalParameters parameters(name, feedback, typeof_mode);
return new (zone()) Operator1<LoadGlobalParameters>( // --
IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
"JSLoadGlobal", // name
- 3, 1, 1, 1, 1, 2, // counts
+ 1, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
const Handle<Name>& name,
- const VectorSlotPair& feedback,
- int slot_index) {
- StoreGlobalParameters parameters(language_mode, feedback, name, slot_index);
+ const VectorSlotPair& feedback) {
+ StoreGlobalParameters parameters(language_mode, feedback, name);
return new (zone()) Operator1<StoreGlobalParameters>( // --
IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
"JSStoreGlobal", // name
- 4, 1, 1, 0, 1, 2, // counts
+ 2, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
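The count changes record the operands that moved off the node: JSLoadGlobal drops from 3 value inputs to 1 and JSStoreGlobal from 4 to 2 now that the script-context slot index is no longer threaded through. Reading the six-number tuples as (value_in, effect_in, control_in, value_out, effect_out, control_out) — an assumption based on the Operator1 constructor — the new shapes can be sanity-checked like this:

  const Operator* op = javascript()->StoreGlobal(SLOPPY, name, feedback);
  DCHECK_EQ(2, op->ValueInputCount());     // two value operands remain
  DCHECK_EQ(0, op->ValueOutputCount());    // stores produce no value
  DCHECK_EQ(2, op->ControlOutputCount());  // success + exception continuations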
@@ -707,28 +614,14 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
}
-const Operator* JSOperatorBuilder::LoadDynamicGlobal(
- const Handle<String>& name, uint32_t check_bitset,
- const VectorSlotPair& feedback, TypeofMode typeof_mode) {
- DynamicGlobalAccess access(name, check_bitset, feedback, typeof_mode);
- return new (zone()) Operator1<DynamicGlobalAccess>( // --
- IrOpcode::kJSLoadDynamicGlobal, Operator::kNoProperties, // opcode
- "JSLoadDynamicGlobal", // name
- 2, 1, 1, 1, 1, 2, // counts
- access); // parameter
-}
-
-
-const Operator* JSOperatorBuilder::LoadDynamicContext(
- const Handle<String>& name, uint32_t check_bitset, size_t depth,
- size_t index) {
- ContextAccess context_access(depth, index, false);
- DynamicContextAccess access(name, check_bitset, context_access);
- return new (zone()) Operator1<DynamicContextAccess>( // --
- IrOpcode::kJSLoadDynamicContext, Operator::kNoProperties, // opcode
- "JSLoadDynamicContext", // name
- 1, 1, 1, 1, 1, 2, // counts
- access); // parameter
+const Operator* JSOperatorBuilder::LoadDynamic(const Handle<String>& name,
+ TypeofMode typeof_mode) {
+ DynamicAccess access(name, typeof_mode);
+ return new (zone()) Operator1<DynamicAccess>( // --
+ IrOpcode::kJSLoadDynamic, Operator::kNoProperties, // opcode
+ "JSLoadDynamic", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ access); // parameter
}
@@ -773,6 +666,15 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(int literal_flags) {
}
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties, // opcode
+ "JSCreateFunctionContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ slot_count); // parameter
+}
+
+
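CreateFunctionContext is no longer a cached, parameterless operator (note its removal from CACHED_OP_LIST above): the number of context slots is now a static parameter, which lets later passes size or even inline the allocation. A hypothetical call site, with the slot computation assumed from the usual graph-builder pattern:

  // Slots beyond the fixed Context header; accessor names are assumptions.
  int const slot_count =
      info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  const Operator* op = javascript()->CreateFunctionContext(slot_count);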
const Operator* JSOperatorBuilder::CreateCatchContext(
const Handle<String>& name) {
return new (zone()) Operator1<Handle<String>, Handle<String>::equal_to,
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 88b2dd304e..6bd6516af3 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -16,24 +16,24 @@ class Operator;
struct JSOperatorGlobalCache;
-// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorICSlot}, which
+// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorSlot}, which
// is used to access the type feedback for a certain {Node}.
class VectorSlotPair {
public:
VectorSlotPair();
- VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: vector_(vector), slot_(slot) {}
- bool IsValid() const { return !vector_.is_null(); }
+ bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
Handle<TypeFeedbackVector> vector() const { return vector_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
int index() const;
private:
const Handle<TypeFeedbackVector> vector_;
- const FeedbackVectorICSlot slot_;
+ const FeedbackVectorSlot slot_;
};
bool operator==(VectorSlotPair const&, VectorSlotPair const&);
@@ -41,26 +41,43 @@ bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
size_t hash_value(VectorSlotPair const&);
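IsValid() is strict now: both the vector and the slot must be present, so a single guard suffices before consulting feedback. A minimal sketch, assuming parameters p taken from a JS operator (LoadICNexus as in the feedback code this patch touches):

  VectorSlotPair const& feedback = p.feedback();
  if (!feedback.IsValid()) return NoChange();  // missing vector *or* slot
  // Safe: slot() is not FeedbackVectorSlot::Invalid() past this point.
  LoadICNexus nexus(feedback.vector(), feedback.slot());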
-enum TailCallMode { NO_TAIL_CALLS, ALLOW_TAIL_CALLS };
+
+// The ConvertReceiverMode is used as a parameter by JSConvertReceiver operators.
+ConvertReceiverMode ConvertReceiverModeOf(const Operator* op);
+
+
+// Defines whether tail call optimization is allowed.
+enum class TailCallMode : unsigned { kAllow, kDisallow };
+
+size_t hash_value(TailCallMode);
+
+std::ostream& operator<<(std::ostream&, TailCallMode);
+
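Since TailCallMode is now a scoped enum, it no longer converts implicitly to an integer, so the hashing and printing support declared above must be spelled out. Plausible definitions (a sketch; the printed spellings are assumptions):

size_t hash_value(TailCallMode mode) {
  return base::hash_value(static_cast<unsigned>(mode));
}

std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
  switch (mode) {
    case TailCallMode::kAllow:
      return os << "ALLOW_TAIL_CALLS";
    case TailCallMode::kDisallow:
      return os << "DISALLOW_TAIL_CALLS";
  }
  UNREACHABLE();
  return os;
}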
// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCallFunction operators.
class CallFunctionParameters final {
public:
- CallFunctionParameters(size_t arity, CallFunctionFlags flags,
- LanguageMode language_mode,
+ CallFunctionParameters(size_t arity, LanguageMode language_mode,
VectorSlotPair const& feedback,
- TailCallMode tail_call_mode)
- : bit_field_(ArityField::encode(arity) | FlagsField::encode(flags) |
- LanguageModeField::encode(language_mode)),
- feedback_(feedback),
- tail_call_mode_(tail_call_mode) {}
+ TailCallMode tail_call_mode,
+ ConvertReceiverMode convert_mode)
+ : bit_field_(ArityField::encode(arity) |
+ ConvertReceiverModeField::encode(convert_mode) |
+ LanguageModeField::encode(language_mode) |
+ TailCallModeField::encode(tail_call_mode)),
+ feedback_(feedback) {}
size_t arity() const { return ArityField::decode(bit_field_); }
- CallFunctionFlags flags() const { return FlagsField::decode(bit_field_); }
LanguageMode language_mode() const {
return LanguageModeField::decode(bit_field_);
}
+ ConvertReceiverMode convert_mode() const {
+ return ConvertReceiverModeField::decode(bit_field_);
+ }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
VectorSlotPair const& feedback() const { return feedback_; }
bool operator==(CallFunctionParameters const& that) const {
@@ -71,20 +88,18 @@ class CallFunctionParameters final {
return !(*this == that);
}
- bool AllowTailCalls() const { return tail_call_mode_ == ALLOW_TAIL_CALLS; }
-
private:
friend size_t hash_value(CallFunctionParameters const& p) {
return base::hash_combine(p.bit_field_, p.feedback_);
}
- typedef BitField<size_t, 0, 28> ArityField;
- typedef BitField<CallFunctionFlags, 28, 2> FlagsField;
- typedef BitField<LanguageMode, 30, 2> LanguageModeField;
+ typedef BitField<size_t, 0, 27> ArityField;
+ typedef BitField<ConvertReceiverMode, 27, 2> ConvertReceiverModeField;
+ typedef BitField<LanguageMode, 29, 2> LanguageModeField;
+ typedef BitField<TailCallMode, 31, 1> TailCallModeField;
const uint32_t bit_field_;
const VectorSlotPair feedback_;
- bool tail_call_mode_;
};
size_t hash_value(CallFunctionParameters const&);
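All four call parameters now share one 32-bit word: 27 bits of arity, 2 bits each for the conversion and language modes, and a single bit for the tail-call mode, which is why TailCallMode is a two-state enum class. A round-trip sketch using V8's BitField utility (illustrative only; the typedefs are private to CallFunctionParameters):

  uint32_t const bits =
      ArityField::encode(2) |
      ConvertReceiverModeField::encode(ConvertReceiverMode::kAny) |
      LanguageModeField::encode(SLOPPY) |
      TailCallModeField::encode(TailCallMode::kDisallow);
  DCHECK_EQ(2u, ArityField::decode(bits));
  DCHECK_EQ(TailCallMode::kDisallow, TailCallModeField::decode(bits));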
@@ -148,113 +163,56 @@ std::ostream& operator<<(std::ostream&, ContextAccess const&);
ContextAccess const& ContextAccessOf(Operator const*);
-// Defines the name for a dynamic variable lookup. The {check_bitset} allows to
-// inline checks whether the lookup yields in a global variable. This is used as
-// a parameter by JSLoadDynamicGlobal and JSStoreDynamicGlobal operators.
-class DynamicGlobalAccess final {
+// Defines the name for a dynamic variable lookup. This is used as a parameter
+// by the JSLoadDynamic and JSStoreDynamic operators.
+class DynamicAccess final {
public:
- DynamicGlobalAccess(const Handle<String>& name, uint32_t check_bitset,
- const VectorSlotPair& feedback, TypeofMode typeof_mode);
+ DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode);
const Handle<String>& name() const { return name_; }
- uint32_t check_bitset() const { return check_bitset_; }
- const VectorSlotPair& feedback() const { return feedback_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
- // Indicates that an inline check is disabled.
- bool RequiresFullCheck() const {
- return check_bitset() == kFullCheckRequired;
- }
-
- // Limit of context chain length to which inline check is possible.
- static const int kMaxCheckDepth = 30;
-
- // Sentinel for {check_bitset} disabling inline checks.
- static const uint32_t kFullCheckRequired = -1;
-
private:
const Handle<String> name_;
- const uint32_t check_bitset_;
- const VectorSlotPair feedback_;
const TypeofMode typeof_mode_;
};
-size_t hash_value(DynamicGlobalAccess const&);
-
-bool operator==(DynamicGlobalAccess const&, DynamicGlobalAccess const&);
-bool operator!=(DynamicGlobalAccess const&, DynamicGlobalAccess const&);
-
-std::ostream& operator<<(std::ostream&, DynamicGlobalAccess const&);
-
-DynamicGlobalAccess const& DynamicGlobalAccessOf(Operator const*);
-
-
-// Defines the name for a dynamic variable lookup. The {check_bitset} allows to
-// inline checks whether the lookup yields in a context variable. This is used
-// as a parameter by JSLoadDynamicContext and JSStoreDynamicContext operators.
-class DynamicContextAccess final {
- public:
- DynamicContextAccess(const Handle<String>& name, uint32_t check_bitset,
- const ContextAccess& context_access);
-
- const Handle<String>& name() const { return name_; }
- uint32_t check_bitset() const { return check_bitset_; }
- const ContextAccess& context_access() const { return context_access_; }
-
- // Indicates that an inline check is disabled.
- bool RequiresFullCheck() const {
- return check_bitset() == kFullCheckRequired;
- }
-
- // Limit of context chain length to which inline check is possible.
- static const int kMaxCheckDepth = 30;
-
- // Sentinel for {check_bitset} disabling inline checks.
- static const uint32_t kFullCheckRequired = -1;
-
- private:
- const Handle<String> name_;
- const uint32_t check_bitset_;
- const ContextAccess context_access_;
-};
-
-size_t hash_value(DynamicContextAccess const&);
+size_t hash_value(DynamicAccess const&);
-bool operator==(DynamicContextAccess const&, DynamicContextAccess const&);
-bool operator!=(DynamicContextAccess const&, DynamicContextAccess const&);
+bool operator==(DynamicAccess const&, DynamicAccess const&);
+bool operator!=(DynamicAccess const&, DynamicAccess const&);
-std::ostream& operator<<(std::ostream&, DynamicContextAccess const&);
+std::ostream& operator<<(std::ostream&, DynamicAccess const&);
-DynamicContextAccess const& DynamicContextAccessOf(Operator const*);
+DynamicAccess const& DynamicAccessOf(Operator const*);
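With the check_bitset machinery gone, a dynamic lookup is described only by the variable name and whether it occurs inside typeof. A reducer would recover it like this (sketch):

  DynamicAccess const& access = DynamicAccessOf(node->op());
  if (access.typeof_mode() == NOT_INSIDE_TYPEOF) {
    // Outside typeof, a failed lookup of access.name() must throw a
    // ReferenceError, so the load cannot simply fold to undefined.
  }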
-// Defines the property being loaded from an object by a named load. This is
-// used as a parameter by JSLoadNamed operators.
-class LoadNamedParameters final {
+// Defines the property being accessed by a named load or store. This is
+// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
+class NamedAccess final {
public:
- LoadNamedParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
- LanguageMode language_mode)
+ NamedAccess(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback)
: name_(name), feedback_(feedback), language_mode_(language_mode) {}
- const Handle<Name>& name() const { return name_; }
+ Handle<Name> name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
-
- const VectorSlotPair& feedback() const { return feedback_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
- const Handle<Name> name_;
- const VectorSlotPair feedback_;
- const LanguageMode language_mode_;
+ Handle<Name> const name_;
+ VectorSlotPair const feedback_;
+ LanguageMode const language_mode_;
};
-bool operator==(LoadNamedParameters const&, LoadNamedParameters const&);
-bool operator!=(LoadNamedParameters const&, LoadNamedParameters const&);
+bool operator==(NamedAccess const&, NamedAccess const&);
+bool operator!=(NamedAccess const&, NamedAccess const&);
-size_t hash_value(LoadNamedParameters const&);
+size_t hash_value(NamedAccess const&);
-std::ostream& operator<<(std::ostream&, LoadNamedParameters const&);
+std::ostream& operator<<(std::ostream&, NamedAccess const&);
-const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
+const NamedAccess& NamedAccessOf(const Operator* op);
// Defines the property being loaded from an object by a named load. This is
@@ -262,24 +220,18 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
class LoadGlobalParameters final {
public:
LoadGlobalParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
- TypeofMode typeof_mode, int slot_index)
- : name_(name),
- feedback_(feedback),
- typeof_mode_(typeof_mode),
- slot_index_(slot_index) {}
+ TypeofMode typeof_mode)
+ : name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {}
const Handle<Name>& name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
- int slot_index() const { return slot_index_; }
-
private:
const Handle<Name> name_;
const VectorSlotPair feedback_;
const TypeofMode typeof_mode_;
- const int slot_index_;
};
bool operator==(LoadGlobalParameters const&, LoadGlobalParameters const&);
@@ -298,22 +250,17 @@ class StoreGlobalParameters final {
public:
StoreGlobalParameters(LanguageMode language_mode,
const VectorSlotPair& feedback,
- const Handle<Name>& name, int slot_index)
- : language_mode_(language_mode),
- name_(name),
- feedback_(feedback),
- slot_index_(slot_index) {}
+ const Handle<Name>& name)
+ : language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
const Handle<Name>& name() const { return name_; }
- int slot_index() const { return slot_index_; }
private:
const LanguageMode language_mode_;
const Handle<Name> name_;
const VectorSlotPair feedback_;
- int slot_index_;
};
bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
@@ -326,85 +273,29 @@ std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&);
const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
-// Defines the property being loaded from an object. This is
-// used as a parameter by JSLoadProperty operators.
-class LoadPropertyParameters final {
+// Defines the parameters for a keyed access to an object. This is used
+// as a parameter by the JSLoadProperty and JSStoreProperty operators.
+class PropertyAccess final {
public:
- explicit LoadPropertyParameters(const VectorSlotPair& feedback,
- LanguageMode language_mode)
+ PropertyAccess(LanguageMode language_mode, VectorSlotPair const& feedback)
: feedback_(feedback), language_mode_(language_mode) {}
- const VectorSlotPair& feedback() const { return feedback_; }
-
- LanguageMode language_mode() const { return language_mode_; }
-
- private:
- const VectorSlotPair feedback_;
- const LanguageMode language_mode_;
-};
-
-bool operator==(LoadPropertyParameters const&, LoadPropertyParameters const&);
-bool operator!=(LoadPropertyParameters const&, LoadPropertyParameters const&);
-
-size_t hash_value(LoadPropertyParameters const&);
-
-std::ostream& operator<<(std::ostream&, LoadPropertyParameters const&);
-
-const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
-
-
-// Defines the property being stored to an object by a named store. This is
-// used as a parameter by JSStoreNamed operator.
-class StoreNamedParameters final {
- public:
- StoreNamedParameters(LanguageMode language_mode,
- const VectorSlotPair& feedback, const Handle<Name>& name)
- : language_mode_(language_mode), name_(name), feedback_(feedback) {}
-
- LanguageMode language_mode() const { return language_mode_; }
- const VectorSlotPair& feedback() const { return feedback_; }
- const Handle<Name>& name() const { return name_; }
-
- private:
- const LanguageMode language_mode_;
- const Handle<Name> name_;
- const VectorSlotPair feedback_;
-};
-
-bool operator==(StoreNamedParameters const&, StoreNamedParameters const&);
-bool operator!=(StoreNamedParameters const&, StoreNamedParameters const&);
-
-size_t hash_value(StoreNamedParameters const&);
-
-std::ostream& operator<<(std::ostream&, StoreNamedParameters const&);
-
-const StoreNamedParameters& StoreNamedParametersOf(const Operator* op);
-
-
-// Defines the property being stored to an object. This is used as a parameter
-// by JSStoreProperty operators.
-class StorePropertyParameters final {
- public:
- StorePropertyParameters(LanguageMode language_mode,
- const VectorSlotPair& feedback)
- : language_mode_(language_mode), feedback_(feedback) {}
-
LanguageMode language_mode() const { return language_mode_; }
- const VectorSlotPair& feedback() const { return feedback_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
- const LanguageMode language_mode_;
- const VectorSlotPair feedback_;
+ VectorSlotPair const feedback_;
+ LanguageMode const language_mode_;
};
-bool operator==(StorePropertyParameters const&, StorePropertyParameters const&);
-bool operator!=(StorePropertyParameters const&, StorePropertyParameters const&);
+bool operator==(PropertyAccess const&, PropertyAccess const&);
+bool operator!=(PropertyAccess const&, PropertyAccess const&);
-size_t hash_value(StorePropertyParameters const&);
+size_t hash_value(PropertyAccess const&);
-std::ostream& operator<<(std::ostream&, StorePropertyParameters const&);
+std::ostream& operator<<(std::ostream&, PropertyAccess const&);
-const StorePropertyParameters& StorePropertyParametersOf(const Operator* op);
+PropertyAccess const& PropertyAccessOf(const Operator* op);
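Because loads and stores now share the NamedAccess and PropertyAccess parameter types, one accessor serves both opcodes. A minimal sketch:

  // Works for IrOpcode::kJSLoadProperty and IrOpcode::kJSStoreProperty alike.
  PropertyAccess const& access = PropertyAccessOf(node->op());
  if (is_strict(access.language_mode())) {
    // Strict-mode keyed stores must throw on failure; avoid any rewrite
    // that would swallow the error.
  }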
// Defines specifics about arguments object or rest parameter creation. This is
@@ -506,24 +397,24 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* CreateLiteralObject(int literal_flags);
const Operator* CallFunction(
- size_t arity, CallFunctionFlags flags, LanguageMode language_mode,
+ size_t arity, LanguageMode language_mode,
VectorSlotPair const& feedback = VectorSlotPair(),
- TailCallMode tail_call_mode = NO_TAIL_CALLS);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
-
const Operator* CallConstruct(int arguments);
- const Operator* LoadProperty(const VectorSlotPair& feedback,
- LanguageMode language_mode);
- const Operator* LoadNamed(const Handle<Name>& name,
- const VectorSlotPair& feedback,
- LanguageMode language_mode);
+ const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
+
+ const Operator* LoadProperty(LanguageMode language_mode,
+ VectorSlotPair const& feedback);
+ const Operator* LoadNamed(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback);
const Operator* StoreProperty(LanguageMode language_mode,
- const VectorSlotPair& feedback);
- const Operator* StoreNamed(LanguageMode language_mode,
- const Handle<Name>& name,
- const VectorSlotPair& feedback);
+ VectorSlotPair const& feedback);
+ const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback);
const Operator* DeleteProperty(LanguageMode language_mode);
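With the defaulted trailing parameters above, most call sites only name the arity and language mode; the conversion and tail-call modes opt in explicitly. A hypothetical use, assuming a JSOperatorBuilder* javascript() accessor:

  // Sloppy call of arity 3, unknown receiver shape, no tail call.
  const Operator* op = javascript()->CallFunction(3, SLOPPY);
  // Same call, but the receiver is known to need no conversion:
  const Operator* op2 = javascript()->CallFunction(
      3, SLOPPY, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined);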
@@ -531,23 +422,16 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF,
- int slot_index = -1);
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
const Operator* StoreGlobal(LanguageMode language_mode,
const Handle<Name>& name,
- const VectorSlotPair& feedback,
- int slot_index = -1);
+ const VectorSlotPair& feedback);
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
- const Operator* LoadDynamicGlobal(const Handle<String>& name,
- uint32_t check_bitset,
- const VectorSlotPair& feedback,
- TypeofMode typeof_mode);
- const Operator* LoadDynamicContext(const Handle<String>& name,
- uint32_t check_bitset, size_t depth,
- size_t index);
+ const Operator* LoadDynamic(const Handle<String>& name,
+ TypeofMode typeof_mode);
const Operator* TypeOf();
const Operator* InstanceOf();
@@ -557,9 +441,12 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* ForInPrepare();
const Operator* ForInStep();
+ const Operator* LoadMessage();
+ const Operator* StoreMessage();
+
const Operator* StackCheck();
- const Operator* CreateFunctionContext();
+ const Operator* CreateFunctionContext(int slot_count);
const Operator* CreateCatchContext(const Handle<String>& name);
const Operator* CreateWithContext();
const Operator* CreateBlockContext(const Handle<ScopeInfo>& scope_info);
diff --git a/deps/v8/src/compiler/js-type-feedback-lowering.cc b/deps/v8/src/compiler/js-type-feedback-lowering.cc
deleted file mode 100644
index d97a305d08..0000000000
--- a/deps/v8/src/compiler/js-type-feedback-lowering.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-type-feedback-lowering.h"
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
-#include "src/type-feedback-vector.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-JSTypeFeedbackLowering::JSTypeFeedbackLowering(Editor* editor, Flags flags,
- JSGraph* jsgraph)
- : AdvancedReducer(editor),
- flags_(flags),
- jsgraph_(jsgraph),
- simplified_(graph()->zone()) {}
-
-
-Reduction JSTypeFeedbackLowering::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSLoadNamed:
- return ReduceJSLoadNamed(node);
- default:
- break;
- }
- return NoChange();
-}
-
-
-Reduction JSTypeFeedbackLowering::ReduceJSLoadNamed(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Type* receiver_type = NodeProperties::GetType(receiver);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // We need to make optimistic assumptions to continue.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- LoadNamedParameters const& p = LoadNamedParametersOf(node->op());
- if (p.feedback().vector().is_null()) return NoChange();
- if (p.name().is_identical_to(factory()->length_string())) {
- LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
- MapHandleList maps;
- if (nexus.ExtractMaps(&maps) > 0) {
- for (Handle<Map> map : maps) {
- if (map->instance_type() >= FIRST_NONSTRING_TYPE) return NoChange();
- }
- // Optimistic optimization for "length" property of strings.
- if (receiver_type->Maybe(Type::TaggedSigned())) {
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_true);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- control = graph()->NewNode(common()->IfFalse(), branch);
- }
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* receiver_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, effect, control);
- Node* check =
- graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- control = graph()->NewNode(common()->IfTrue(), branch);
- Node* value = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForStringLength(graph()->zone())),
- receiver, effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- return NoChange();
-}
-
-
-Factory* JSTypeFeedbackLowering::factory() const {
- return isolate()->factory();
-}
-
-
-CommonOperatorBuilder* JSTypeFeedbackLowering::common() const {
- return jsgraph()->common();
-}
-
-
-Graph* JSTypeFeedbackLowering::graph() const { return jsgraph()->graph(); }
-
-
-Isolate* JSTypeFeedbackLowering::isolate() const {
- return jsgraph()->isolate();
-}
-
-
-MachineOperatorBuilder* JSTypeFeedbackLowering::machine() const {
- return jsgraph()->machine();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/js-type-feedback-lowering.h b/deps/v8/src/compiler/js-type-feedback-lowering.h
deleted file mode 100644
index db0fbdd626..0000000000
--- a/deps/v8/src/compiler/js-type-feedback-lowering.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
-#define V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
-
-#include "src/base/flags.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class Factory;
-
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class JSGraph;
-class MachineOperatorBuilder;
-
-
-// Lowers JS-level operators to simplified operators based on type feedback.
-class JSTypeFeedbackLowering final : public AdvancedReducer {
- public:
- // Various configuration flags to control the operation of this lowering.
- enum Flag {
- kNoFlags = 0,
- kDeoptimizationEnabled = 1 << 0,
- };
- typedef base::Flags<Flag> Flags;
-
- JSTypeFeedbackLowering(Editor* editor, Flags flags, JSGraph* jsgraph);
- ~JSTypeFeedbackLowering() final {}
-
- Reduction Reduce(Node* node) final;
-
- private:
- Reduction ReduceJSLoadNamed(Node* node);
-
- Factory* factory() const;
- Flags flags() const { return flags_; }
- Graph* graph() const;
- Isolate* isolate() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- Flags const flags_;
- JSGraph* const jsgraph_;
- SimplifiedOperatorBuilder simplified_;
-
- DISALLOW_COPY_AND_ASSIGN(JSTypeFeedbackLowering);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(JSTypeFeedbackLowering::Flags)
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-type-feedback.cc b/deps/v8/src/compiler/js-type-feedback.cc
deleted file mode 100644
index 395a7dccca..0000000000
--- a/deps/v8/src/compiler/js-type-feedback.cc
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-type-feedback.h"
-
-#include "src/property-details.h"
-
-#include "src/accessors.h"
-#include "src/ast.h"
-#include "src/compiler.h"
-#include "src/type-info.h"
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/frame-states.h"
-#include "src/compiler/node-aux-data.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-enum LoadOrStore { LOAD, STORE };
-
-// TODO(turbofan): fix deoptimization problems
-#define ENABLE_FAST_PROPERTY_LOADS false
-#define ENABLE_FAST_PROPERTY_STORES false
-
-JSTypeFeedbackTable::JSTypeFeedbackTable(Zone* zone)
- : type_feedback_id_map_(TypeFeedbackIdMap::key_compare(),
- TypeFeedbackIdMap::allocator_type(zone)),
- feedback_vector_ic_slot_map_(TypeFeedbackIdMap::key_compare(),
- TypeFeedbackIdMap::allocator_type(zone)) {}
-
-
-void JSTypeFeedbackTable::Record(Node* node, TypeFeedbackId id) {
- type_feedback_id_map_.insert(std::make_pair(node->id(), id));
-}
-
-
-void JSTypeFeedbackTable::Record(Node* node, FeedbackVectorICSlot slot) {
- feedback_vector_ic_slot_map_.insert(std::make_pair(node->id(), slot));
-}
-
-
-Reduction JSTypeFeedbackSpecializer::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSLoadProperty:
- return ReduceJSLoadProperty(node);
- case IrOpcode::kJSLoadNamed:
- return ReduceJSLoadNamed(node);
- case IrOpcode::kJSLoadGlobal:
- return ReduceJSLoadGlobal(node);
- case IrOpcode::kJSStoreNamed:
- return ReduceJSStoreNamed(node);
- case IrOpcode::kJSStoreProperty:
- return ReduceJSStoreProperty(node);
- default:
- break;
- }
- return NoChange();
-}
-
-
-static void AddFieldAccessTypes(FieldAccess* access,
- PropertyDetails property_details) {
- if (property_details.representation().IsSmi()) {
- access->type = Type::SignedSmall();
- access->machine_type = static_cast<MachineType>(kTypeInt32 | kRepTagged);
- } else if (property_details.representation().IsDouble()) {
- access->type = Type::Number();
- access->machine_type = kMachFloat64;
- }
-}
-
-
-static bool GetInObjectFieldAccess(LoadOrStore mode, Handle<Map> map,
- Handle<Name> name, FieldAccess* access) {
- access->base_is_tagged = kTaggedBase;
- access->offset = -1;
- access->name = name;
- access->type = Type::Any();
- access->machine_type = kMachAnyTagged;
-
- // Check for properties that have accessors but are JSObject fields.
- if (Accessors::IsJSObjectFieldAccessor(map, name, &access->offset)) {
- // TODO(turbofan): fill in types for special JSObject field accesses.
- return true;
- }
-
- // Check if the map is a dictionary.
- if (map->is_dictionary_map()) return false;
-
- // Search the descriptor array.
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(*name, *map);
- if (number == DescriptorArray::kNotFound) return false;
- PropertyDetails property_details = descriptors->GetDetails(number);
-
- bool is_smi = property_details.representation().IsSmi();
- bool is_double = property_details.representation().IsDouble();
-
- if (property_details.type() != DATA) {
- // TODO(turbofan): constant loads and stores.
- return false;
- }
-
- // Transfer known types from property details.
- AddFieldAccessTypes(access, property_details);
-
- if (mode == STORE) {
- if (property_details.IsReadOnly()) {
- // TODO(turbofan): deopt, ignore or throw on readonly stores.
- return false;
- }
- if (is_smi || is_double) {
- // TODO(turbofan): check type and deopt for SMI/double stores.
- return false;
- }
- }
-
- int index = map->instance_descriptors()->GetFieldIndex(number);
- FieldIndex field_index = FieldIndex::ForPropertyIndex(*map, index, is_double);
-
- if (field_index.is_inobject()) {
- if (is_double && !map->IsUnboxedDoubleField(field_index)) {
- // TODO(turbofan): support for out-of-line (MutableHeapNumber) loads.
- return false;
- }
- access->offset = field_index.offset();
- return true;
- }
-
- // TODO(turbofan): handle out of object properties.
- return false;
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadNamed);
- if (mode() != kDeoptimizationEnabled) return NoChange();
- Node* frame_state_before = GetFrameStateBefore(node);
- if (frame_state_before == nullptr) return NoChange();
-
- const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- SmallMapList maps;
-
- FeedbackVectorICSlot slot = js_type_feedback_->FindFeedbackVectorICSlot(node);
- if (slot.IsInvalid() ||
- oracle()->LoadInlineCacheState(slot) == UNINITIALIZED) {
- // No type feedback ids or the load is uninitialized.
- return NoChange();
- }
- oracle()->PropertyReceiverTypes(slot, p.name(), &maps);
-
- Node* receiver = node->InputAt(0);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
- if (!ENABLE_FAST_PROPERTY_LOADS) return NoChange();
-
- Handle<Map> map = maps.first();
- FieldAccess field_access;
- if (!GetInObjectFieldAccess(LOAD, map, p.name(), &field_access)) {
- return NoChange();
- }
-
- Node* control = NodeProperties::GetControlInput(node);
- Node* check_success;
- Node* check_failed;
- BuildMapCheck(receiver, map, true, effect, control, &check_success,
- &check_failed);
-
- // Build the actual load.
- Node* load = graph()->NewNode(simplified()->LoadField(field_access), receiver,
- effect, check_success);
-
- // TODO(turbofan): handle slow case instead of deoptimizing.
- Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state_before,
- effect, check_failed);
- NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- ReplaceWithValue(node, load, load, check_success);
- return Replace(load);
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadGlobal(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadGlobal);
- Handle<String> name =
- Handle<String>::cast(LoadGlobalParametersOf(node->op()).name());
- // Try to optimize loads from the global object.
- Handle<Object> constant_value =
- jsgraph()->isolate()->factory()->GlobalConstantFor(name);
- if (!constant_value.is_null()) {
- // Always optimize global constants.
- Node* constant = jsgraph()->Constant(constant_value);
- ReplaceWithValue(node, constant);
- return Replace(constant);
- }
-
- if (global_object_.is_null()) {
- // Nothing else can be done if we don't have a global object.
- return NoChange();
- }
-
- if (mode() == kDeoptimizationEnabled) {
- // Handle lookups in the script context.
- {
- Handle<ScriptContextTable> script_contexts(
- global_object_->native_context()->script_context_table());
- ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(script_contexts, name, &lookup)) {
- // TODO(turbofan): introduce a LoadContext here.
- return NoChange();
- }
- }
-
- // Constant promotion or cell access requires lazy deoptimization support.
- LookupIterator it(global_object_, name, LookupIterator::OWN);
-
- if (it.state() == LookupIterator::DATA) {
- Handle<PropertyCell> cell = it.GetPropertyCell();
- dependencies_->AssumePropertyCell(cell);
-
- if (it.property_details().cell_type() == PropertyCellType::kConstant) {
- // Constant promote the global's current value.
- Handle<Object> constant_value(cell->value(), jsgraph()->isolate());
- if (constant_value->IsConsString()) {
- constant_value =
- String::Flatten(Handle<String>::cast(constant_value));
- }
- Node* constant = jsgraph()->Constant(constant_value);
- ReplaceWithValue(node, constant);
- return Replace(constant);
- } else {
- // Load directly from the property cell.
- FieldAccess access = AccessBuilder::ForPropertyCellValue();
- Node* control = NodeProperties::GetControlInput(node);
- Node* load_field = graph()->NewNode(
- simplified()->LoadField(access), jsgraph()->Constant(cell),
- NodeProperties::GetEffectInput(node), control);
- ReplaceWithValue(node, load_field, load_field, control);
- return Replace(load_field);
- }
- }
- } else {
- // TODO(turbofan): non-configurable properties on the global object
- // should be loadable through a cell without deoptimization support.
- }
-
- return NoChange();
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadProperty(Node* node) {
- return NoChange();
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSStoreNamed);
- Node* frame_state_before = GetFrameStateBefore(node);
- if (frame_state_before == nullptr) return NoChange();
-
- const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- SmallMapList maps;
- TypeFeedbackId id = js_type_feedback_->FindTypeFeedbackId(node);
- if (id.IsNone() || oracle()->StoreIsUninitialized(id) == UNINITIALIZED) {
- // No type feedback ids or the store is uninitialized.
- // TODO(titzer): no feedback from vector ICs from stores.
- return NoChange();
- } else {
- oracle()->AssignmentReceiverTypes(id, p.name(), &maps);
- }
-
- Node* receiver = node->InputAt(0);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
-
- if (!ENABLE_FAST_PROPERTY_STORES) return NoChange();
-
- Handle<Map> map = maps.first();
- FieldAccess field_access;
- if (!GetInObjectFieldAccess(STORE, map, p.name(), &field_access)) {
- return NoChange();
- }
-
- Node* control = NodeProperties::GetControlInput(node);
- Node* check_success;
- Node* check_failed;
- BuildMapCheck(receiver, map, true, effect, control, &check_success,
- &check_failed);
-
- // Build the actual load.
- Node* value = node->InputAt(1);
- Node* store = graph()->NewNode(simplified()->StoreField(field_access),
- receiver, value, effect, check_success);
-
- // TODO(turbofan): handle slow case instead of deoptimizing.
- Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state_before,
- effect, check_failed);
- NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- ReplaceWithValue(node, store, store, check_success);
- return Replace(store);
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSStoreProperty(Node* node) {
- return NoChange();
-}
-
-
-void JSTypeFeedbackSpecializer::BuildMapCheck(Node* receiver, Handle<Map> map,
- bool smi_check, Node* effect,
- Node* control, Node** success,
- Node** fail) {
- Node* if_smi = nullptr;
- if (smi_check) {
- Node* branch_smi = graph()->NewNode(
- common()->Branch(BranchHint::kFalse),
- graph()->NewNode(simplified()->ObjectIsSmi(), receiver), control);
- if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
- control = graph()->NewNode(common()->IfFalse(), branch_smi);
- }
-
- FieldAccess map_access = AccessBuilder::ForMap();
- Node* receiver_map = graph()->NewNode(simplified()->LoadField(map_access),
- receiver, effect, control);
- Node* map_const = jsgraph_->Constant(map);
- Node* cmp = graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
- receiver_map, map_const);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), cmp, control);
- *success = graph()->NewNode(common()->IfTrue(), branch);
- *fail = graph()->NewNode(common()->IfFalse(), branch);
-
- if (if_smi) {
- *fail = graph()->NewNode(common()->Merge(2), *fail, if_smi);
- }
-}
-
-
-// Get the frame state before an operation if it exists and has a valid
-// bailout id.
-Node* JSTypeFeedbackSpecializer::GetFrameStateBefore(Node* node) {
- int count = OperatorProperties::GetFrameStateInputCount(node->op());
- DCHECK_LE(count, 2);
- if (count == 2) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- if (frame_state->opcode() == IrOpcode::kFrameState) {
- BailoutId id = OpParameter<FrameStateInfo>(node).bailout_id();
- if (id != BailoutId::None()) return frame_state;
- }
- }
- return nullptr;
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/js-type-feedback.h b/deps/v8/src/compiler/js-type-feedback.h
deleted file mode 100644
index 84060f8096..0000000000
--- a/deps/v8/src/compiler/js-type-feedback.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_TYPE_FEEDBACK_H_
-#define V8_COMPILER_JS_TYPE_FEEDBACK_H_
-
-#include "src/utils.h"
-
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-aux-data.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-
-class TypeFeedbackOracle;
-class SmallMapList;
-class CompilationDependencies;
-
-namespace compiler {
-
-// Stores type feedback information for nodes in the graph in a separate
-// data structure.
-class JSTypeFeedbackTable : public ZoneObject {
- public:
- explicit JSTypeFeedbackTable(Zone* zone);
-
- void Record(Node* node, TypeFeedbackId id);
- void Record(Node* node, FeedbackVectorICSlot slot);
-
- private:
- friend class JSTypeFeedbackSpecializer;
- typedef std::map<NodeId, TypeFeedbackId, std::less<NodeId>,
- zone_allocator<TypeFeedbackId> > TypeFeedbackIdMap;
- typedef std::map<NodeId, FeedbackVectorICSlot, std::less<NodeId>,
- zone_allocator<FeedbackVectorICSlot> >
- FeedbackVectorICSlotMap;
-
- TypeFeedbackIdMap type_feedback_id_map_;
- FeedbackVectorICSlotMap feedback_vector_ic_slot_map_;
-
- TypeFeedbackId FindTypeFeedbackId(Node* node) {
- TypeFeedbackIdMap::const_iterator it =
- type_feedback_id_map_.find(node->id());
- return it == type_feedback_id_map_.end() ? TypeFeedbackId::None()
- : it->second;
- }
-
- FeedbackVectorICSlot FindFeedbackVectorICSlot(Node* node) {
- FeedbackVectorICSlotMap::const_iterator it =
- feedback_vector_ic_slot_map_.find(node->id());
- return it == feedback_vector_ic_slot_map_.end()
- ? FeedbackVectorICSlot::Invalid()
- : it->second;
- }
-};
-
-
-// Specializes a graph to the type feedback recorded in the
-// {js_type_feedback} provided to the constructor.
-class JSTypeFeedbackSpecializer : public AdvancedReducer {
- public:
- enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
-
- JSTypeFeedbackSpecializer(Editor* editor, JSGraph* jsgraph,
- JSTypeFeedbackTable* js_type_feedback,
- TypeFeedbackOracle* oracle,
- Handle<GlobalObject> global_object,
- DeoptimizationMode mode,
- CompilationDependencies* dependencies)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- simplified_(jsgraph->graph()->zone()),
- js_type_feedback_(js_type_feedback),
- oracle_(oracle),
- global_object_(global_object),
- mode_(mode),
- dependencies_(dependencies) {
- CHECK_NOT_NULL(js_type_feedback);
- }
-
- Reduction Reduce(Node* node) override;
-
- // Visible for unit testing.
- Reduction ReduceJSLoadGlobal(Node* node);
- Reduction ReduceJSLoadNamed(Node* node);
- Reduction ReduceJSLoadProperty(Node* node);
- Reduction ReduceJSStoreNamed(Node* node);
- Reduction ReduceJSStoreProperty(Node* node);
-
- private:
- JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
- JSTypeFeedbackTable* js_type_feedback_;
- TypeFeedbackOracle* oracle_;
- Handle<GlobalObject> global_object_;
- DeoptimizationMode const mode_;
- CompilationDependencies* dependencies_;
-
- TypeFeedbackOracle* oracle() { return oracle_; }
- Graph* graph() { return jsgraph_->graph(); }
- JSGraph* jsgraph() { return jsgraph_; }
- CommonOperatorBuilder* common() { return jsgraph_->common(); }
- DeoptimizationMode mode() const { return mode_; }
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- void BuildMapCheck(Node* receiver, Handle<Map> map, bool smi_check,
- Node* effect, Node* control, Node** success, Node** fail);
-
- Node* GetFrameStateBefore(Node* node);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 7c25afcfaf..f221577104 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
@@ -10,6 +11,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/state-values-utils.h"
#include "src/types.h"
namespace v8 {
@@ -21,8 +23,13 @@ namespace compiler {
// - relax effects from generic but not-side-effecting operations
-JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone)
- : AdvancedReducer(editor), jsgraph_(jsgraph), simplified_(graph()->zone()) {
+JSTypedLowering::JSTypedLowering(Editor* editor,
+ CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ dependencies_(dependencies),
+ flags_(flags),
+ jsgraph_(jsgraph) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
double max = kMaxInt / (1 << k);
@@ -36,16 +43,15 @@ JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone)
// allocated object and also provides helpers for commonly allocated objects.
class AllocationBuilder final {
public:
- AllocationBuilder(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
- Node* effect, Node* control)
+ AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
: jsgraph_(jsgraph),
- simplified_(simplified),
allocation_(nullptr),
effect_(effect),
control_(control) {}
// Primitive allocation of static size.
void Allocate(int size) {
+ effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
allocation_ = graph()->NewNode(
simplified()->Allocate(), jsgraph()->Constant(size), effect_, control_);
effect_ = allocation_;
@@ -61,8 +67,7 @@ class AllocationBuilder final {
void AllocateArray(int length, Handle<Map> map) {
Allocate(FixedArray::SizeFor(length));
Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(graph()->zone()),
- jsgraph()->Constant(length));
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
// Compound store of a constant into a field.
@@ -70,17 +75,26 @@ class AllocationBuilder final {
Store(access, jsgraph()->Constant(value));
}
- Node* allocation() const { return allocation_; }
- Node* effect() const { return effect_; }
+ void FinishAndChange(Node* node) {
+ NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+ node->ReplaceInput(0, allocation_);
+ node->ReplaceInput(1, effect_);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->FinishRegion());
+ }
+
+ Node* Finish() {
+ return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+ }
protected:
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
- SimplifiedOperatorBuilder* simplified() { return simplified_; }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
private:
JSGraph* const jsgraph_;
- SimplifiedOperatorBuilder* simplified_;
Node* allocation_;
Node* effect_;
Node* control_;
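The rewritten AllocationBuilder brackets an allocation and its initializing stores between BeginRegion and FinishRegion effect nodes, so later passes can treat the group as a single atomic allocation. A usage sketch mirroring the helpers above:

  AllocationBuilder a(jsgraph(), effect, control);
  a.Allocate(FixedArray::SizeFor(length));  // emits BeginRegion + Allocate
  a.Store(AccessBuilder::ForMap(), map);    // initializing stores stay
  a.Store(AccessBuilder::ForFixedArrayLength(),  // inside the region
          jsgraph()->Constant(length));
  a.FinishAndChange(node);  // node is rewritten into the FinishRegion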
@@ -135,11 +149,6 @@ class JSBinopReduction final {
node_->ReplaceInput(1, ConvertToUI32(right(), right_signedness));
}
- void ConvertInputsToString() {
- node_->ReplaceInput(0, ConvertToString(left()));
- node_->ReplaceInput(1, ConvertToString(right()));
- }
-
void SwapInputs() {
Node* l = left();
Node* r = right();
@@ -213,11 +222,13 @@ class JSBinopReduction final {
bool IsStrong() { return is_strong(OpParameter<LanguageMode>(node_)); }
- bool OneInputIs(Type* t) { return left_type()->Is(t) || right_type()->Is(t); }
+ bool LeftInputIs(Type* t) { return left_type()->Is(t); }
- bool BothInputsAre(Type* t) {
- return left_type()->Is(t) && right_type()->Is(t);
- }
+ bool RightInputIs(Type* t) { return right_type()->Is(t); }
+
+ bool OneInputIs(Type* t) { return LeftInputIs(t) || RightInputIs(t); }
+
+ bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
bool OneInputCannotBe(Type* t) {
return !left_type()->Maybe(t) || !right_type()->Maybe(t);
@@ -247,16 +258,6 @@ class JSBinopReduction final {
JSTypedLowering* lowering_; // The containing lowering instance.
Node* node_; // The original node.
- Node* ConvertToString(Node* node) {
- // Avoid introducing too many eager ToString() operations.
- Reduction reduced = lowering_->ReduceJSToStringInput(node);
- if (reduced.Changed()) return reduced.replacement();
- Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
- effect(), control());
- update_effect(n);
- return n;
- }
-
Node* CreateFrameStateForLeftInput(Node* frame_state) {
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
@@ -587,13 +588,33 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
invert);
}
+ if (r.BothInputsAre(Type::Boolean())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+ invert);
+ }
if (r.BothInputsAre(Type::Receiver())) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Receiver()), invert);
}
- // TODO(turbofan): js-typed-lowering of Equal(undefined)
- // TODO(turbofan): js-typed-lowering of Equal(null)
- // TODO(turbofan): js-typed-lowering of Equal(boolean)
+ if (r.OneInputIs(Type::NullOrUndefined())) {
+ Callable const callable = CodeFactory::CompareNilIC(isolate(), kNullValue);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ if (invert) {
+ // Insert a boolean not to invert the value.
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
+ node->ReplaceUses(value);
+ // Note: ReplaceUses() smashes all uses, so smash it back here.
+ value->ReplaceInput(0, node);
+ return Replace(value);
+ }
+ return Changed(node);
+ }
return NoChange();
}
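The null/undefined case above rewrites the JSEqual node in place: the nil-side operand is removed, the CompareNilIC code object is inserted as input 0, and ChangeOp turns the node into a stub call. The inversion is the subtle part: ReplaceUses() redirects every use of node to the new BooleanNot, including the BooleanNot's own operand, so that one input must be pointed back at node afterwards, as the comment above notes. A shape check (sketch):

  DCHECK_EQ(IrOpcode::kBooleanNot, value->opcode());
  DCHECK_EQ(node, value->InputAt(0));  // restored after ReplaceUses()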
@@ -671,7 +692,7 @@ Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
return Changed(node);
} else if (input_type->Is(Type::String())) {
// JSUnaryNot(x:string) => NumberEqual(x.length,#0)
- FieldAccess const access = AccessBuilder::ForStringLength(graph()->zone());
+ FieldAccess const access = AccessBuilder::ForStringLength();
// It is safe for the load to be effect-free (i.e. not linked into effect
// chain) because we assume String::length to be immutable.
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
@@ -690,9 +711,10 @@ Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
Node* const input = node->InputAt(0);
Type* const input_type = NodeProperties::GetType(input);
+ Node* const effect = NodeProperties::GetEffectInput(node);
if (input_type->Is(Type::Boolean())) {
// JSToBoolean(x:boolean) => x
- ReplaceWithValue(node, input);
+ ReplaceWithValue(node, input, effect);
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
@@ -704,11 +726,9 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return Changed(node);
} else if (input_type->Is(Type::String())) {
// JSToBoolean(x:string) => NumberLessThan(#0,x.length)
- FieldAccess const access = AccessBuilder::ForStringLength(graph()->zone());
- // It is safe for the load to be effect-free (i.e. not linked into effect
- // chain) because we assume String::length to be immutable.
+ FieldAccess const access = AccessBuilder::ForStringLength();
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
+ effect, graph()->start());
ReplaceWithValue(node, node, length);
node->ReplaceInput(0, jsgraph()->ZeroConstant());
node->ReplaceInput(1, length);
@@ -791,13 +811,18 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type->Is(Type::String())) {
return Changed(input); // JSToString(x:string) => x
}
+ if (input_type->Is(Type::Boolean())) {
+ return Replace(
+ graph()->NewNode(common()->Select(kMachAnyTagged), input,
+ jsgraph()->HeapConstant(factory()->true_string()),
+ jsgraph()->HeapConstant(factory()->false_string())));
+ }
if (input_type->Is(Type::Undefined())) {
return Replace(jsgraph()->HeapConstant(factory()->undefined_string()));
}
if (input_type->Is(Type::Null())) {
return Replace(jsgraph()->HeapConstant(factory()->null_string()));
}
- // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
// TODO(turbofan): js-typed-lowering of ToString(x:number)
return NoChange();
}
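The Select(kMachAnyTagged) introduced for the boolean case above is a value-level ternary between the two canonical strings; the same reduction as a hedged standalone sketch:

    #include <string>

    // ToString(x:boolean) => x ? "true" : "false"
    static std::string BooleanToStringSketch(bool input) {
      return input ? "true" : "false";
    }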
@@ -815,16 +840,82 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSLoadGlobal(Node* node) {
- // Optimize global constants like "undefined", "Infinity", and "NaN".
- Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
- Handle<Object> constant_value = factory()->GlobalConstantFor(name);
- if (!constant_value.is_null()) {
- Node* constant = jsgraph()->Constant(constant_value);
- ReplaceWithValue(node, constant);
- return Replace(constant);
+Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!receiver_type->Is(Type::Receiver())) {
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
+ if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+ NodeProperties::IsExceptionalCall(node)) {
+ // ToObject throws for null or undefined inputs.
+ return NoChange();
+ }
+
+ // Check whether {receiver} is a Smi.
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+
+ // Determine the instance type of {receiver}.
+ Node* receiver_map = efalse0 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, efalse0, if_false0);
+ Node* receiver_instance_type = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, efalse0, if_false0);
+
+ // Check whether {receiver} is a spec object.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Node* check1 =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ receiver_instance_type);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+
+ // Convert {receiver} using the ToObjectStub.
+ Node* if_convert =
+ graph()->NewNode(common()->Merge(2), if_true0, if_false1);
+ Node* econvert =
+ graph()->NewNode(common()->EffectPhi(2), etrue0, efalse1, if_convert);
+ Node* rconvert;
+ {
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ rconvert = econvert = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, frame_state, econvert, if_convert);
+ }
+
+ // The {receiver} is already a spec object.
+ Node* if_done = if_true1;
+ Node* edone = etrue1;
+ Node* rdone = receiver;
+
+ control = graph()->NewNode(common()->Merge(2), if_convert, if_done);
+ effect = graph()->NewNode(common()->EffectPhi(2), econvert, edone, control);
+ receiver = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), rconvert,
+ rdone, control);
}
- return NoChange();
+ ReplaceWithValue(node, receiver, effect, control);
+ return Changed(receiver);
}
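The diamond built above funnels Smi inputs and non-receiver instance types into the ToObjectStub call and lets receivers pass through unchanged. A compact sketch of that dispatch under assumed types (Tag, Value, and the threshold constant are illustrative, not V8 definitions):

    enum class Tag { kSmi, kHeapObject };
    struct Value { Tag tag; int instance_type; };
    constexpr int kFirstJSReceiverTypeSketch = 0x80;  // placeholder threshold

    // Smi or non-receiver takes the conversion path; receivers pass through.
    static Value ToObjectSketch(Value receiver, Value (*to_object)(Value)) {
      bool needs_conversion =
          receiver.tag == Tag::kSmi ||
          receiver.instance_type < kFirstJSReceiverTypeSketch;
      return needs_conversion ? to_object(receiver) : receiver;
    }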
@@ -834,14 +925,13 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
Type* receiver_type = NodeProperties::GetType(receiver);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<Name> name = LoadNamedParametersOf(node->op()).name();
+ Handle<Name> name = NamedAccessOf(node->op()).name();
// Optimize "length" property of strings.
if (name.is_identical_to(factory()->length_string()) &&
receiver_type->Is(Type::String())) {
- Node* value = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForStringLength(graph()->zone())),
- receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -975,6 +1065,117 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ JSBinopReduction r(this, node);
+ Node* effect = r.effect();
+ Node* control = r.control();
+
+ if (r.right_type()->IsConstant() &&
+ r.right_type()->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ if (!function->map()->has_non_instance_prototype()) {
+ JSFunction::EnsureHasInitialMap(function);
+ DCHECK(function->has_initial_map());
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ this->dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* prototype =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+
+ Node* if_is_smi = nullptr;
+ Node* e_is_smi = nullptr;
+ // If the left hand side is an object, no smi check is needed.
+ if (r.left_type()->Maybe(Type::TaggedSigned())) {
+ Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+ Node* branch_is_smi = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), is_smi, control);
+ if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+ e_is_smi = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
+ }
+
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ r.left(), effect, control);
+
+      // Loop through the {object}'s prototype chain looking for the {prototype}.
+ Node* loop = control =
+ graph()->NewNode(common()->Loop(2), control, control);
+
+ Node* loop_effect = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+
+ Node* loop_object_map = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
+ object_map, r.left(), loop);
+
+
+ Node* object_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ loop_object_map, loop_effect, control);
+
+ // Check if object prototype is equal to function prototype.
+ Node* eq_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, prototype);
+ Node* branch_eq_proto = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), eq_proto, control);
+ Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
+ Node* e_eq_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
+
+ // If not, check if object prototype is the null prototype.
+ Node* null_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, jsgraph()->NullConstant());
+ Node* branch_null_proto = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), null_proto, control);
+ Node* if_null_proto =
+ graph()->NewNode(common()->IfTrue(), branch_null_proto);
+ Node* e_null_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+ Node* load_object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ object_prototype, effect, control);
+ // Close the loop.
+ loop_effect->ReplaceInput(1, effect);
+ loop_object_map->ReplaceInput(1, load_object_map);
+ loop->ReplaceInput(1, control);
+
+ control =
+ graph()->NewNode(common()->Merge(2), if_eq_proto, if_null_proto);
+ effect = graph()->NewNode(common()->EffectPhi(2), e_eq_proto,
+ e_null_proto, control);
+
+
+ Node* result = graph()->NewNode(common()->Phi(kTypeBool, 2),
+ jsgraph()->TrueConstant(),
+ jsgraph()->FalseConstant(), control);
+
+ if (if_is_smi != nullptr) {
+ DCHECK(e_is_smi != nullptr);
+ control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+ result = graph()->NewNode(common()->Phi(kTypeBool, 2),
+ jsgraph()->FalseConstant(), result, control);
+ }
+ ReplaceWithValue(node, result, effect, control);
+ return Changed(result);
+ }
+ }
+
+ return NoChange();
+}
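The loop above walks the prototype chain of the left operand, answering true when the function's prototype is found and false once the null prototype terminates the chain. The same walk as a standalone sketch (Obj is a hypothetical stand-in for the map/prototype pairs):

    #include <cassert>

    struct Obj { const Obj* prototype; };

    // True if {prototype} occurs on the chain; false once null terminates it.
    static bool InstanceOfSketch(const Obj* obj, const Obj* prototype) {
      for (const Obj* p = obj->prototype; p != nullptr; p = p->prototype) {
        if (p == prototype) return true;
      }
      return false;
    }

    int main() {
      Obj base{nullptr}, derived{&base}, leaf{&derived};
      assert(InstanceOfSketch(&leaf, &base));
      assert(!InstanceOfSketch(&base, &leaf));
      return 0;
    }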
+
+
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -1016,124 +1217,116 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSLoadDynamicGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicGlobal, node->opcode());
- DynamicGlobalAccess const& access = DynamicGlobalAccessOf(node->op());
- Node* const vector = NodeProperties::GetValueInput(node, 0);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const state1 = NodeProperties::GetFrameStateInput(node, 0);
- Node* const state2 = NodeProperties::GetFrameStateInput(node, 1);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- if (access.RequiresFullCheck()) return NoChange();
-
- // Perform checks whether the fast mode applies, by looking for any extension
- // object which might shadow the optimistic declaration.
- uint32_t bitset = access.check_bitset();
- Node* check_true = control;
- Node* check_false = graph()->NewNode(common()->Merge(0));
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = graph()->NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- context, context, effect);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()),
- load, jsgraph()->ZeroConstant());
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
- check_true);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- check_false->AppendInput(graph()->zone(), if_false);
- NodeProperties::ChangeOp(check_false,
- common()->Merge(check_false->InputCount()));
- check_true = if_true;
- }
-
- // Fast case, because variable is not shadowed. Perform global object load.
- Node* global = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true), context,
- context, effect);
- Node* fast = graph()->NewNode(
- javascript()->LoadGlobal(access.name(), access.feedback(),
- access.typeof_mode()),
- context, global, vector, context, state1, state2, global, check_true);
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
- Node* slow = graph()->NewNode(
- javascript()->LoadDynamicGlobal(access.name(), check_bitset,
- access.feedback(), access.typeof_mode()),
- vector, context, context, state1, state2, effect, check_false);
-
- // Replace value, effect and control uses accordingly.
- Node* new_control =
- graph()->NewNode(common()->Merge(2), check_true, check_false);
- Node* new_effect =
- graph()->NewNode(common()->EffectPhi(2), fast, slow, new_control);
- Node* new_value = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fast,
- slow, new_control);
- ReplaceWithValue(node, new_value, new_effect, new_control);
- return Changed(new_value);
+Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
+ ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* context = NodeProperties::GetContextInput(node);
+ Type* context_type = NodeProperties::GetType(context);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!receiver_type->Is(Type::Receiver())) {
+ if (receiver_type->Is(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNullOrUndefined) {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ receiver = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* global_object = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true),
+ context, context, effect);
+ receiver = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSGlobalObjectGlobalProxy()),
+ global_object, effect, control);
+ }
+ } else if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNotNullOrUndefined) {
+ receiver = effect =
+ graph()->NewNode(javascript()->ToObject(), receiver, context,
+ frame_state, effect, control);
+ } else {
+ // Check {receiver} for undefined.
+ Node* check0 =
+ graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
+ receiver, jsgraph()->UndefinedConstant());
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Check {receiver} for null.
+ Node* check1 =
+ graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
+ receiver, jsgraph()->NullConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+
+ // Convert {receiver} using ToObject.
+ Node* if_convert = if_false1;
+ Node* econvert = effect;
+ Node* rconvert;
+ {
+ rconvert = econvert =
+ graph()->NewNode(javascript()->ToObject(), receiver, context,
+ frame_state, econvert, if_convert);
+ }
+
+ // Replace {receiver} with global proxy of {context}.
+ Node* if_global =
+ graph()->NewNode(common()->Merge(2), if_true0, if_true1);
+ Node* eglobal = effect;
+ Node* rglobal;
+ {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ rglobal = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* global_object = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true),
+ context, context, eglobal);
+ rglobal = eglobal = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSGlobalObjectGlobalProxy()),
+ global_object, eglobal, if_global);
+ }
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_convert, if_global);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), econvert, eglobal, control);
+ receiver = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), rconvert,
+ rglobal, control);
+ }
+ }
+ ReplaceWithValue(node, receiver, effect, control);
+ return Changed(receiver);
}
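The three paths above encode the sloppy-mode receiver rules: null or undefined is replaced by the global proxy, other primitives go through ToObject, and receivers are left untouched. A one-switch sketch of the dispatch (ReceiverKind is illustrative):

    enum class ReceiverKind { kNullOrUndefined, kOtherPrimitive, kReceiver };

    // Which replacement each receiver kind gets in sloppy mode.
    static const char* ConvertReceiverSketch(ReceiverKind kind) {
      switch (kind) {
        case ReceiverKind::kNullOrUndefined: return "global proxy";
        case ReceiverKind::kOtherPrimitive:  return "ToObject(receiver)";
        case ReceiverKind::kReceiver:        return "receiver unchanged";
      }
      return nullptr;
    }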
-Reduction JSTypedLowering::ReduceJSLoadDynamicContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicContext, node->opcode());
- DynamicContextAccess const& access = DynamicContextAccessOf(node->op());
- ContextAccess const& context_access = access.context_access();
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const state = NodeProperties::GetFrameStateInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- if (access.RequiresFullCheck()) return NoChange();
-
- // Perform checks whether the fast mode applies, by looking for any extension
- // object which might shadow the optimistic declaration.
- uint32_t bitset = access.check_bitset();
- Node* check_true = control;
- Node* check_false = graph()->NewNode(common()->Merge(0));
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = graph()->NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- context, context, effect);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()),
- load, jsgraph()->ZeroConstant());
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
- check_true);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- check_false->AppendInput(graph()->zone(), if_false);
- NodeProperties::ChangeOp(check_false,
- common()->Merge(check_false->InputCount()));
- check_true = if_true;
- }
-
- // Fast case, because variable is not shadowed. Perform context slot load.
- Node* fast =
- graph()->NewNode(javascript()->LoadContext(context_access.depth(),
- context_access.index(), false),
- context, context, effect);
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- uint32_t check_bitset = DynamicContextAccess::kFullCheckRequired;
- Node* slow =
- graph()->NewNode(javascript()->LoadDynamicContext(
- access.name(), check_bitset, context_access.depth(),
- context_access.index()),
- context, context, state, effect, check_false);
-
- // Replace value, effect and control uses accordingly.
- Node* new_control =
- graph()->NewNode(common()->Merge(2), check_true, check_false);
- Node* new_effect =
- graph()->NewNode(common()->EffectPhi(2), fast, slow, new_control);
- Node* new_value = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fast,
- slow, new_control);
- ReplaceWithValue(node, new_value, new_effect, new_control);
- return Changed(new_value);
+namespace {
+
+// Retrieves the frame state holding actual argument values.
+Node* GetArgumentsFrameState(Node* frame_state) {
+ Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+ return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+ ? outer_state
+ : frame_state;
}
+} // namespace
+
Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
@@ -1170,6 +1363,97 @@ Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
+ // Use inline allocation for all mapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ if (p.type() == CreateArgumentsParameters::kMappedArguments &&
+ outer_state->opcode() == IrOpcode::kFrameState) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared->has_duplicate_parameters()) return NoChange();
+ // Choose the correct frame state and frame state info depending on whether
+ // there conceptually is an arguments adaptor frame in the call chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ bool has_aliased_arguments = false;
+ Node* const elements = AllocateAliasedArguments(
+ effect, control, args_state, context, shared, &has_aliased_arguments);
+ // Load the arguments object map from the current native context.
+ Node* const load_global_object = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
+ context, effect, control);
+ Node* const load_native_context =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSGlobalObjectNativeContext()),
+ load_global_object, effect, control);
+ Node* const load_arguments_map = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+ : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
+ a.Allocate(Heap::kSloppyArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ // Use inline allocation for all unmapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ if (p.type() == CreateArgumentsParameters::kUnmappedArguments &&
+ outer_state->opcode() == IrOpcode::kFrameState) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ // Choose the correct frame state and frame state info depending on whether
+ // there conceptually is an arguments adaptor frame in the call chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ Node* const elements = AllocateArguments(effect, control, args_state);
+ // Load the arguments object map from the current native context.
+ Node* const load_global_object = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
+ context, effect, control);
+ Node* const load_native_context =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSGlobalObjectNativeContext()),
+ load_global_object, effect, control);
+ Node* const load_arguments_map = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
+ a.Allocate(Heap::kStrictArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
return NoChange();
}
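Both inline-allocation paths above write the same header shape via the AllocationBuilder; a hedged layout sketch (field types are placeholders for tagged pointers):

    // Sloppy arguments object: five tagged fields, matching the
    // kSloppyArgumentsObjectSize == 5 * kPointerSize assertion above;
    // the strict variant simply drops the trailing callee field.
    struct SloppyArgumentsObjectSketch {
      void* map;         // fast-aliased or sloppy arguments map
      void* properties;  // empty fixed array
      void* elements;    // backing store built from the frame state
      void* length;      // parameter count minus receiver, as a tagged value
      void* callee;      // absent in the strict arguments object
    };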
@@ -1211,7 +1495,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
// TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
if ((flags & ArrayLiteral::kShallowElements) != 0 &&
(flags & ArrayLiteral::kIsStrong) == 0 &&
- length < JSObject::kInitialMaxFastElementArray) {
+ length < JSArray::kInitialMaxFastElementArray) {
Isolate* isolate = jsgraph()->isolate();
Callable callable = CodeFactory::FastCloneShallowArray(isolate);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1260,36 +1544,94 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
+ int slot_count = OpParameter<int>(node->op());
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // The closure can be NumberConstant(0) if the closure is global code
+ // (rather than a function). We exclude that case here.
+ // TODO(jarin) Find a better way to check that the closure is a function.
+
+ // Use inline allocation for function contexts up to a size limit.
+ if (slot_count < kFunctionContextAllocationLimit &&
+ closure->opcode() != IrOpcode::kNumberConstant) {
+    // JSCreateFunctionContext[slot_count < limit](fun)
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const extension = jsgraph()->ZeroConstant();
+ Node* const load = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
+ context, effect, control);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
+ a.AllocateArray(context_length, factory()->function_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+ a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+  // Use the FastNewContextStub only for function contexts up to the maximum size.
+ if (slot_count <= FastNewContextStub::kMaximumSlots) {
+ Isolate* isolate = jsgraph()->isolate();
+ Callable callable = CodeFactory::FastNewContext(isolate, slot_count);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
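The inline allocation above lays the new context out as the four standard header slots followed by the user slots, each initialized to undefined. A sketch of the slot order, mirroring the MIN_CONTEXT_SLOTS == 4 assertion (names are illustrative):

    enum ContextSlotSketch {
      kClosureSlot,       // Context::CLOSURE_INDEX
      kPreviousSlot,      // Context::PREVIOUS_INDEX
      kExtensionSlot,     // Context::EXTENSION_INDEX
      kGlobalObjectSlot,  // Context::GLOBAL_OBJECT_INDEX
      kMinContextSlots    // == 4; user slots follow, filled with undefined
    };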
+
+
Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
Node* const input = NodeProperties::GetValueInput(node, 0);
+ Node* const closure = NodeProperties::GetValueInput(node, 1);
Type* input_type = NodeProperties::GetType(input);
- if (FLAG_turbo_allocate && input_type->Is(Type::Receiver())) {
- // JSCreateWithContext(o:receiver, f)
+
+ // The closure can be NumberConstant(0) if the closure is global code
+ // (rather than a function). We exclude that case here.
+ // TODO(jarin) Find a better way to check that the closure is a function.
+
+  // Use inline allocation for with-contexts for regular objects.
+ if (input_type->Is(Type::Receiver()) &&
+ closure->opcode() != IrOpcode::kNumberConstant) {
+ // JSCreateWithContext(o:receiver, fun)
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
- Node* const closure = NodeProperties::GetValueInput(node, 1);
Node* const context = NodeProperties::GetContextInput(node);
Node* const load = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
context, effect, control);
- AllocationBuilder a(jsgraph(), simplified(), effect, control);
+ AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), input);
a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
- // TODO(mstarzinger): We could mutate {node} into the allocation instead.
- NodeProperties::SetType(a.allocation(), NodeProperties::GetType(node));
- ReplaceWithValue(node, node, a.effect());
- node->ReplaceInput(0, a.allocation());
- node->ReplaceInput(1, a.effect());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->Finish(1));
+ RelaxControls(node);
+ a.FinishAndChange(node);
return Changed(node);
}
+
return NoChange();
}
@@ -1298,18 +1640,25 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
int context_length = scope_info->ContextLength();
- if (FLAG_turbo_allocate && context_length < kBlockContextAllocationLimit) {
- // JSCreateBlockContext(s:scope[length < limit], f)
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // The closure can be NumberConstant(0) if the closure is global code
+ // (rather than a function). We exclude that case here.
+ // TODO(jarin) Find a better way to check that the closure is a function.
+
+ // Use inline allocation for block contexts up to a size limit.
+ if (context_length < kBlockContextAllocationLimit &&
+ closure->opcode() != IrOpcode::kNumberConstant) {
+ // JSCreateBlockContext[scope[length < limit]](fun)
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
- Node* const closure = NodeProperties::GetValueInput(node, 1);
Node* const context = NodeProperties::GetContextInput(node);
Node* const extension = jsgraph()->Constant(scope_info);
Node* const load = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
context, effect, control);
- AllocationBuilder a(jsgraph(), simplified(), effect, control);
+ AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(context_length, factory()->block_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
@@ -1319,15 +1668,11 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->TheHoleConstant());
}
- // TODO(mstarzinger): We could mutate {node} into the allocation instead.
- NodeProperties::SetType(a.allocation(), NodeProperties::GetType(node));
- ReplaceWithValue(node, node, a.effect());
- node->ReplaceInput(0, a.allocation());
- node->ReplaceInput(1, a.effect());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->Finish(1));
+ RelaxControls(node);
+ a.FinishAndChange(node);
return Changed(node);
}
+
return NoChange();
}
@@ -1336,32 +1681,114 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity() - 2);
- Node* const function = NodeProperties::GetValueInput(node, 0);
- Type* const function_type = NodeProperties::GetType(function);
- Node* const receiver = NodeProperties::GetValueInput(node, 1);
- Type* const receiver_type = NodeProperties::GetType(receiver);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
-
- // Check that {function} is actually a JSFunction with the correct arity.
- if (function_type->IsFunction() &&
- function_type->AsFunction()->Arity() == arity) {
- // Check that the {receiver} doesn't need to be wrapped.
- if (receiver_type->Is(Type::ReceiverOrUndefined())) {
- Node* const context = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
- function, effect, control);
- NodeProperties::ReplaceContextInput(node, context);
- CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (is_strict(p.language_mode())) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
+ ConvertReceiverMode convert_mode = p.convert_mode();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // Try to infer the {convert_mode} from the {receiver} type.
+ if (receiver_type->Is(Type::NullOrUndefined())) {
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ } else if (!receiver_type->Maybe(Type::NullOrUndefined())) {
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
+ }
+
+ // Check if {target} is a known JSFunction.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Class constructors are callable, but [[Call]] will raise an exception.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+ if (IsClassConstructor(shared->kind())) return NoChange();
+
+ // Grab the context from the {function}.
+ Node* context = jsgraph()->Constant(handle(function->context(), isolate()));
+ NodeProperties::ReplaceContextInput(node, context);
+
+ // Check if we need to convert the {receiver}.
+ if (is_sloppy(shared->language_mode()) && !shared->native() &&
+ !receiver_type->Is(Type::Receiver())) {
+ receiver = effect =
+ graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
+ receiver, context, frame_state, effect, control);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceValueInput(node, receiver, 1);
+ }
+
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ if (shared->internal_formal_parameter_count() == arity ||
+ shared->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Patch {node} to a direct call.
+ node->InsertInput(graph()->zone(), arity + 2,
+ jsgraph()->Int32Constant(arity));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity, flags)));
- return Changed(node);
+ } else {
+ // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+ node->InsertInput(
+ graph()->zone(), 3,
+ jsgraph()->Int32Constant(shared->internal_formal_parameter_count()));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(),
+ 1 + arity, flags)));
}
+ return Changed(node);
}
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ // Patch {node} to an indirect call via the CallFunction builtin.
+ Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ flags)));
+ return Changed(node);
+ }
+
+ // Maybe we did at least learn something about the {receiver}.
+ if (p.convert_mode() != convert_mode) {
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallFunction(p.arity(), p.language_mode(), p.feedback(),
+ convert_mode, p.tail_call_mode()));
+ return Changed(node);
+ }
+
return NoChange();
}
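The direct-versus-adaptor decision above comes down to whether the static arity matches the callee's formal parameter count, with the don't-adapt sentinel also taking the direct path. A one-function sketch of that predicate:

    // True when the call must go through the ArgumentsAdaptorTrampoline.
    static bool NeedsArgumentsAdaptorSketch(int formal_parameter_count,
                                            int arity,
                                            int dont_adapt_sentinel) {
      return formal_parameter_count != arity &&
             formal_parameter_count != dont_adapt_sentinel;
    }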
@@ -1414,9 +1841,9 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
Node* cache_type_enum_length = etrue0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
effect, if_true0);
- cache_length_true0 =
- graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
- jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
+ cache_length_true0 = graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), cache_type_enum_length,
+ jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
Node* check1 =
graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
@@ -1483,8 +1910,7 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedArrayLength(graph()->zone())),
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
cache_array_false0, efalse0, if_false0);
}
@@ -1681,11 +2107,11 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSGreaterThanOrEqual:
return ReduceJSComparison(node);
case IrOpcode::kJSBitwiseOr:
- return ReduceInt32Binop(node, machine()->Word32Or());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseOr());
case IrOpcode::kJSBitwiseXor:
- return ReduceInt32Binop(node, machine()->Word32Xor());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseXor());
case IrOpcode::kJSBitwiseAnd:
- return ReduceInt32Binop(node, machine()->Word32And());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseAnd());
case IrOpcode::kJSShiftLeft:
return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftLeft());
case IrOpcode::kJSShiftRight:
@@ -1711,22 +2137,22 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
- case IrOpcode::kJSLoadGlobal:
- return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSToObject:
+ return ReduceJSToObject(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSInstanceOf:
+ return ReduceJSInstanceOf(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
return ReduceJSStoreContext(node);
- case IrOpcode::kJSLoadDynamicGlobal:
- return ReduceJSLoadDynamicGlobal(node);
- case IrOpcode::kJSLoadDynamicContext:
- return ReduceJSLoadDynamicContext(node);
+ case IrOpcode::kJSConvertReceiver:
+ return ReduceJSConvertReceiver(node);
case IrOpcode::kJSCreateArguments:
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateClosure:
@@ -1735,6 +2161,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCreateLiteralArray(node);
case IrOpcode::kJSCreateLiteralObject:
return ReduceJSCreateLiteralObject(node);
+ case IrOpcode::kJSCreateFunctionContext:
+ return ReduceJSCreateFunctionContext(node);
case IrOpcode::kJSCreateWithContext:
return ReduceJSCreateWithContext(node);
case IrOpcode::kJSCreateBlockContext:
@@ -1763,6 +2191,81 @@ Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
}
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
+ Node* frame_state) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+
+// Helper that allocates a FixedArray serving as a parameter map for values
+// recorded in the given {frame_state}. Some elements map to slots within the
+// given {context}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateAliasedArguments(
+ Node* effect, Node* control, Node* frame_state, Node* context,
+ Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // If there is no aliasing, the arguments object elements are not special in
+  // any way, so we can just return an unmapped backing store instead.
+ int parameter_count = shared->internal_formal_parameter_count();
+ if (parameter_count == 0) {
+ return AllocateArguments(effect, control, frame_state);
+ }
+
+ // Calculate number of argument values being aliased/mapped.
+ int mapped_count = Min(argument_count, parameter_count);
+ *has_aliased_arguments = true;
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+ // The unmapped argument values recorded in the frame state are stored yet
+ // another indirection away and then linked into the parameter map below,
+ // whereas mapped argument values are replaced with a hole instead.
+ AllocationBuilder aa(jsgraph(), effect, control);
+ aa.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+ }
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ Node* arguments = aa.Finish();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+ a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+ a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+ for (int i = 0; i < mapped_count; ++i) {
+ int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+ a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+ }
+ return a.Finish();
+}
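The parameter map built above holds the context in slot 0, the unmapped backing store in slot 1, and then one context index per mapped parameter, counted down from the last formal parameter. A standalone sketch of the index computation (min_context_slots stands in for Context::MIN_CONTEXT_SLOTS):

    #include <vector>

    // Context slot index stored for each mapped parameter slot i + 2.
    static std::vector<int> MappedParameterIndices(int mapped_count,
                                                   int parameter_count,
                                                   int min_context_slots) {
      std::vector<int> indices;
      for (int i = 0; i < mapped_count; ++i) {
        indices.push_back(min_context_slots + parameter_count - 1 - i);
      }
      return indices;
    }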
+
+
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
@@ -1782,10 +2285,20 @@ CommonOperatorBuilder* JSTypedLowering::common() const {
}
+SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
+
MachineOperatorBuilder* JSTypedLowering::machine() const {
return jsgraph()->machine();
}
+
+CompilationDependencies* JSTypedLowering::dependencies() const {
+ return dependencies_;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index c11f068e5b..f4e11ec03f 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -5,14 +5,15 @@
#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
#define V8_COMPILER_JS_TYPED_LOWERING_H_
+#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
// Forward declarations.
+class CompilationDependencies;
class Factory;
@@ -23,12 +24,21 @@ class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
// Lowers JS-level operators to simplified operators based on types.
class JSTypedLowering final : public AdvancedReducer {
public:
- JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSTypedLowering(Editor* editor, CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone);
~JSTypedLowering() final {}
Reduction Reduce(Node* node) final;
@@ -41,14 +51,12 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSBitwiseOr(Node* node);
Reduction ReduceJSMultiply(Node* node);
Reduction ReduceJSComparison(Node* node);
- Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
- Reduction ReduceJSLoadDynamicGlobal(Node* node);
- Reduction ReduceJSLoadDynamicContext(Node* node);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSUnaryNot(Node* node);
@@ -57,10 +65,13 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSToObject(Node* node);
+ Reduction ReduceJSConvertReceiver(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateLiteralArray(Node* node);
Reduction ReduceJSCreateLiteralObject(Node* node);
+ Reduction ReduceJSCreateFunctionContext(Node* node);
Reduction ReduceJSCreateWithContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCallFunction(Node* node);
@@ -74,6 +85,10 @@ class JSTypedLowering final : public AdvancedReducer {
const Operator* shift_op);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
+ Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+ Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
+ Node* context, Handle<SharedFunctionInfo>,
+ bool* has_aliased_arguments);
Factory* factory() const;
Graph* graph() const;
@@ -81,17 +96,23 @@ class JSTypedLowering final : public AdvancedReducer {
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
+ CompilationDependencies* dependencies() const;
+ Flags flags() const { return flags_; }
// Limits up to which context allocations are inlined.
+ static const int kFunctionContextAllocationLimit = 16;
static const int kBlockContextAllocationLimit = 16;
+ CompilationDependencies* dependencies_;
+ Flags flags_;
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
Type* shifted_int32_ranges_[4];
};
+DEFINE_OPERATORS_FOR_FLAGS(JSTypedLowering::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index af3decc5b5..c3f6074fa6 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -18,7 +18,7 @@ namespace compiler {
namespace {
LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(Register::ToAllocationIndex(reg));
+ return LinkageLocation::ForRegister(reg.code());
}
@@ -63,6 +63,9 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallAddress:
os << "Addr";
break;
+ case CallDescriptor::kLazyBailout:
+ os << "LazyBail";
+ break;
}
return os;
}
@@ -224,7 +227,6 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
- case Runtime::kGetOriginalConstructor:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
case Runtime::kNewFunctionContext:
@@ -239,8 +241,6 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
return 0;
case Runtime::kInlineArguments:
case Runtime::kInlineArgumentsLength:
- case Runtime::kInlineCall:
- case Runtime::kInlineCallFunction:
case Runtime::kInlineDefaultConstructorCallSuper:
case Runtime::kInlineGetCallerJSFunction:
case Runtime::kInlineGetPrototype:
@@ -256,6 +256,7 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kInlineToPrimitive:
case Runtime::kInlineToString:
return 1;
+ case Runtime::kInlineCall:
case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineThrowNotDateError:
return 2;
@@ -351,12 +352,39 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
}
+CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
+ const size_t return_count = 0;
+ const size_t parameter_count = 0;
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // The target is ignored, but we need to give some values here.
+ MachineType target_type = kMachAnyTagged;
+ LinkageLocation target_loc = regloc(kJSFunctionRegister);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kLazyBailout, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoThrow, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kNeedsFrameState, // flags
+ "lazy-bailout");
+}
+
+
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
int js_parameter_count,
CallDescriptor::Flags flags) {
const size_t return_count = 1;
const size_t context_count = 1;
- const size_t parameter_count = js_parameter_count + context_count;
+ const size_t num_args_count = 1;
+ const size_t parameter_count =
+ js_parameter_count + num_args_count + context_count;
LocationSignature::Builder locations(zone, return_count, parameter_count);
MachineSignature::Builder types(zone, return_count, parameter_count);
@@ -371,6 +399,11 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
types.AddParam(kMachAnyTagged);
}
+
+ // Add JavaScript call argument count.
+ locations.AddParam(regloc(kJavaScriptCallArgCountRegister));
+ types.AddParam(kMachInt32);
+
// Add context.
locations.AddParam(regloc(kContextRegister));
types.AddParam(kMachAnyTagged);
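With the new argument-count slot, a JS call now passes the target, receiver, the arguments, #args, and finally the context (see the Call[JSFunction] layout comment in linkage.h below). A hedged sketch of the resulting context parameter index, matching the OSR computation later in this file:

    // target + receiver + params + #args precede the context parameter.
    static int ContextParameterIndexSketch(int parameter_count) {
      return 1 + 1 + 1 + parameter_count;
    }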
@@ -420,16 +453,18 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
types.AddParam(kMachPtr);
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
+ // TODO(rmcilroy): Make the context param the one spilled to the stack once
+ // Turbofan supports modified stack arguments in tail calls.
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
+#else
locations.AddParam(regloc(kInterpreterDispatchTableRegister));
+#endif
STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
types.AddParam(kMachAnyTagged);
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
- locations.AddParam(
- LinkageLocation::ForCallerFrameSlot(kInterpreterContextSpillSlot));
-#else
locations.AddParam(regloc(kContextRegister));
-#endif
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
@@ -515,8 +550,9 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
if (index == kOsrContextSpillSlotIndex) {
// Context. Use the parameter location of the context spill slot.
- // Parameter (arity + 1) is special for the context of the function frame.
- int context_index = 1 + 1 + parameter_count; // target + receiver + params
+ // Parameter (arity + 2) is special for the context of the function frame.
+ int context_index =
+ 1 + 1 + 1 + parameter_count; // target + receiver + params + #args
return incoming_->GetInputLocation(context_index);
} else if (index >= first_stack_slot) {
// Local variable stored in this (callee) stack.
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index b25fe413c9..0f4b8db1c4 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -110,9 +110,10 @@ class CallDescriptor final : public ZoneObject {
public:
// Describes the kind of this call, which determines the target.
enum Kind {
- kCallCodeObject, // target is a Code object
- kCallJSFunction, // target is a JSFunction object
- kCallAddress, // target is a machine pointer
+ kCallCodeObject, // target is a Code object
+ kCallJSFunction, // target is a JSFunction object
+ kCallAddress, // target is a machine pointer
+    kLazyBailout      // the call is a no-op, only used for lazy bailout
};
enum Flag {
@@ -257,7 +258,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
//
// #0 #1 #2 #3 [...] #n
// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
-// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], context
+// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], #arg, context
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
public:
@@ -271,10 +272,13 @@ class Linkage : public ZoneObject {
static CallDescriptor* GetJSCallDescriptor(Zone* zone, bool is_osr,
int parameter_count,
CallDescriptor::Flags flags);
+
static CallDescriptor* GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function, int parameter_count,
Operator::Properties properties, bool needs_frame_state = true);
+ static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
+
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
@@ -304,12 +308,14 @@ class Linkage : public ZoneObject {
}
// Get the location where this function should place its return value.
- LinkageLocation GetReturnLocation() const {
- return incoming_->GetReturnLocation(0);
+ LinkageLocation GetReturnLocation(size_t index = 0) const {
+ return incoming_->GetReturnLocation(index);
}
// Get the machine type of this function's return value.
- MachineType GetReturnType() const { return incoming_->GetReturnType(0); }
+ MachineType GetReturnType(size_t index = 0) const {
+ return incoming_->GetReturnType(index);
+ }
// Get the frame offset for a given spill slot. The location depends on the
// calling convention and the specific frame layout, and may thus be
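
Defaulting the new index argument keeps every existing single-return caller of GetReturnLocation/GetReturnType source-compatible while opening the door to multi-return descriptors. A sketch of the pattern with a hypothetical descriptor type (not V8's):

#include <cassert>
#include <cstddef>
#include <vector>

struct Descriptor {
  std::vector<int> return_types;
  int GetReturnType(std::size_t index = 0) const {
    return return_types.at(index);
  }
};

int main() {
  Descriptor d{{7, 9}};
  assert(d.GetReturnType() == 7);   // old single-return call sites unchanged
  assert(d.GetReturnType(1) == 9);  // new multi-return access
  return 0;
}
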
diff --git a/deps/v8/src/compiler/live-range-separator.cc b/deps/v8/src/compiler/live-range-separator.cc
index f29e4b4a20..6591d71e72 100644
--- a/deps/v8/src/compiler/live-range-separator.cc
+++ b/deps/v8/src/compiler/live-range-separator.cc
@@ -18,40 +18,6 @@ namespace compiler {
namespace {
-// Starting from a deferred block, find the last consecutive deferred block.
-RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
- const InstructionSequence *code) {
- DCHECK(block->IsDeferred());
- RpoNumber first = block->rpo_number();
-
- RpoNumber last = first;
- for (int i = first.ToInt(); i < code->InstructionBlockCount(); ++i) {
- RpoNumber at_i = RpoNumber::FromInt(i);
- const InstructionBlock *block_at_i = code->InstructionBlockAt(at_i);
- if (!block_at_i->IsDeferred()) break;
- last = at_i;
- }
-
- return last;
-}
-
-
-// Delimits consecutive deferred block sequences.
-void AssociateDeferredBlockSequences(InstructionSequence *code) {
- for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {
- InstructionBlock *block =
- code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
- if (!block->IsDeferred()) continue;
- RpoNumber last = GetLastDeferredBlock(block, code);
- block->set_last_deferred(last);
-    // way, we visit each block exactly once, and the total complexity of this
-    // function is O(n), n being the number of blocks.
- blk_id = last.ToInt() + 1;
- }
-}
-
void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
LifetimePosition first_cut, LifetimePosition last_cut) {
@@ -78,76 +44,93 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
if (range->MayRequireSpillRange()) {
data->CreateSpillRangeForLiveRange(range);
}
- TopLevelLiveRange *result = data->NextLiveRange(range->machine_type());
- DCHECK_NULL(data->live_ranges()[result->vreg()]);
- data->live_ranges()[result->vreg()] = result;
-
+ if (range->splinter() == nullptr) {
+ TopLevelLiveRange *splinter = data->NextLiveRange(range->machine_type());
+ DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
+ data->live_ranges()[splinter->vreg()] = splinter;
+ range->SetSplinter(splinter);
+ }
Zone *zone = data->allocation_zone();
- range->Splinter(start, end, result, zone);
+ TRACE("creating splinter for range %d between %d and %d\n", range->vreg(),
+ start.ToInstructionIndex(), end.ToInstructionIndex());
+ range->Splinter(start, end, zone);
}
}
-// Splinter all ranges live inside successive deferred blocks.
-// No control flow analysis is performed. After the register allocation, we will
-// merge the splinters back into the original ranges, and then rely on the
-// range connector to properly connect them.
-void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
- InstructionSequence *code = data->code();
- int code_block_count = code->InstructionBlockCount();
- Zone *zone = data->allocation_zone();
- ZoneVector<BitVector *> &in_sets = data->live_in_sets();
-
- for (int i = 0; i < code_block_count; ++i) {
- InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
- if (!block->IsDeferred()) continue;
-
- RpoNumber last_deferred = block->last_deferred();
- // last_deferred + 1 is not deferred, so no point in visiting it.
- i = last_deferred.ToInt() + 1;
-
- LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
- block->first_instruction_index());
-
- LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
- static_cast<int>(code->instructions().size()));
-
- const BitVector *in_set = in_sets[block->rpo_number().ToInt()];
- BitVector ranges_to_splinter(*in_set, zone);
- InstructionBlock *last = code->InstructionBlockAt(last_deferred);
- for (int deferred_id = block->rpo_number().ToInt();
- deferred_id <= last->rpo_number().ToInt(); ++deferred_id) {
- const BitVector *ins = in_sets[deferred_id];
- ranges_to_splinter.Union(*ins);
- const BitVector *outs = LiveRangeBuilder::ComputeLiveOut(
- code->InstructionBlockAt(RpoNumber::FromInt(deferred_id)), data);
- ranges_to_splinter.Union(*outs);
- }
+int FirstInstruction(const UseInterval *interval) {
+ LifetimePosition start = interval->start();
+ int ret = start.ToInstructionIndex();
+ if (start.IsInstructionPosition() && start.IsEnd()) {
+ ++ret;
+ }
+ return ret;
+}
- int last_index = last->last_instruction_index();
- if (code->InstructionAt(last_index)->opcode() ==
- ArchOpcode::kArchDeoptimize) {
- ++last_index;
- }
- last_cut = LifetimePosition::GapFromInstructionIndex(last_index);
- BitVector::Iterator iterator(&ranges_to_splinter);
+int LastInstruction(const UseInterval *interval) {
+ LifetimePosition end = interval->end();
+ int ret = end.ToInstructionIndex();
+ if (end.IsGapPosition() || end.IsStart()) {
+ --ret;
+ }
+ return ret;
+}
- while (!iterator.Done()) {
- int range_id = iterator.Current();
- iterator.Advance();
- TopLevelLiveRange *range = data->live_ranges()[range_id];
- CreateSplinter(range, data, first_cut, last_cut);
+void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
+ const InstructionSequence *code = data->code();
+ UseInterval *interval = range->first_interval();
+
+ LifetimePosition first_cut = LifetimePosition::Invalid();
+ LifetimePosition last_cut = LifetimePosition::Invalid();
+
+ while (interval != nullptr) {
+ UseInterval *next_interval = interval->next();
+ const InstructionBlock *first_block =
+ code->GetInstructionBlock(FirstInstruction(interval));
+ const InstructionBlock *last_block =
+ code->GetInstructionBlock(LastInstruction(interval));
+ int first_block_nr = first_block->rpo_number().ToInt();
+ int last_block_nr = last_block->rpo_number().ToInt();
+ for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
+ const InstructionBlock *current_block =
+ code->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ if (current_block->IsDeferred()) {
+ if (!first_cut.IsValid()) {
+ first_cut = LifetimePosition::GapFromInstructionIndex(
+ current_block->first_instruction_index());
+ }
+ last_cut = LifetimePosition::GapFromInstructionIndex(
+ current_block->last_instruction_index());
+ } else {
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, last_cut);
+ first_cut = LifetimePosition::Invalid();
+ last_cut = LifetimePosition::Invalid();
+ }
+ }
}
+ interval = next_interval;
+ }
+ // When the range ends in deferred blocks, first_cut will be valid here.
+ // Splinter from there to the last instruction that was in a deferred block.
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, last_cut);
}
}
} // namespace
void LiveRangeSeparator::Splinter() {
- AssociateDeferredBlockSequences(data()->code());
- SplinterRangesInDeferredBlocks(data());
+ size_t virt_reg_count = data()->live_ranges().size();
+ for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
+ TopLevelLiveRange *range = data()->live_ranges()[vreg];
+ if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
+ continue;
+ }
+ SplinterLiveRange(range, data());
+ }
}
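
The rewritten separator no longer precomputes deferred-block sequences; it walks each live range's use intervals and opens a cut at the first deferred block it sees, closing it when control returns to non-deferred code. A simplified standalone model of that walk, using plain ints instead of LifetimePosition/InstructionBlock:

#include <cstdio>
#include <vector>

void EmitSplinters(const std::vector<bool>& deferred, int first_block,
                   int last_block) {
  int first_cut = -1, last_cut = -1;
  for (int b = first_block; b <= last_block; ++b) {
    if (deferred[b]) {
      if (first_cut < 0) first_cut = b;  // open a cut at the first deferred block
      last_cut = b;                      // and keep extending it
    } else if (first_cut >= 0) {
      std::printf("splinter blocks [%d, %d]\n", first_cut, last_cut);
      first_cut = last_cut = -1;
    }
  }
  // Range ends inside deferred code: flush the pending cut, as in the
  // trailing CreateSplinter call above.
  if (first_cut >= 0) {
    std::printf("splinter blocks [%d, %d]\n", first_cut, last_cut);
  }
}

int main() {
  EmitSplinters({false, true, true, false, true}, 0, 4);  // [1,2] and [4,4]
  return 0;
}
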
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index c174da2f7f..5951fb612a 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -648,6 +648,7 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ if (m.IsRoundInt64ToFloat64()) return Replace(m.node()->InputAt(0));
if (m.IsPhi()) {
Node* const phi = m.node();
DCHECK_EQ(kRepFloat64, RepresentationOf(OpParameter<MachineType>(phi)));
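
The added matcher line forwards the int64 input of RoundInt64ToFloat64 straight through the truncation. A quick check of the intuition on values where both conversions are exact; the model stays inside int32 range so the C++ casts are well-defined:

#include <cassert>
#include <cstdint>

int32_t ViaFloat64(int32_t x) {
  // RoundInt64ToFloat64 followed by TruncateFloat64ToInt32.
  return static_cast<int32_t>(static_cast<double>(static_cast<int64_t>(x)));
}

int main() {
  for (int32_t x : {0, -1, 123456789, INT32_MIN}) {
    assert(ViaFloat64(x) == x);  // the round-trip is the identity
  }
  return 0;
}
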
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 38bb056157..43f7eda5c6 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -34,6 +34,10 @@ std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
return os << "NoWriteBarrier";
+ case kMapWriteBarrier:
+ return os << "MapWriteBarrier";
+ case kPointerWriteBarrier:
+ return os << "PointerWriteBarrier";
case kFullWriteBarrier:
return os << "FullWriteBarrier";
}
@@ -99,6 +103,7 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
@@ -131,6 +136,8 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
@@ -167,6 +174,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
+ V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
@@ -250,6 +261,16 @@ struct MachineOperatorGlobalCache {
Store##Type##NoWriteBarrier##Operator() \
: Store##Type##Operator(kNoWriteBarrier) {} \
}; \
+ struct Store##Type##MapWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##MapWriteBarrier##Operator() \
+ : Store##Type##Operator(kMapWriteBarrier) {} \
+ }; \
+ struct Store##Type##PointerWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##PointerWriteBarrier##Operator() \
+ : Store##Type##Operator(kPointerWriteBarrier) {} \
+ }; \
struct Store##Type##FullWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##FullWriteBarrier##Operator() \
@@ -263,6 +284,9 @@ struct MachineOperatorGlobalCache {
"CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {} \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
+ Store##Type##PointerWriteBarrier##Operator \
+ kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
CheckedStore##Type##Operator kCheckedStore##Type;
MACHINE_TYPE_LIST(STORE)
@@ -326,14 +350,18 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
switch (rep.machine_type()) {
-#define STORE(Type) \
- case k##Type: \
- switch (rep.write_barrier_kind()) { \
- case kNoWriteBarrier: \
- return &cache_.k##Store##Type##NoWriteBarrier; \
- case kFullWriteBarrier: \
- return &cache_.k##Store##Type##FullWriteBarrier; \
- } \
+#define STORE(Type) \
+ case k##Type: \
+ switch (rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return &cache_.k##Store##Type##NoWriteBarrier; \
+ case kMapWriteBarrier: \
+ return &cache_.k##Store##Type##MapWriteBarrier; \
+ case kPointerWriteBarrier: \
+ return &cache_.k##Store##Type##PointerWriteBarrier; \
+ case kFullWriteBarrier: \
+ return &cache_.k##Store##Type##FullWriteBarrier; \
+ } \
break;
MACHINE_TYPE_LIST(STORE)
#undef STORE
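
Each (representation, write-barrier kind) pair gets its own cached operator, and Store() selects it with an exhaustive switch, so introducing kMapWriteBarrier and kPointerWriteBarrier forces every dispatch site to handle them. A minimal model of that dispatch shape, with a hypothetical Op type rather than V8's Operator:

#include <cassert>

enum WriteBarrierKind { kNoWriteBarrier, kMapWriteBarrier,
                        kPointerWriteBarrier, kFullWriteBarrier };

struct Op { WriteBarrierKind kind; };

const Op* Store(WriteBarrierKind kind) {
  static const Op ops[] = {
      {kNoWriteBarrier}, {kMapWriteBarrier},
      {kPointerWriteBarrier}, {kFullWriteBarrier}};
  switch (kind) {
    case kNoWriteBarrier:      return &ops[0];
    case kMapWriteBarrier:     return &ops[1];
    case kPointerWriteBarrier: return &ops[2];
    case kFullWriteBarrier:    return &ops[3];
  }
  return nullptr;  // unreachable with a valid kind
}

int main() {
  assert(Store(kMapWriteBarrier)->kind == kMapWriteBarrier);
  return 0;
}
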
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 27abfb4acc..1280f91544 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -49,7 +49,12 @@ TruncationMode TruncationModeOf(Operator const*);
// Supported write barrier modes.
-enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+enum WriteBarrierKind {
+ kNoWriteBarrier,
+ kMapWriteBarrier,
+ kPointerWriteBarrier,
+ kFullWriteBarrier
+};
std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
@@ -105,7 +110,7 @@ class MachineOperatorBuilder final : public ZoneObject {
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- // Note that Float*Max behaves like `(a < b) ? b : a`, not like Math.max().
+ // Note that Float*Max behaves like `(b < a) ? a : b`, not like Math.max().
// Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
kFloat32Max = 1u << 0,
kFloat32Min = 1u << 1,
@@ -117,9 +122,14 @@ class MachineOperatorBuilder final : public ZoneObject {
kInt32DivIsSafe = 1u << 7,
kUint32DivIsSafe = 1u << 8,
kWord32ShiftIsSafe = 1u << 9,
+ kWord32Ctz = 1u << 10,
+ kWord32Popcnt = 1u << 11,
+ kWord64Ctz = 1u << 12,
+ kWord64Popcnt = 1u << 13,
kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
kFloat64RoundDown | kFloat64RoundTruncate |
- kFloat64RoundTiesAway
+ kFloat64RoundTiesAway | kWord32Ctz | kWord32Popcnt |
+ kWord64Ctz | kWord64Popcnt
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -135,6 +145,9 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Word32Ror();
const Operator* Word32Equal();
const Operator* Word32Clz();
+ const OptionalOperator Word32Ctz();
+ const OptionalOperator Word32Popcnt();
+ const OptionalOperator Word64Popcnt();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -144,6 +157,8 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Word64Shr();
const Operator* Word64Sar();
const Operator* Word64Ror();
+ const Operator* Word64Clz();
+ const OptionalOperator Word64Ctz();
const Operator* Word64Equal();
const Operator* Int32Add();
@@ -189,11 +204,13 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeUint32ToFloat64();
const Operator* ChangeUint32ToUint64();
- // These operators truncate numbers, both changing the representation of
- // the number and mapping multiple input values onto the same output value.
+ // These operators truncate or round numbers, both changing the representation
+ // of the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ const Operator* RoundInt64ToFloat32();
+ const Operator* RoundInt64ToFloat64();
// These operators reinterpret the bits of a floating point number as an
// integer and vice versa.
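
The corrected Float*Max comment above matters because the select form is not NaN-symmetric the way Math.max() must be: a NaN on the left falls through to the right operand. A standalone illustration:

#include <cassert>
#include <cmath>

double Float64Max(double a, double b) { return (b < a) ? a : b; }

int main() {
  double nan = std::nan("");
  assert(Float64Max(1.0, 2.0) == 2.0);
  assert(!std::isnan(Float64Max(nan, 2.0)));  // comparison is false: returns b
  assert(std::isnan(Float64Max(1.0, nan)));   // NaN in b passes through
  return 0;
}
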
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 7769b9e739..54bb55a146 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -54,6 +54,18 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
@@ -201,6 +213,48 @@ class OutOfLineCeil final : public OutOfLineRound {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+    // and restore ra properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Addu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -457,6 +511,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(at);
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -503,6 +562,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Addu(at, object, index);
+ __ sw(value, MemOperand(at));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -704,6 +781,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
+ case kMipsFloat64Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat64Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
case kMipsCvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
@@ -810,18 +943,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kMipsStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ addu(index, object, index);
- __ sw(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -927,14 +1048,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!convertCondition(branch->condition, cc)) {
     UNSUPPORTED_COND(kMipsCmpS, branch->condition);
}
- __ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
- i.InputSingleRegister(1));
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF32(tlabel, NULL, cc, left, right);
} else if (instr->arch_opcode() == kMipsCmpD) {
if (!convertCondition(branch->condition, cc)) {
     UNSUPPORTED_COND(kMipsCmpD, branch->condition);
}
- __ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, NULL, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -968,19 +1099,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
__ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- __ xori(result, zero_reg, 1); // Create 1 for true.
- if (IsMipsArchVariant(kMips32r6)) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
- } else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
@@ -999,20 +1121,18 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case ne: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- __ Subu(kScratchReg, left, right);
- __ xori(result, zero_reg, 1);
- if (IsMipsArchVariant(kMips32r6)) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+ // Pass left operand if right is zero.
+ select = left;
} else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Subu(kScratchReg, left, right);
+ select = kScratchReg;
+ }
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
} break;
case lt:
@@ -1057,8 +1177,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kMipsCmpD ||
instr->arch_opcode() == kMipsCmpS) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
@@ -1081,8 +1205,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
DCHECK(instr->arch_opcode() == kMipsCmpS);
__ cmp(cc, W, kDoubleCompareReg, left, right);
}
- __ mfc1(at, kDoubleCompareReg);
- __ srl(result, at, 31); // Cmp returns all 1s for true.
+ __ mfc1(result, kDoubleCompareReg);
+ __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB.
if (!predicate) // Toggle result for not equal.
__ xori(result, result, 1);
}
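
The AssembleArchBoolean rewrite above replaces the per-variant seleqz/Movn sequences with one MIPS idiom: sltu result, $zero, x computes the unsigned test 0 < x, i.e. x != 0, and a trailing xori flips it into x == 0. A C++ model of the two-instruction sequence:

#include <cassert>
#include <cstdint>

uint32_t Sltu(uint32_t a, uint32_t b) { return a < b ? 1u : 0u; }

uint32_t IsNonZero(uint32_t x) { return Sltu(0, x); }      // sltu result, $zero, x
uint32_t IsZero(uint32_t x) { return IsNonZero(x) ^ 1u; }  // xori result, result, 1

int main() {
  assert(IsNonZero(7) == 1 && IsNonZero(0) == 0);
  assert(IsZero(0) == 1 && IsZero(7) == 0);
  return 0;
}
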
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index db8f2511e9..a251ba93ca 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -79,10 +79,13 @@ namespace compiler {
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64Max) \
+ V(MipsFloat64Min) \
+ V(MipsFloat32Max) \
+ V(MipsFloat32Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
- V(MipsStackClaim) \
- V(MipsStoreWriteBarrier)
+ V(MipsStackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 3c4b378553..b43edb17cd 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -44,11 +44,10 @@ class MipsOperandGenerator final : public OperandGenerator {
return is_uint16(value);
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
case kCheckedStoreFloat64:
- return is_int16(value + kIntSize);
+ return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
+ std::numeric_limits<int16_t>::max() >= (value + kIntSize);
default:
return is_int16(value);
}
@@ -179,53 +178,75 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMipsStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMipsSwc1;
- break;
- case kRepFloat64:
- opcode = kMipsSdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMipsSb;
- break;
- case kRepWord16:
- opcode = kMipsSh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kMipsSw;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ // TODO(mips): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kMipsSwc1;
+ break;
+ case kRepFloat64:
+ opcode = kMipsSdc1;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kMipsSb;
+ break;
+ case kRepWord16:
+ opcode = kMipsSh;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kMipsSw;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
@@ -270,6 +291,12 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
@@ -476,16 +503,64 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -523,20 +598,10 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
MipsOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -546,7 +611,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* input : buffer.pushed_nodes) {
+ for (Node* input : (*arguments)) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
@@ -558,133 +623,17 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kMipsStackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ if (Node* input = (*arguments)[n]) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(n << kPointerSizeLog2));
}
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- MipsOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
- // Possibly align stack here for functions.
- int push_count = static_cast<int>(descriptor->StackParameterCount());
- if (push_count > 0) {
- Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
@@ -797,10 +746,14 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMipsCmpS, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
}
@@ -808,10 +761,14 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}
@@ -826,6 +783,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -840,6 +807,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -1127,7 +1104,10 @@ InstructionSelector::SupportedMachineOperatorFlags() {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
- return flags;
+ return flags | MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max;
}
} // namespace compiler
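
The pre-r6 path of VisitFloat64Max above works because the selector reverses the operands and pins the output to the first machine input, so a single compare plus conditional move realizes (b < a) ? a : b. A plain C++ rendering of the emitted sequence (illustrative, not V8 API):

#include <cassert>

double Float64MaxPreR6(double a, double b) {
  double out = b;       // DefineSameAsFirst: output aliases machine input 0 (= b)
  bool flag = (b < a);  // c_d(OLT, in0, in1) compares b < a
  if (flag) out = a;    // movt_d overwrites only when the flag is set
  return out;           // otherwise b passes through unchanged
}

int main() {
  assert(Float64MaxPreR6(1.0, 2.0) == 2.0);
  assert(Float64MaxPreR6(3.0, -1.0) == 3.0);
  return 0;
}
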
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 053434eec9..62fd2f5efc 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -53,6 +53,18 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
@@ -201,6 +213,48 @@ class OutOfLineCeil final : public OutOfLineRound {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+    // and restore ra properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Daddu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -455,6 +509,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(at);
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -501,6 +560,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Daddu(at, object, index);
+ __ sd(value, MemOperand(at));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -561,6 +638,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Clz:
__ Clz(i.OutputRegister(), i.InputRegister(0));
break;
+ case kMips64Dclz:
+ __ dclz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -769,6 +849,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
+ case kMips64Float64Max: {
+ // (b < a) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float64Min: {
+ // (a < b) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float32Max: {
+ // (b < a) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float32Min: {
+ // (a < b) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
case kMips64CvtSD:
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
@@ -781,6 +917,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMips64CvtSL: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ dmtc1(i.InputRegister(0), scratch);
+ __ cvt_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtDL: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ dmtc1(i.InputRegister(0), scratch);
+ __ cvt_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMips64CvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
@@ -885,18 +1033,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kMips64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ daddu(index, object, index);
- __ sd(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -1005,14 +1141,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpS, branch->condition);
}
- __ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
- i.InputSingleRegister(1));
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF32(tlabel, NULL, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, branch->condition);
}
- __ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, NULL, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -1046,19 +1192,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
__ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- __ xori(result, zero_reg, 1); // Create 1 for true.
- if (kArchVariant == kMips64r6) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
- } else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
return;
} else if (instr->arch_opcode() == kMips64Dadd ||
@@ -1078,20 +1215,18 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case ne: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- __ Dsubu(kScratchReg, left, right);
- __ xori(result, zero_reg, 1);
- if (kArchVariant == kMips64r6) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+ // Pass left operand if right is zero.
+ select = left;
} else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Dsubu(kScratchReg, left, right);
+ select = kScratchReg;
+ }
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
} break;
case lt:
@@ -1136,8 +1271,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kMips64CmpD ||
instr->arch_opcode() == kMips64CmpS) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (kArchVariant != kMips64r6) {
@@ -1160,9 +1299,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
DCHECK(instr->arch_opcode() == kMips64CmpS);
__ cmp(cc, W, kDoubleCompareReg, left, right);
}
- __ dmfc1(at, kDoubleCompareReg);
- __ dsrl32(result, at, 31); // Cmp returns all 1s for true.
- if (!predicate) // Toggle result for not equal.
+ __ dmfc1(result, kDoubleCompareReg);
+ __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB.
+
+ if (!predicate) // Toggle result for not equal.
__ xori(result, result, 1);
}
return;
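
OutOfLineRecordWrite leans on the ordering of RecordWriteMode to skip checks the instruction selector already discharged: a value known to be a pointer needs no smi check, and a map value skips the pointers-to-here page test as well. A compact model, assuming the enum is ordered map < pointer < any as the comparisons imply:

#include <cassert>

enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

bool NeedsSmiCheck(RecordWriteMode m) {
  return m > RecordWriteMode::kValueIsPointer;  // only 'any' might be a smi
}
bool NeedsValuePageCheck(RecordWriteMode m) {
  return m > RecordWriteMode::kValueIsMap;      // maps skip the page-flag test
}

int main() {
  assert(!NeedsSmiCheck(RecordWriteMode::kValueIsPointer));
  assert(NeedsSmiCheck(RecordWriteMode::kValueIsAny));
  assert(!NeedsValuePageCheck(RecordWriteMode::kValueIsMap));
  assert(NeedsValuePageCheck(RecordWriteMode::kValueIsPointer));
  return 0;
}
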
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 38e4c46485..e64a0fa812 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -37,6 +37,7 @@ namespace compiler {
V(Mips64Sar) \
V(Mips64Ext) \
V(Mips64Dext) \
+ V(Mips64Dclz) \
V(Mips64Dshl) \
V(Mips64Dshr) \
V(Mips64Dsar) \
@@ -73,6 +74,8 @@ namespace compiler {
V(Mips64TruncWD) \
V(Mips64TruncUwD) \
V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtDL) \
V(Mips64CvtDUw) \
V(Mips64Lb) \
V(Mips64Lbu) \
@@ -94,10 +97,13 @@ namespace compiler {
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float64Max) \
+ V(Mips64Float64Min) \
+ V(Mips64Float32Max) \
+ V(Mips64Float32Min) \
V(Mips64Push) \
V(Mips64StoreToStackSlot) \
- V(Mips64StackClaim) \
- V(Mips64StoreWriteBarrier)
+ V(Mips64StackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index d20c1c72f6..c62d0ef372 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -186,56 +186,78 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMips64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMips64Swc1;
- break;
- case kRepFloat64:
- opcode = kMips64Sdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMips64Sb;
- break;
- case kRepWord16:
- opcode = kMips64Sh;
- break;
- case kRepWord32:
- opcode = kMips64Sw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kMips64Sd;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ // TODO(mips): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kMips64Swc1;
+ break;
+ case kRepFloat64:
+ opcode = kMips64Sdc1;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kMips64Sb;
+ break;
+ case kRepWord16:
+ opcode = kMips64Sh;
+ break;
+ case kRepWord32:
+ opcode = kMips64Sw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kMips64Sd;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
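
Note on the hunk above: the new lowering folds the store's barrier kind into a
generic kArchStoreWithWriteBarrier instruction via RecordWriteMode. A minimal
standalone sketch of that mapping, using simplified stand-in enums rather than
V8's real headers:

enum WriteBarrierKind {
  kNoWriteBarrier,
  kMapWriteBarrier,
  kPointerWriteBarrier,
  kFullWriteBarrier
};
enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

// Mirrors the switch in the patch: each barrier-requiring kind picks the
// cheapest RecordWriteMode that is still conservative for its values.
RecordWriteMode ToRecordWriteMode(WriteBarrierKind kind) {
  switch (kind) {
    case kMapWriteBarrier:     return RecordWriteMode::kValueIsMap;
    case kPointerWriteBarrier: return RecordWriteMode::kValueIsPointer;
    case kFullWriteBarrier:    return RecordWriteMode::kValueIsAny;
    case kNoWriteBarrier:      break;  // callers filter this out first
  }
  return RecordWriteMode::kValueIsAny;
}
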
@@ -321,11 +343,28 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kMips64Dclz, node);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
// TODO(plind): Consider multiply & add optimization from arm port.
@@ -578,6 +617,16 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSL, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDL, node);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
@@ -663,16 +712,64 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
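
Note on the four min/max selectors above: MIPS64r6 has native float min/max,
so three distinct registers work; earlier variants get swapped inputs with the
result pinned to the first (swapped) operand, matching an in-place expansion
in the code generator. A toy model of the decision (the types here are
stand-ins, not V8 API):

// Toy model of the operand constraints chosen above.
struct OperandShape {
  bool inputs_swapped;            // pre-r6: Emit(..., rhs, lhs)
  bool result_aliases_first_use;  // pre-r6: DefineSameAsFirst
};

OperandShape SelectFloatMinMaxShape(bool is_mips64r6) {
  if (is_mips64r6) return {false, false};  // native min/max, unique registers
  return {true, true};                     // swapped inputs, in-place result
}
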
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -710,20 +807,10 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
Mips64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -733,7 +820,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* input : buffer.pushed_nodes) {
+ for (Node* input : (*arguments)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
@@ -744,134 +831,17 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kMips64StackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ if (Node* input = (*arguments)[n]) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(flags);
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
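
In EmitPrepareArguments above, the C-call branch pokes each argument one
pointer-sized slot apart starting at kCArgSlotCount, while the stack-claim
branch does the same from slot 0 after reserving push_count slots. A runnable
sketch of the offset arithmetic (64-bit pointers and a kCArgSlotCount of 0 are
assumptions for illustration):

#include <cstdio>

int main() {
  const int kPointerSizeLog2 = 3;  // assumption: 8-byte pointers
  int slot = 0;                    // stand-in for kCArgSlotCount
  for (int arg = 0; arg < 3; ++arg) {
    // Mirrors the kMips64StoreToStackSlot emission: the byte offset is the
    // slot index shifted by the pointer size.
    std::printf("argument %d stored at sp+%d\n", arg,
                slot << kPointerSizeLog2);
    ++slot;
  }
  return 0;
}
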
-void InstructionSelector::VisitTailCall(Node* node) {
- Mips64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
- if (push_count > 0) {
- Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = push_count - 1;
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
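
With the hand-rolled VisitCall/VisitTailCall bodies gone, the port's remaining
call-related knob is this predicate: returning false tells the shared lowering
(assumed to live in the common instruction selector) that a tail-call target
must be materialized in a register, never encoded as an immediate. Toy
illustration of the contract:

#include <cstdio>

// Toy model: the port answers one question; the shared driver acts on it
// (assumed behavior, for illustration only).
bool IsTailCallAddressImmediate() { return false; }  // MIPS64: never

int main() {
  std::printf(IsTailCallAddressImmediate()
                  ? "tail-call target: immediate\n"
                  : "tail-call target: register\n");
  return 0;
}
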
void InstructionSelector::VisitCheckedLoad(Node* node) {
@@ -990,10 +960,14 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMips64CmpS, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}
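
The rewritten float compares let either side be the literal zero, so
comparisons against 0.0 skip the register load and feed an immediate straight
to kMips64CmpS (and kMips64CmpD below). A simplified stand-in for the
UseImmediate-vs-UseRegister choice:

// Stand-in for the operand choice above (not V8's real operand types).
struct Operand {
  bool is_immediate;
  double value;
};

Operand ChooseCompareOperand(bool is_constant, double value) {
  if (is_constant && value == 0.0) return {true, 0.0};  // g.UseImmediate(...)
  return {false, value};                                // g.UseRegister(...)
}
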
@@ -1001,10 +975,14 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}
@@ -1019,6 +997,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -1033,6 +1021,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -1384,7 +1382,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index 7c2bbe06b8..e99b5e9ebe 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -14,10 +14,17 @@ typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
struct MoveKeyCompare {
bool operator()(const MoveKey& a, const MoveKey& b) const {
- if (a.first.EqualsModuloType(b.first)) {
- return a.second.CompareModuloType(b.second);
+ if (a.first.EqualsCanonicalized(b.first)) {
+ return a.second.CompareCanonicalized(b.second);
}
- return a.first.CompareModuloType(b.first);
+ return a.first.CompareCanonicalized(b.first);
+ }
+};
+
+struct OperandCompare {
+ bool operator()(const InstructionOperand& a,
+ const InstructionOperand& b) const {
+ return a.CompareCanonicalized(b);
}
};
@@ -25,7 +32,37 @@ typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
-bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
+bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
+ if (instr->IsNop()) return true;
+ if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
+ instr->ClobbersDoubleRegisters()) {
+ return false;
+ }
+ if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
+
+ ZoneSet<InstructionOperand, OperandCompare> operands(zone);
+ for (size_t i = 0; i < instr->InputCount(); ++i) {
+ operands.insert(*instr->InputAt(i));
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i) {
+ operands.insert(*instr->OutputAt(i));
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i) {
+ operands.insert(*instr->TempAt(i));
+ }
+ for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
+ i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
+ ParallelMove* moves = instr->parallel_moves()[i];
+ if (moves == nullptr) continue;
+ for (MoveOperands* move : *moves) {
+ if (operands.count(move->source()) > 0 ||
+ operands.count(move->destination()) > 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
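
GapsCanMoveOver needs the new Zone parameter because it now allocates a
temporary set of every operand the instruction reads, writes, or uses as a
temp, and rejects hoisting any parallel move whose source or destination
collides with that set. A behavioral outline, with ints standing in for
InstructionOperands:

#include <set>
#include <utility>

// Behavioral outline of the collision check added above.
bool GapsCanMoveOverOutline(
    const std::set<int>& instruction_operands,
    const std::set<std::pair<int, int>>& gap_moves) {  // (source, destination)
  for (const auto& move : gap_moves) {
    if (instruction_operands.count(move.first) > 0 ||
        instruction_operands.count(move.second) > 0) {
      return false;  // the move touches an operand the instruction uses
    }
  }
  return true;
}
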
int FindFirstNonEmptySlot(Instruction* instr) {
@@ -135,7 +172,7 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
}
prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
- if (GapsCanMoveOver(instr)) continue;
+ if (GapsCanMoveOver(instr, local_zone())) continue;
if (prev_instr != nullptr) {
to_finalize_.push_back(prev_instr);
prev_instr = nullptr;
@@ -198,7 +235,8 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
for (int i = block->first_instruction_index();
i <= block->last_instruction_index(); ++i) {
instr = code()->instructions()[i];
- if (!GapsCanMoveOver(instr) || !instr->AreMovesRedundant()) break;
+ if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
+ break;
}
DCHECK(instr != nullptr);
bool gap_initialized = true;
@@ -245,12 +283,12 @@ bool IsSlot(const InstructionOperand& op) {
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
- if (!a->source().EqualsModuloType(b->source())) {
- return a->source().CompareModuloType(b->source());
+ if (!a->source().EqualsCanonicalized(b->source())) {
+ return a->source().CompareCanonicalized(b->source());
}
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
- return a->destination().CompareModuloType(b->destination());
+ return a->destination().CompareCanonicalized(b->destination());
}
} // namespace
@@ -276,7 +314,7 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
for (auto load : loads) {
// New group.
if (group_begin == nullptr ||
- !load->source().EqualsModuloType(group_begin->source())) {
+ !load->source().EqualsCanonicalized(group_begin->source())) {
group_begin = load;
continue;
}
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 0d061a36c4..4fb4aa487c 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -7,6 +7,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/verifier.h"
+#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -130,6 +131,13 @@ bool NodeProperties::IsExceptionalCall(Node* node) {
// static
+void NodeProperties::ReplaceValueInput(Node* node, Node* value, int index) {
+ DCHECK(index < node->op()->ValueInputCount());
+ node->ReplaceInput(FirstValueIndex(node) + index, value);
+}
+
+
+// static
void NodeProperties::ReplaceContextInput(Node* node, Node* context) {
node->ReplaceInput(FirstContextIndex(node), context);
}
@@ -157,6 +165,13 @@ void NodeProperties::ReplaceFrameStateInput(Node* node, int index,
// static
+void NodeProperties::RemoveFrameStateInput(Node* node, int index) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(FirstFrameStateIndex(node) + index);
+}
+
+
+// static
void NodeProperties::RemoveNonValueInputs(Node* node) {
node->TrimInputCount(node->op()->ValueInputCount());
}
@@ -269,6 +284,12 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
// static
+Type* NodeProperties::GetTypeOrAny(Node* node) {
+ return IsTyped(node) ? node->type() : Type::Any();
+}
+
+
+// static
bool NodeProperties::AllValueInputsAreTyped(Node* node) {
int input_count = node->op()->ValueInputCount();
for (int index = 0; index < input_count; ++index) {
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 313d3749bb..ca8d228ae4 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -72,15 +72,19 @@ class NodeProperties final {
return IrOpcode::IsPhiOpcode(node->opcode());
}
+ // Determines whether exceptions thrown by the given node are handled locally
+ // within the graph (i.e. an IfException projection is present).
static bool IsExceptionalCall(Node* node);
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
+ static void ReplaceValueInput(Node* node, Node* value, int index);
static void ReplaceContextInput(Node* node, Node* context);
static void ReplaceControlInput(Node* node, Node* control);
static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
+ static void RemoveFrameStateInput(Node* node, int index);
static void RemoveNonValueInputs(Node* node);
// Merge the control node {node} into the end of the graph, introducing a
@@ -118,6 +122,7 @@ class NodeProperties final {
DCHECK(IsTyped(node));
return node->type();
}
+ static Type* GetTypeOrAny(Node* node);
static void SetType(Node* node, Type* type) {
DCHECK_NOT_NULL(type);
node->set_type(type);
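
GetTypeOrAny removes the is-it-typed guard from callers: untyped nodes degrade
to Type::Any() instead of tripping GetType's DCHECK. A self-contained analog
(Type and Node here are simplified stand-ins for the V8 classes):

// Simplified stand-ins; V8's Type and Node are richer than this.
struct Type {
  static Type* Any() {
    static Type any;
    return &any;
  }
};
struct Node {
  Type* type = nullptr;
};

Type* GetTypeOrAny(Node* node) {
  return node->type != nullptr ? node->type : Type::Any();
}
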
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 33e17f6dd0..d94c60d468 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -44,8 +44,9 @@
V(Phi) \
V(EffectSet) \
V(EffectPhi) \
- V(ValueEffect) \
- V(Finish) \
+ V(Guard) \
+ V(BeginRegion) \
+ V(FinishRegion) \
V(FrameState) \
V(StateValues) \
V(TypedStateValues) \
@@ -126,8 +127,7 @@
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
V(JSStoreContext) \
- V(JSLoadDynamicGlobal) \
- V(JSLoadDynamicContext) \
+ V(JSLoadDynamic) \
V(JSCreateFunctionContext) \
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
@@ -139,10 +139,13 @@
V(JSCallConstruct) \
V(JSCallFunction) \
V(JSCallRuntime) \
+ V(JSConvertReceiver) \
V(JSForInDone) \
V(JSForInNext) \
V(JSForInPrepare) \
V(JSForInStep) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
V(JSYield) \
V(JSStackCheck)
@@ -172,6 +175,9 @@
V(NumberMultiply) \
V(NumberDivide) \
V(NumberModulus) \
+ V(NumberBitwiseOr) \
+ V(NumberBitwiseXor) \
+ V(NumberBitwiseAnd) \
V(NumberShiftLeft) \
V(NumberShiftRight) \
V(NumberShiftRightLogical) \
@@ -193,6 +199,7 @@
V(StoreField) \
V(StoreBuffer) \
V(StoreElement) \
+ V(ObjectIsNumber) \
V(ObjectIsSmi)
// Opcodes for Machine-level operators.
@@ -226,6 +233,9 @@
V(Word32Sar) \
V(Word32Ror) \
V(Word32Clz) \
+ V(Word32Ctz) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
V(Word64And) \
V(Word64Or) \
V(Word64Xor) \
@@ -233,6 +243,8 @@
V(Word64Shr) \
V(Word64Sar) \
V(Word64Ror) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
@@ -261,6 +273,8 @@
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundInt64ToFloat32) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 60e6ad7636..d1bea56091 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -34,8 +34,12 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSStrictNotEqual:
return 0;
- // Calls
+ // We record the frame state immediately before and immediately after every
+ // function call.
case IrOpcode::kJSCallFunction:
+ return 2;
+
+ // Construct calls
case IrOpcode::kJSCallConstruct:
// Compare operations
@@ -50,16 +54,18 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSCreateLiteralObject:
// Context operations
- case IrOpcode::kJSLoadDynamicContext:
+ case IrOpcode::kJSLoadDynamic:
case IrOpcode::kJSCreateScriptContext:
case IrOpcode::kJSCreateWithContext:
// Conversions
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSToNumber:
case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
// Misc operations
+ case IrOpcode::kJSConvertReceiver:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
@@ -74,7 +80,6 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSStoreGlobal:
- case IrOpcode::kJSLoadDynamicGlobal:
return 2;
  // Binary operators that can deopt in the middle of the operation (e.g.,
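
The practical effect of this hunk: a plain JS call now carries two frame
states (the deopt state immediately before and immediately after the call),
strict (in)equality carries none, and the property/global accesses keep their
two. A toy mirror of the dispatch for the cases visible here:

// Toy mirror of GetFrameStateInputCount for the cases in this hunk.
enum Op { kJSStrictEqual, kJSCallFunction, kJSLoadProperty };

int GetFrameStateInputCountToy(Op op) {
  switch (op) {
    case kJSStrictEqual:  return 0;  // never deoptimizes
    case kJSCallFunction: return 2;  // state before + state after the call
    case kJSLoadProperty: return 2;  // unchanged by this patch
  }
  return 1;
}
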
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 209ddfdf0d..ceff8d660b 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -12,6 +12,8 @@
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/binary-operator-reducer.h"
+#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
@@ -30,10 +32,10 @@
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
-#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-global-object-specialization.h"
+#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
-#include "src/compiler/js-type-feedback.h"
-#include "src/compiler/js-type-feedback-lowering.h"
+#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/live-range-separator.h"
@@ -50,6 +52,7 @@
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/typer.h"
@@ -57,6 +60,7 @@
#include "src/compiler/verifier.h"
#include "src/compiler/zone-pool.h"
#include "src/ostreams.h"
+#include "src/register-configuration.h"
#include "src/type-info.h"
#include "src/utils.h"
@@ -80,11 +84,11 @@ class PipelineData {
graph_zone_(graph_zone_scope_.zone()),
graph_(nullptr),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- js_type_feedback_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
@@ -96,13 +100,14 @@ class PipelineData {
PhaseScope scope(pipeline_statistics, "init pipeline data");
graph_ = new (graph_zone_) Graph(graph_zone_);
source_positions_.Reset(new SourcePositionTable(graph_));
+ simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
graph_zone_, kMachPtr,
InstructionSelector::SupportedMachineOperatorFlags());
common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
- JSGraph(isolate_, graph_, common_, javascript_, machine_);
+ JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
}
// For machine graph testing entry point.
@@ -120,11 +125,11 @@ class PipelineData {
graph_(graph),
source_positions_(new SourcePositionTable(graph_)),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- js_type_feedback_(nullptr),
schedule_(schedule),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
@@ -148,11 +153,11 @@ class PipelineData {
graph_zone_(nullptr),
graph_(nullptr),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- js_type_feedback_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(sequence->zone()),
@@ -192,10 +197,6 @@ class PipelineData {
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
- JSTypeFeedbackTable* js_type_feedback() { return js_type_feedback_; }
- void set_js_type_feedback(JSTypeFeedbackTable* js_type_feedback) {
- js_type_feedback_ = js_type_feedback;
- }
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
@@ -227,11 +228,11 @@ class PipelineData {
graph_zone_ = nullptr;
graph_ = nullptr;
loop_assignment_ = nullptr;
+ simplified_ = nullptr;
machine_ = nullptr;
common_ = nullptr;
javascript_ = nullptr;
jsgraph_ = nullptr;
- js_type_feedback_ = nullptr;
schedule_ = nullptr;
}
@@ -294,11 +295,11 @@ class PipelineData {
// TODO(dcarney): make this into a ZoneObject.
base::SmartPointer<SourcePositionTable> source_positions_;
LoopAssignmentAnalysis* loop_assignment_;
+ SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
JSGraph* jsgraph_;
- JSTypeFeedbackTable* js_type_feedback_;
Schedule* schedule_;
// All objects in the following group of fields are allocated in
@@ -356,10 +357,8 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment,
- JSTypeFeedbackTable* js_type_feedback,
SourcePositionTable* source_positions)
- : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
- js_type_feedback),
+ : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position()) {}
@@ -395,6 +394,8 @@ class SourcePositionWrapper final : public Reducer {
return reducer_->Reduce(node);
}
+ void Finalize() final { reducer_->Finalize(); }
+
private:
Reducer* const reducer_;
SourcePositionTable* const table_;
@@ -483,7 +484,7 @@ struct GraphBuilderPhase {
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->js_type_feedback(), data->source_positions());
+ data->source_positions());
succeeded = graph_builder.CreateGraph(stack_check);
}
@@ -494,6 +495,39 @@ struct GraphBuilderPhase {
};
+struct NativeContextSpecializationPhase {
+ static const char* phase_name() { return "native context specialization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ JSGlobalObjectSpecialization global_object_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
+ : JSGlobalObjectSpecialization::kNoFlags,
+ handle(data->info()->global_object(), data->isolate()),
+ data->info()->dependencies());
+ JSNativeContextSpecialization native_context_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSNativeContextSpecialization::kDeoptimizationEnabled
+ : JSNativeContextSpecialization::kNoFlags,
+ handle(data->info()->global_object()->native_context(),
+ data->isolate()),
+ data->info()->dependencies(), temp_zone);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &global_object_specialization);
+ AddReducer(data, &graph_reducer, &native_context_specialization);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
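
Pipeline phases follow one pattern: a struct with a static phase_name() and a
Run(PipelineData*, Zone*) method that wires up a handful of reducers, invoked
through Run<Phase>() (see the Run<NativeContextSpecializationPhase>() call
later in this file's diff). A minimal sketch of the pattern, with opaque
stand-ins for the V8 types:

struct PipelineData;  // opaque stand-in
struct Zone;          // opaque stand-in

struct ExamplePhase {
  static const char* phase_name() { return "example"; }
  void Run(PipelineData* /*data*/, Zone* /*temp_zone*/) {
    // Typical body: build a JSGraphReducer, AddReducer(...) a few reducers,
    // then graph_reducer.ReduceGraph().
  }
};

template <typename Phase>
void RunPhase(PipelineData* data, Zone* temp_zone) {
  Phase phase;
  phase.Run(data, temp_zone);  // the real Run<T>() also opens a PhaseScope
}
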
struct InliningPhase {
static const char* phase_name() { return "inlining"; }
@@ -510,17 +544,18 @@ struct InliningPhase {
: MaybeHandle<Context>());
JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
data->jsgraph());
- JSInliner inliner(&graph_reducer, data->info()->is_inlining_enabled()
- ? JSInliner::kGeneralInlining
- : JSInliner::kRestrictedInlining,
- temp_zone, data->info(), data->jsgraph());
+ JSInliningHeuristic inlining(&graph_reducer,
+ data->info()->is_inlining_enabled()
+ ? JSInliningHeuristic::kGeneralInlining
+ : JSInliningHeuristic::kRestrictedInlining,
+ temp_zone, data->info(), data->jsgraph());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
AddReducer(data, &graph_reducer, &context_specialization);
- AddReducer(data, &graph_reducer, &inliner);
+ AddReducer(data, &graph_reducer, &inlining);
graph_reducer.ReduceGraph();
}
};
@@ -547,34 +582,6 @@ struct OsrDeconstructionPhase {
};
-struct JSTypeFeedbackPhase {
- static const char* phase_name() { return "type feedback specializing"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- Handle<Context> native_context(data->info()->context()->native_context());
- TypeFeedbackOracle oracle(data->isolate(), temp_zone,
- data->info()->unoptimized_code(),
- data->info()->feedback_vector(), native_context);
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- Handle<GlobalObject> global_object = Handle<GlobalObject>::null();
- if (data->info()->has_global_object()) {
- global_object =
- Handle<GlobalObject>(data->info()->global_object(), data->isolate());
- }
- // TODO(titzer): introduce a specialization mode/flags enum to control
- // specializing to the global object here.
- JSTypeFeedbackSpecializer specializer(
- &graph_reducer, data->jsgraph(), data->js_type_feedback(), &oracle,
- global_object, data->info()->is_deoptimization_enabled()
- ? JSTypeFeedbackSpecializer::kDeoptimizationEnabled
- : JSTypeFeedbackSpecializer::kDeoptimizationDisabled,
- data->info()->dependencies());
- AddReducer(data, &graph_reducer, &specializer);
- graph_reducer.ReduceGraph();
- }
-};
-
-
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
@@ -584,12 +591,11 @@ struct TypedLoweringPhase {
data->common());
LoadElimination load_elimination(&graph_reducer);
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
- JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(), temp_zone);
- JSTypeFeedbackLowering type_feedback_lowering(
- &graph_reducer, data->info()->is_deoptimization_enabled()
- ? JSTypeFeedbackLowering::kDeoptimizationEnabled
- : JSTypeFeedbackLowering::kNoFlags,
- data->jsgraph());
+ JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
+ data->info()->is_deoptimization_enabled()
+ ? JSTypedLowering::kDeoptimizationEnabled
+ : JSTypedLowering::kNoFlags,
+ data->jsgraph(), temp_zone);
JSIntrinsicLowering intrinsic_lowering(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
@@ -601,7 +607,6 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
- AddReducer(data, &graph_reducer, &type_feedback_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -609,6 +614,22 @@ struct TypedLoweringPhase {
};
+struct BranchEliminationPhase {
+ static const char* phase_name() { return "branch condition elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "simplified lowering"; }
@@ -624,11 +645,14 @@ struct SimplifiedLoweringPhase {
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ BinaryOperatorReducer binary_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &binary_reducer);
graph_reducer.ReduceGraph();
}
};
@@ -1043,11 +1067,6 @@ Handle<Code> Pipeline::GenerateCode() {
PipelineData data(&zone_pool, info(), pipeline_statistics.get());
this->data_ = &data;
- if (info()->is_type_feedback_enabled()) {
- data.set_js_type_feedback(new (data.graph_zone())
- JSTypeFeedbackTable(data.graph_zone()));
- }
-
BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
@@ -1075,7 +1094,13 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("OSR deconstruction", true);
}
- // Perform context specialization and inlining (if enabled).
+ // Perform native context specialization (if enabled).
+ if (info()->is_native_context_specializing()) {
+ Run<NativeContextSpecializationPhase>();
+ RunPrintAndVerify("Native context specialized", true);
+ }
+
+ // Perform function context specialization and inlining (if enabled).
Run<InliningPhase>();
RunPrintAndVerify("Inlined", true);
@@ -1091,7 +1116,11 @@ Handle<Code> Pipeline::GenerateCode() {
base::SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
- typer.Reset(new Typer(isolate(), data.graph(), info()->function_type()));
+ typer.Reset(new Typer(isolate(), data.graph(),
+ info()->is_deoptimization_enabled()
+ ? Typer::kDeoptimizationEnabled
+ : Typer::kNoFlags,
+ info()->dependencies(), info()->function_type()));
Run<TyperPhase>(typer.get());
RunPrintAndVerify("Typed");
}
@@ -1108,15 +1137,13 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("Loop peeled");
}
- if (info()->is_type_feedback_enabled()) {
- Run<JSTypeFeedbackPhase>();
- RunPrintAndVerify("JSType feedback");
- }
-
// Lower simplified operators and insert changes.
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
+ Run<BranchEliminationPhase>();
+ RunPrintAndVerify("Branch conditions eliminated");
+
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
@@ -1265,8 +1292,9 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
- AllocateRegisters(RegisterConfiguration::ArchDefault(), call_descriptor,
- run_verifier);
+ AllocateRegisters(
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ call_descriptor, run_verifier);
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>();
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 90c223f67e..d437c7e585 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class RegisterConfiguration;
+
namespace compiler {
class CallDescriptor;
@@ -18,7 +21,6 @@ class Graph;
class InstructionSequence;
class Linkage;
class PipelineData;
-class RegisterConfiguration;
class Schedule;
class Pipeline {
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index df776fac68..c6b003d8bb 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -157,6 +157,48 @@ class OutOfLineLoadZero final : public OutOfLineCode {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ add(scratch1_, object_, offset_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const offset_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
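
This out-of-line class is the slow half of the new write barrier: the inline
path (see the kArchStoreWithWriteBarrier case below) performs the store plus
one page-flag check on the object, and only then jumps here, where Smi values
and uninteresting targets are filtered before the RecordWriteStub call. A
runnable outline of the two-level filter; the predicates are stand-ins for the
generated checks:

// Outline of the filtering; the predicates are illustrative stand-ins.
bool PageHasPointersFromHereInteresting() { return true; }  // inline check
bool PageHasPointersToHereInteresting() { return true; }    // OOL check
bool IsSmiTagged(long value) { return (value & 1) == 0; }   // V8 Smi tag bit

void StoreWithWriteBarrierOutline(long value) {
  // 1. Inline: perform the store, then check the object's page flag.
  if (!PageHasPointersFromHereInteresting()) return;
  // 2. Out of line (OutOfLineRecordWrite::Generate above):
  if (IsSmiTagged(value)) return;  // modes above kValueIsPointer skip Smis
  if (!PageHasPointersToHereInteresting()) return;  // modes above kValueIsMap
  // 3. Finally: call the RecordWriteStub with EMIT_REMEMBERED_SET.
}
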
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -563,21 +605,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-#define ASSEMBLE_STORE_WRITE_BARRIER() \
- do { \
- Register object = i.InputRegister(0); \
- Register index = i.InputRegister(1); \
- Register value = i.InputRegister(2); \
- __ add(index, object, index); \
- __ StoreP(value, MemOperand(index)); \
- SaveFPRegsMode mode = \
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved; \
- __ RecordWrite(object, index, value, lr_status, mode); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-
void CodeGenerator::AssembleDeconstructActivationRecord() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
@@ -594,6 +621,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
switch (opcode) {
case kArchCallCodeObject: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
@@ -624,6 +653,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchCallJSFunction: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -654,6 +685,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
+ case kArchLazyBailout: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -709,6 +747,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StorePX(value, MemOperand(object, offset));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -939,6 +994,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cntlzw_(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Cntlz64:
+ __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Popcnt32:
+ __ popcntw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Popcnt64:
+ __ popcntd(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
case kPPC_Cmp32:
ASSEMBLE_COMPARE(cmpw, cmplw);
break;
@@ -1022,6 +1093,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Move(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kPPC_Int64ToFloat32:
+ __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToDouble:
+ __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
#endif
case kPPC_Int32ToDouble:
__ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
@@ -1136,9 +1215,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kPPC_StoreWriteBarrier:
- ASSEMBLE_STORE_WRITE_BARRIER();
- break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
@@ -1632,6 +1708,9 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index ed9bbcd91c..64a14ae09d 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -62,6 +62,9 @@ namespace compiler {
V(PPC_MinDouble) \
V(PPC_AbsDouble) \
V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
V(PPC_Cmp32) \
V(PPC_Cmp64) \
V(PPC_CmpDouble) \
@@ -75,6 +78,8 @@ namespace compiler {
V(PPC_ExtendSignWord32) \
V(PPC_Uint32ToUint64) \
V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
V(PPC_Int32ToDouble) \
V(PPC_Uint32ToDouble) \
V(PPC_Float32ToDouble) \
@@ -103,8 +108,7 @@ namespace compiler {
V(PPC_StoreWord32) \
V(PPC_StoreWord64) \
V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_StoreWriteBarrier)
+ V(PPC_StoreDouble)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 91c65d14c1..f2a00044c2 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -205,61 +205,83 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(r8), g.TempRegister(r9)};
- Emit(kPPC_StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r7),
- g.UseFixed(offset, r8), g.UseFixed(value, r9), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- ImmediateMode mode = kInt16Imm;
- switch (rep) {
- case kRepFloat32:
- opcode = kPPC_StoreFloat32;
- break;
- case kRepFloat64:
- opcode = kPPC_StoreDouble;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kPPC_StoreWord8;
- break;
- case kRepWord16:
- opcode = kPPC_StoreWord16;
- break;
+
+  // TODO(ppc): Consider a cleaner way to dispatch on the write barrier kind.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(offset);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kPPC_StoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kPPC_StoreDouble;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kPPC_StoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kPPC_StoreWord16;
+ break;
#if !V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
+ case kRepTagged: // Fall through.
#endif
- case kRepWord32:
- opcode = kPPC_StoreWord32;
- break;
+ case kRepWord32:
+ opcode = kPPC_StoreWord32;
+ break;
#if V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kPPC_StoreWord64;
- mode = kInt16Imm_4ByteAligned;
- break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kPPC_StoreWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
#endif
- default:
- UNREACHABLE();
- return;
- }
- if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
- } else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ }
}
}
@@ -417,8 +439,8 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
void InstructionSelector::VisitWord32And(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
- int mb;
- int me;
+ int mb = 0;
+ int me = 0;
if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
@@ -456,8 +478,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
void InstructionSelector::VisitWord64And(Node* node) {
PPCOperandGenerator g(this);
Int64BinopMatcher m(node);
- int mb;
- int me;
+ int mb = 0;
+ int me = 0;
if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
@@ -737,6 +759,38 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Cntlz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
}
@@ -907,6 +961,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_Int64ToInt32, node);
}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Int64ToDouble, node);
+}
#endif
@@ -1471,23 +1535,10 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
PPCOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on PPC it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -1497,7 +1548,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
- for (Node* node : buffer.pushed_nodes) {
+ for (Node* node : (*arguments)) {
Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
g.TempImmediate(slot));
++slot;
@@ -1506,7 +1557,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Push any stack arguments.
int num_slots = static_cast<int>(descriptor->StackParameterCount());
int slot = 0;
- for (Node* input : buffer.pushed_nodes) {
+ for (Node* input : (*arguments)) {
if (slot == 0) {
DCHECK(input);
Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
@@ -1521,134 +1572,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
++slot;
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- PPCOperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on PPC it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on PPC it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- int num_slots = static_cast<int>(descriptor->StackParameterCount());
- int slot = 0;
- for (Node* input : buffer.pushed_nodes) {
- if (slot == 0) {
- Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(num_slots));
- } else {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot));
- }
- ++slot;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
@@ -1702,7 +1629,9 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTiesAway;
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
}
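
Note how this hunk removes VisitCall and VisitTailCall from the PPC backend entirely: call-buffer setup, exception-handler labels, and opcode selection move into the shared instruction selector, and each architecture now supplies only the EmitPrepareArguments and IsTailCallAddressImmediate hooks seen above. A hedged sketch of the resulting division of labor (simplified; the shared half is assumed to live in the architecture-independent instruction-selector.cc and is not part of this hunk):

void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
  FrameStateDescriptor* frame_state = nullptr;
  if (descriptor->NeedsFrameState()) {
    frame_state =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
  }
  CallBuffer buffer(zone(), descriptor, frame_state);
  InitializeCallBuffer(node, &buffer, true, true);
  // Backend hook: push/poke stack arguments only.
  EmitPrepareArguments(&buffer.pushed_nodes, descriptor, node);
  // Handler labels, opcode selection and the final Emit(...)->MarkAsCall()
  // follow as in the deleted PPC code, but written once for all backends.
}
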
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index f590902df3..d658c294d9 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -22,14 +22,11 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
machine_(zone(), word, flags),
common_(zone()),
call_descriptor_(call_descriptor),
- parameters_(nullptr),
+ parameters_(parameter_count(), zone()),
current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
- // Add an extra input node for the JSFunction parameter to the start node.
- Node* s = graph->NewNode(common_.Start(param_count + 1));
- graph->SetStart(s);
- if (parameter_count() == 0) return;
- parameters_ = zone()->NewArray<Node*>(param_count);
+ // Add an extra input for the JSFunction parameter to the start node.
+ graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
for (size_t i = 0; i < parameter_count(); ++i) {
parameters_[i] =
AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
@@ -101,6 +98,22 @@ void RawMachineAssembler::Return(Node* value) {
}
+void RawMachineAssembler::Return(Node* v1, Node* v2) {
+ Node* values[] = {v1, v2};
+ Node* ret = MakeNode(common()->Return(2), 2, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+
+void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
+ Node* values[] = {v1, v2, v3};
+ Node* ret = MakeNode(common()->Return(3), 3, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+
Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
Node** args) {
int param_count =
@@ -158,19 +171,6 @@ Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
}
-Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
- Node* context, Node* frame_state,
- CallFunctionFlags flags) {
- Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 1,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
- Node* stub_code = HeapConstant(callable.code());
- return AddNode(common()->Call(desc), stub_code, function, receiver, context,
- frame_state, graph()->start(), graph()->start());
-}
-
-
Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
Node* arg1, Node* context) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
@@ -201,6 +201,22 @@ Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
}
+Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 4, Operator::kNoProperties, false);
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(4);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
+ ref, arity, context, graph()->start(), graph()->start());
+}
+
+
Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
Node* function) {
MachineSignature::Builder builder(zone(), 1, 0);
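
The CallRuntime4 addition follows the same CEntry calling convention as its one- and two-argument siblings: the call inputs are the CEntry code object, the user arguments, the external reference for the runtime function, the arity, the context, and the effect/control dependencies. A hypothetical usage sketch (the runtime function id, arguments, and context below are placeholders, not part of this patch):

// Build: result = %SomeFunction(1, 2, 3, 4) -- placeholders throughout.
RawMachineAssembler m(isolate, graph, call_descriptor);
Node* context = m.UndefinedConstant();  // stand-in context for this sketch
Node* result =
    m.CallRuntime4(Runtime::kSomeFunction,  // hypothetical function id
                   m.Int32Constant(1), m.Int32Constant(2), m.Int32Constant(3),
                   m.Int32Constant(4), context);
m.Return(result);
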
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 291f69f3b0..d4b8e93d10 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -75,6 +75,10 @@ class RawMachineAssembler {
// place them into the current basic block. They don't perform control flow,
// hence will not switch the current basic block.
+ Node* NullConstant() {
+ return HeapConstant(isolate()->factory()->null_value());
+ }
+
Node* UndefinedConstant() {
return HeapConstant(isolate()->factory()->undefined_value());
}
@@ -126,11 +130,13 @@ class RawMachineAssembler {
return AddNode(machine()->Load(rep), base, index, graph()->start(),
graph()->start());
}
- Node* Store(MachineType rep, Node* base, Node* value) {
- return Store(rep, base, IntPtrConstant(0), value);
+ Node* Store(MachineType rep, Node* base, Node* value,
+ WriteBarrierKind write_barrier) {
+ return Store(rep, base, IntPtrConstant(0), value, write_barrier);
}
- Node* Store(MachineType rep, Node* base, Node* index, Node* value) {
- return AddNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ Node* Store(MachineType rep, Node* base, Node* index, Node* value,
+ WriteBarrierKind write_barrier) {
+ return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
base, index, value, graph()->start(), graph()->start());
}
@@ -227,6 +233,7 @@ class RawMachineAssembler {
Node* Word64Ror(Node* a, Node* b) {
return AddNode(machine()->Word64Ror(), a, b);
}
+ Node* Word64Clz(Node* a) { return AddNode(machine()->Word64Clz(), a); }
Node* Word64Equal(Node* a, Node* b) {
return AddNode(machine()->Word64Equal(), a, b);
}
@@ -355,6 +362,12 @@ class RawMachineAssembler {
Node* Float32Div(Node* a, Node* b) {
return AddNode(machine()->Float32Div(), a, b);
}
+ Node* Float32Max(Node* a, Node* b) {
+ return AddNode(machine()->Float32Max().op(), a, b);
+ }
+ Node* Float32Min(Node* a, Node* b) {
+ return AddNode(machine()->Float32Min().op(), a, b);
+ }
Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
Node* Float32Equal(Node* a, Node* b) {
@@ -389,6 +402,12 @@ class RawMachineAssembler {
Node* Float64Mod(Node* a, Node* b) {
return AddNode(machine()->Float64Mod(), a, b);
}
+ Node* Float64Max(Node* a, Node* b) {
+ return AddNode(machine()->Float64Max().op(), a, b);
+ }
+ Node* Float64Min(Node* a, Node* b) {
+ return AddNode(machine()->Float64Min().op(), a, b);
+ }
Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
Node* Float64Equal(Node* a, Node* b) {
@@ -439,6 +458,12 @@ class RawMachineAssembler {
Node* TruncateInt64ToInt32(Node* a) {
return AddNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* RoundInt64ToFloat32(Node* a) {
+ return AddNode(machine()->RoundInt64ToFloat32(), a);
+ }
+ Node* RoundInt64ToFloat64(Node* a) {
+ return AddNode(machine()->RoundInt64ToFloat64(), a);
+ }
Node* BitcastFloat32ToInt32(Node* a) {
return AddNode(machine()->BitcastFloat32ToInt32(), a);
}
@@ -487,7 +512,7 @@ class RawMachineAssembler {
return Load(rep, PointerConstant(address), Int32Constant(offset));
}
Node* StoreToPointer(void* address, MachineType rep, Node* node) {
- return Store(rep, PointerConstant(address), node);
+ return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
}
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
@@ -500,14 +525,14 @@ class RawMachineAssembler {
Node* frame_state);
// Tail call the given call descriptor and the given arguments.
Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
- // Call through CallFunctionStub with lazy deopt and frame-state.
- Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
- Node* frame_state, CallFunctionFlags flags);
// Call to a runtime function with one argument.
Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
// Call to a runtime function with two arguments.
Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* context);
+ // Call to a runtime function with four arguments.
+ Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* context);
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
@@ -536,6 +561,8 @@ class RawMachineAssembler {
void Switch(Node* index, Label* default_label, int32_t* case_values,
Label** case_labels, size_t case_count);
void Return(Node* value);
+ void Return(Node* v1, Node* v2);
+ void Return(Node* v1, Node* v2, Node* v3);
void Bind(Label* label);
void Deoptimize(Node* state);
@@ -579,7 +606,7 @@ class RawMachineAssembler {
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
- Node** parameters_;
+ NodeVector parameters_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
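
Two header changes above are worth calling out. Store now takes an explicit WriteBarrierKind, so every call site states its barrier policy instead of silently getting kNoWriteBarrier; and parameters_ becomes a zone-backed NodeVector sized up front, removing the lazy array allocation. A short before/after sketch of the Store change:

// Old API (barrier implicit, always kNoWriteBarrier):
//   m.Store(kMachInt32, base, index, value);
// New API (barrier explicit; pass a real barrier kind when storing tagged
// pointers into heap objects):
m.Store(kMachInt32, base, index, value, kNoWriteBarrier);
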
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 0b775d29e1..68862add46 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -48,7 +48,7 @@ void VerifyAllocatedGaps(const Instruction* instr) {
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate) {
+ if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -59,6 +59,7 @@ void RegisterAllocatorVerifier::VerifyTemp(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
}
@@ -66,6 +67,7 @@ void RegisterAllocatorVerifier::VerifyTemp(
void RegisterAllocatorVerifier::VerifyOutput(
const OperandConstraint& constraint) {
CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -143,6 +145,8 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->type_ = kConstant;
constraint->value_ = ConstantOperand::cast(op)->virtual_register();
constraint->virtual_register_ = constraint->value_;
+ } else if (op->IsExplicit()) {
+ constraint->type_ = kExplicit;
} else if (op->IsImmediate()) {
auto imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
@@ -160,8 +164,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
} else {
switch (unallocated->extended_policy()) {
case UnallocatedOperand::ANY:
- CHECK(false);
- break;
case UnallocatedOperand::NONE:
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kNoneDouble;
@@ -216,20 +218,25 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kRegister:
CHECK(op->IsRegister());
return;
- case kFixedRegister:
- CHECK(op->IsRegister());
- CHECK_EQ(RegisterOperand::cast(op)->index(), constraint->value_);
- return;
case kDoubleRegister:
CHECK(op->IsDoubleRegister());
return;
+ case kExplicit:
+ CHECK(op->IsExplicit());
+ return;
+ case kFixedRegister:
+ CHECK(op->IsRegister());
+ CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
+ constraint->value_);
+ return;
case kFixedDoubleRegister:
CHECK(op->IsDoubleRegister());
- CHECK_EQ(DoubleRegisterOperand::cast(op)->index(), constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
+ constraint->value_);
return;
case kFixedSlot:
CHECK(op->IsStackSlot());
- CHECK_EQ(StackSlotOperand::cast(op)->index(), constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
return;
case kSlot:
CHECK(op->IsStackSlot());
@@ -282,7 +289,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
struct OperandLess {
bool operator()(const InstructionOperand* a,
const InstructionOperand* b) const {
- return a->CompareModuloType(*b);
+ return a->CompareCanonicalized(*b);
}
};
@@ -316,7 +323,7 @@ class OperandMap : public ZoneObject {
this->erase(it++);
if (it == this->end()) return;
}
- if (it->first->EqualsModuloType(*o.first)) {
+ if (it->first->EqualsCanonicalized(*o.first)) {
++it;
if (it == this->end()) return;
} else {
@@ -676,7 +683,10 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
const auto op_constraints = instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
- if (op_constraints[count].type_ == kImmediate) continue;
+ if (op_constraints[count].type_ == kImmediate ||
+ op_constraints[count].type_ == kExplicit) {
+ continue;
+ }
int virtual_register = op_constraints[count].virtual_register_;
auto op = instr->InputAt(i);
if (!block_maps->IsPhi(virtual_register)) {
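
The kExplicit plumbing above encodes one rule: explicit operands name a fixed physical location directly and carry no virtual register, so the verifier treats them like immediates on inputs and rejects them outright as temps and outputs. A minimal sketch of the input-side rule (assumed shape, plain C++):

// Inputs need a valid virtual register unless they are constants baked into
// the instruction (immediates) or fixed locations named directly (explicit).
bool InputNeedsVirtualRegister(const OperandConstraint& c) {
  return c.type_ != kImmediate && c.type_ != kExplicit;
}
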
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 15db782a68..87b5cfbb7a 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -35,6 +35,7 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kFixedSlot,
kNone,
kNoneDouble,
+ kExplicit,
kSameAsFirst
};
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 840c13b1a7..0dc76000f7 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -27,11 +27,26 @@ void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? cfg->num_aliased_double_registers()
+ return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
: cfg->num_general_registers();
}
+int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS
+ ? cfg->num_allocatable_aliased_double_registers()
+ : cfg->num_allocatable_general_registers();
+}
+
+
+const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
+ : cfg->allocatable_general_codes();
+}
+
+
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
auto index = block->loop_header();
@@ -52,11 +67,11 @@ Instruction* GetLastInstruction(InstructionSequence* code,
}
-bool IsOutputRegisterOf(Instruction* instr, int index) {
+bool IsOutputRegisterOf(Instruction* instr, Register reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
auto output = instr->OutputAt(i);
if (output->IsRegister() &&
- RegisterOperand::cast(output)->index() == index) {
+ LocationOperand::cast(output)->GetRegister().is(reg)) {
return true;
}
}
@@ -64,11 +79,11 @@ bool IsOutputRegisterOf(Instruction* instr, int index) {
}
-bool IsOutputDoubleRegisterOf(Instruction* instr, int index) {
+bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
auto output = instr->OutputAt(i);
if (output->IsDoubleRegister() &&
- DoubleRegisterOperand::cast(output)->index() == index) {
+ LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
return true;
}
}
@@ -129,7 +144,7 @@ bool UsePosition::HasHint() const {
}
-bool UsePosition::HintRegister(int* register_index) const {
+bool UsePosition::HintRegister(int* register_code) const {
if (hint_ == nullptr) return false;
switch (HintTypeField::decode(flags_)) {
case UsePositionHintType::kNone:
@@ -139,20 +154,23 @@ bool UsePosition::HintRegister(int* register_index) const {
auto use_pos = reinterpret_cast<UsePosition*>(hint_);
int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
if (assigned_register == kUnassignedRegister) return false;
- *register_index = assigned_register;
+ *register_code = assigned_register;
return true;
}
case UsePositionHintType::kOperand: {
auto operand = reinterpret_cast<InstructionOperand*>(hint_);
- int assigned_register = AllocatedOperand::cast(operand)->index();
- *register_index = assigned_register;
+ int assigned_register =
+ operand->IsRegister()
+ ? LocationOperand::cast(operand)->GetRegister().code()
+ : LocationOperand::cast(operand)->GetDoubleRegister().code();
+ *register_code = assigned_register;
return true;
}
case UsePositionHintType::kPhi: {
auto phi = reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
int assigned_register = phi->assigned_register();
if (assigned_register == kUnassignedRegister) return false;
- *register_index = assigned_register;
+ *register_code = assigned_register;
return true;
}
}
@@ -166,17 +184,16 @@ UsePositionHintType UsePosition::HintTypeForOperand(
switch (op.kind()) {
case InstructionOperand::CONSTANT:
case InstructionOperand::IMMEDIATE:
+ case InstructionOperand::EXPLICIT:
return UsePositionHintType::kNone;
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
case InstructionOperand::ALLOCATED:
- switch (AllocatedOperand::cast(op).allocated_kind()) {
- case AllocatedOperand::REGISTER:
- case AllocatedOperand::DOUBLE_REGISTER:
- return UsePositionHintType::kOperand;
- case AllocatedOperand::STACK_SLOT:
- case AllocatedOperand::DOUBLE_STACK_SLOT:
- return UsePositionHintType::kNone;
+ if (op.IsRegister() || op.IsDoubleRegister()) {
+ return UsePositionHintType::kOperand;
+ } else {
+ DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
+ return UsePositionHintType::kNone;
}
case InstructionOperand::INVALID:
break;
@@ -246,6 +263,7 @@ LiveRange::LiveRange(int relative_id, MachineType machine_type,
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr),
+ splitting_pointer_(nullptr),
size_(kInvalidSize),
weight_(kInvalidWeight),
group_(nullptr) {
@@ -379,12 +397,8 @@ bool LiveRange::IsTopLevel() const { return top_level_ == this; }
InstructionOperand LiveRange::GetAssignedOperand() const {
if (HasRegisterAssigned()) {
DCHECK(!spilled());
- switch (kind()) {
- case GENERAL_REGISTERS:
- return RegisterOperand(machine_type(), assigned_register());
- case DOUBLE_REGISTERS:
- return DoubleRegisterOperand(machine_type(), assigned_register());
- }
+ return AllocatedOperand(LocationOperand::REGISTER, machine_type(),
+ assigned_register());
}
DCHECK(spilled());
DCHECK(!HasRegisterAssigned());
@@ -435,8 +449,8 @@ LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
}
-void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
- Zone* zone) {
+UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone) {
DCHECK(Start() < position);
DCHECK(End() > position);
DCHECK(result->IsEmpty());
@@ -482,7 +496,10 @@ void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
// Find the last use position before the split and the first use
// position after it.
- auto use_after = first_pos_;
+ auto use_after =
+ splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
+ ? first_pos()
+ : splitting_pointer_;
UsePosition* use_before = nullptr;
if (split_at_start) {
// The split position coincides with the beginning of a use interval (the
@@ -520,6 +537,7 @@ void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
Verify();
result->Verify();
#endif
+ return use_before;
}
@@ -673,11 +691,19 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type)
spilled_in_deferred_blocks_(false),
spill_start_index_(kMaxInt),
last_child_(this),
- last_insertion_point_(this) {
+ last_pos_(nullptr),
+ splinter_(nullptr) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
+#if DEBUG
+int TopLevelLiveRange::debug_virt_reg() const {
+ return IsSplinter() ? splintered_from()->vreg() : vreg();
+}
+#endif
+
+
void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
InstructionOperand* operand) {
DCHECK(HasNoSpillType());
@@ -686,11 +712,11 @@ void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
}
-bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
- InstructionSequence* code, const InstructionOperand& spill_operand) {
+void TopLevelLiveRange::MarkSpilledInDeferredBlock(
+ const InstructionSequence* code) {
if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
- spill_operand.IsConstant() || spill_operand.IsImmediate()) {
- return false;
+ !HasSpillRange()) {
+ return;
}
int count = 0;
@@ -718,16 +744,23 @@ bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
"Live Range %d must be spilled at definition: found a "
"slot-requiring non-deferred child range %d.\n",
TopLevel()->vreg(), child->relative_id());
- return false;
+ return;
}
} else {
if (child->spilled() || has_slot_use) ++count;
}
}
- if (count == 0) return false;
+ if (count == 0) return;
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
+ spills_at_definition_ = nullptr;
+}
+
+
+bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
+ InstructionSequence* code, const InstructionOperand& spill_operand) {
+ if (!IsSpilledOnlyInDeferredBlocks()) return false;
TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
// If we have ranges that aren't spilled but require the operand on the stack,
@@ -808,24 +841,17 @@ void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
auto spill_range = GetSpillRange();
int index = spill_range->assigned_slot();
- switch (kind()) {
- case GENERAL_REGISTERS:
- return StackSlotOperand(machine_type(), index);
- case DOUBLE_REGISTERS:
- return DoubleStackSlotOperand(machine_type(), index);
- }
- UNREACHABLE();
- return StackSlotOperand(kMachNone, 0);
+ return AllocatedOperand(LocationOperand::STACK_SLOT, machine_type(), index);
}
void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
- TopLevelLiveRange* result, Zone* zone) {
+ Zone* zone) {
DCHECK(start != Start() || end != End());
DCHECK(start < end);
- result->set_spill_type(spill_type());
-
+ TopLevelLiveRange splinter_temp(-1, machine_type());
+ UsePosition* last_in_splinter = nullptr;
if (start <= Start()) {
// TODO(mtrofin): here, the TopLevel part is in the deferred range, so we
// may want to continue processing the splinter. However, if the value is
@@ -834,21 +860,21 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
// should check this, however, this may not be the place, because we don't
// have access to the instruction sequence.
DCHECK(end < End());
- DetachAt(end, result, zone);
+ DetachAt(end, &splinter_temp, zone);
next_ = nullptr;
} else if (end >= End()) {
DCHECK(start > Start());
- DetachAt(start, result, zone);
+ DetachAt(start, &splinter_temp, zone);
next_ = nullptr;
} else {
DCHECK(start < End() && Start() < end);
const int kInvalidId = std::numeric_limits<int>::max();
- DetachAt(start, result, zone);
+ UsePosition* last = DetachAt(start, &splinter_temp, zone);
LiveRange end_part(kInvalidId, this->machine_type(), nullptr);
- result->DetachAt(end, &end_part, zone);
+ last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
next_ = end_part.next_;
last_interval_->set_next(end_part.first_interval_);
@@ -858,30 +884,46 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
current_interval_ = last_interval_;
last_interval_ = end_part.last_interval_;
-
if (first_pos_ == nullptr) {
first_pos_ = end_part.first_pos_;
} else {
- UsePosition* pos = first_pos_;
- for (; pos->next() != nullptr; pos = pos->next()) {
- }
- pos->set_next(end_part.first_pos_);
+ splitting_pointer_ = last;
+ if (last != nullptr) last->set_next(end_part.first_pos_);
}
}
- result->next_ = nullptr;
- result->top_level_ = result;
- result->SetSplinteredFrom(this);
- // Ensure the result's relative ID is unique within the IDs used for this
- // virtual register's children and splinters.
- result->relative_id_ = GetNextChildId();
+ if (splinter()->IsEmpty()) {
+ splinter()->first_interval_ = splinter_temp.first_interval_;
+ splinter()->last_interval_ = splinter_temp.last_interval_;
+ } else {
+ splinter()->last_interval_->set_next(splinter_temp.first_interval_);
+ splinter()->last_interval_ = splinter_temp.last_interval_;
+ }
+ if (splinter()->first_pos() == nullptr) {
+ splinter()->first_pos_ = splinter_temp.first_pos_;
+ } else {
+ splinter()->last_pos_->set_next(splinter_temp.first_pos_);
+ }
+ if (last_in_splinter != nullptr) {
+ splinter()->last_pos_ = last_in_splinter;
+ } else {
+ if (splinter()->first_pos() != nullptr &&
+ splinter()->last_pos_ == nullptr) {
+ splinter()->last_pos_ = splinter()->first_pos();
+ for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ splinter()->last_pos_ = pos;
+ }
+ }
+ }
+#if DEBUG
+ Verify();
+ splinter()->Verify();
+#endif
}
void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
- // The splinter parent is always the original "Top".
- DCHECK(splinter_parent->Start() < Start());
-
splintered_from_ = splinter_parent;
if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
SetSpillRange(splinter_parent->spill_range_);
@@ -906,43 +948,55 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
DCHECK(Start() < other->Start());
DCHECK(other->splintered_from() == this);
- LiveRange* last_other = other->last_child();
- LiveRange* last_me = last_child();
-
- // Simple case: we just append at the end.
- if (last_me->End() <= other->Start()) return last_me->AppendAsChild(other);
-
- DCHECK(last_me->End() > last_other->End());
+ LiveRange* first = this;
+ LiveRange* second = other;
+ DCHECK(first->Start() < second->Start());
+ while (first != nullptr && second != nullptr) {
+ DCHECK(first != second);
+ // Make sure the ranges are in order each time we iterate.
+ if (second->Start() < first->Start()) {
+ LiveRange* tmp = second;
+ second = first;
+ first = tmp;
+ continue;
+ }
- // In the more general case, we need to find the ranges between which to
- // insert.
- if (other->Start() < last_insertion_point_->Start()) {
- last_insertion_point_ = this;
- }
+ if (first->End() <= second->Start()) {
+ if (first->next() == nullptr ||
+ first->next()->Start() > second->Start()) {
+ // First is in order before second.
+ LiveRange* temp = first->next();
+ first->next_ = second;
+ first = temp;
+ } else {
+ // First is in order before its successor (or second), so advance first.
+ first = first->next();
+ }
+ continue;
+ }
- for (; last_insertion_point_->next() != nullptr &&
- last_insertion_point_->next()->Start() <= other->Start();
- last_insertion_point_ = last_insertion_point_->next()) {
- }
+ DCHECK(first->Start() < second->Start());
+ // If first and second intersect, split first.
+ if (first->Start() < second->End() && second->Start() < first->End()) {
+ LiveRange* temp = first->SplitAt(second->Start(), zone);
+ CHECK(temp != first);
+ temp->set_spilled(first->spilled());
+ if (!temp->spilled())
+ temp->set_assigned_register(first->assigned_register());
- // When we splintered the original range, we reconstituted the original range
- // into one range without children, but with discontinuities. To merge the
- // splinter back in, we need to split the range - or a child obtained after
- // register allocation splitting.
- LiveRange* after = last_insertion_point_->next();
- if (last_insertion_point_->End() > other->Start()) {
- LiveRange* new_after = last_insertion_point_->SplitAt(other->Start(), zone);
- new_after->set_spilled(last_insertion_point_->spilled());
- if (!new_after->spilled())
- new_after->set_assigned_register(
- last_insertion_point_->assigned_register());
- after = new_after;
+ first->next_ = second;
+ first = temp;
+ continue;
+ }
+ DCHECK(first->End() <= second->Start());
}
- last_other->next_ = after;
- last_insertion_point_->next_ = other;
- other->UpdateParentForAllChildren(TopLevel());
+ TopLevel()->UpdateParentForAllChildren(TopLevel());
TopLevel()->UpdateSpillRangePostMerge(other);
+
+#if DEBUG
+ Verify();
+#endif
}
@@ -1213,6 +1267,10 @@ RegisterAllocationData::RegisterAllocationData(
debug_name_(debug_name),
config_(config),
phi_map_(allocation_zone()),
+ allocatable_codes_(this->config()->num_general_registers(), -1,
+ allocation_zone()),
+ allocatable_double_codes_(this->config()->num_double_registers(), -1,
+ allocation_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
@@ -1233,7 +1291,7 @@ RegisterAllocationData::RegisterAllocationData(
assigned_registers_ = new (code_zone())
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
- BitVector(this->config()->num_aliased_double_registers(), code_zone());
+ BitVector(this->config()->num_double_registers(), code_zone());
this->frame()->SetAllocatedRegisters(assigned_registers_);
this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
}
@@ -1467,18 +1525,17 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
machine_type = data()->MachineTypeFor(virtual_register);
}
if (operand->HasFixedSlotPolicy()) {
- AllocatedOperand::AllocatedKind kind =
- IsFloatingPoint(machine_type) ? AllocatedOperand::DOUBLE_STACK_SLOT
- : AllocatedOperand::STACK_SLOT;
- allocated =
- AllocatedOperand(kind, machine_type, operand->fixed_slot_index());
+ allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, machine_type,
+ operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
+ DCHECK(!IsFloatingPoint(machine_type));
allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
operand->fixed_register_index());
} else if (operand->HasFixedDoubleRegisterPolicy()) {
+ DCHECK(IsFloatingPoint(machine_type));
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
- allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER,
- machine_type, operand->fixed_register_index());
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
+ operand->fixed_register_index());
} else {
UNREACHABLE();
}
@@ -1529,9 +1586,9 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
AllocateFixed(output, -1, false);
// This value is produced on the stack, we never need to spill it.
if (output->IsStackSlot()) {
- DCHECK(StackSlotOperand::cast(output)->index() <
+ DCHECK(LocationOperand::cast(output)->index() <
data()->frame()->GetSpillSlotCount());
- range->SetSpillOperand(StackSlotOperand::cast(output));
+ range->SetSpillOperand(LocationOperand::cast(output));
range->SetSpillStartIndex(end);
assigned = true;
}
@@ -1589,9 +1646,9 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
// This value is produced on the stack, we never need to spill it.
if (first_output->IsStackSlot()) {
- DCHECK(StackSlotOperand::cast(first_output)->index() <
+ DCHECK(LocationOperand::cast(first_output)->index() <
data()->frame()->GetTotalFrameSlotCount());
- range->SetSpillOperand(StackSlotOperand::cast(first_output));
+ range->SetSpillOperand(LocationOperand::cast(first_output));
range->SetSpillStartIndex(instr_index + 1);
assigned = true;
}
@@ -1614,7 +1671,9 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
auto input = second->InputAt(i);
- if (input->IsImmediate()) continue; // Ignore immediates.
+ if (input->IsImmediate() || input->IsExplicit()) {
+ continue; // Ignore immediates and explicitly reserved registers.
+ }
auto cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
@@ -1772,7 +1831,7 @@ TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < config()->num_aliased_double_registers());
+ DCHECK(index < config()->num_double_registers());
auto result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedDoubleLiveRangeID(index), kRepFloat64);
@@ -1793,10 +1852,11 @@ TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
return data()->GetOrCreateLiveRangeFor(
ConstantOperand::cast(operand)->virtual_register());
} else if (operand->IsRegister()) {
- return FixedLiveRangeFor(RegisterOperand::cast(operand)->index());
+ return FixedLiveRangeFor(
+ LocationOperand::cast(operand)->GetRegister().code());
} else if (operand->IsDoubleRegister()) {
return FixedDoubleLiveRangeFor(
- DoubleRegisterOperand::cast(operand)->index());
+ LocationOperand::cast(operand)->GetDoubleRegister().code());
} else {
return nullptr;
}
@@ -1886,9 +1946,10 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersRegisters()) {
- for (int i = 0; i < config()->num_general_registers(); ++i) {
- if (!IsOutputRegisterOf(instr, i)) {
- auto range = FixedLiveRangeFor(i);
+ for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
+ int code = config()->GetAllocatableGeneralCode(i);
+ if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
+ auto range = FixedLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
@@ -1896,9 +1957,11 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
- if (!IsOutputDoubleRegisterOf(instr, i)) {
- auto range = FixedDoubleLiveRangeFor(i);
+ for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
+ ++i) {
+ int code = config()->GetAllocatableDoubleCode(i);
+ if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
+ auto range = FixedDoubleLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
@@ -1907,7 +1970,9 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
for (size_t i = 0; i < instr->InputCount(); i++) {
auto input = instr->InputAt(i);
- if (input->IsImmediate()) continue; // Ignore immediates.
+ if (input->IsImmediate() || input->IsExplicit()) {
+ continue; // Ignore immediates and explicitly reserved registers.
+ }
LifetimePosition use_pos;
if (input->IsUnallocated() &&
UnallocatedOperand::cast(input)->IsUsedAtStart()) {
@@ -2144,7 +2209,63 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
RegisterKind kind)
: data_(data),
mode_(kind),
- num_registers_(GetRegisterCount(data->config(), kind)) {}
+ num_registers_(GetRegisterCount(data->config(), kind)),
+ num_allocatable_registers_(
+ GetAllocatableRegisterCount(data->config(), kind)),
+ allocatable_register_codes_(
+ GetAllocatableRegisterCodes(data->config(), kind)) {}
+
+
+LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
+ const LiveRange* range, int instruction_index) {
+ LifetimePosition ret = LifetimePosition::Invalid();
+
+ ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
+ }
+ return ret;
+}
+
+
+void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
+ bool operands_only) {
+ size_t initial_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < initial_range_count; ++i) {
+ TopLevelLiveRange* range = data()->live_ranges()[i];
+ if (!CanProcessRange(range)) continue;
+ if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
+ continue;
+ }
+
+ LifetimePosition start = range->Start();
+ TRACE("Live range %d:%d is defined by a spill operand.\n",
+ range->TopLevel()->vreg(), range->relative_id());
+ LifetimePosition next_pos = start;
+ if (next_pos.IsGapPosition()) {
+ next_pos = next_pos.NextStart();
+ }
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ // If the range already has a spill operand and it doesn't need a
+ // register immediately, split it and spill the first part of the range.
+ if (pos == nullptr) {
+ Spill(range);
+ } else if (pos->pos() > range->Start().NextStart()) {
+ // Do not spill live range eagerly if use position that can benefit from
+ // the register is too close to the start of live range.
+ LifetimePosition split_pos = GetSplitPositionForInstruction(
+ range, pos->pos().ToInstructionIndex());
+ // There is no place to split, so we can't split and spill.
+ if (!split_pos.IsValid()) continue;
+
+ split_pos =
+ FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
+
+ SplitRangeAt(range, split_pos);
+ Spill(range);
+ }
+ }
+}
LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
@@ -2267,11 +2388,11 @@ const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
}
-const char* RegisterAllocator::RegisterName(int allocation_index) const {
+const char* RegisterAllocator::RegisterName(int register_code) const {
if (mode() == GENERAL_REGISTERS) {
- return data()->config()->general_register_name(allocation_index);
+ return data()->config()->GetGeneralRegisterName(register_code);
} else {
- return data()->config()->double_register_name(allocation_index);
+ return data()->config()->GetDoubleRegisterName(register_code);
}
}
@@ -2298,10 +2419,15 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
- for (LiveRange* range : data()->live_ranges()) {
- if (range == nullptr) continue;
- if (range->kind() == mode()) {
- AddToUnhandledUnsorted(range);
+ SplitAndSpillRangesDefinedByMemoryOperand(false);
+
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (!CanProcessRange(range)) continue;
+ for (LiveRange* to_add = range; to_add != nullptr;
+ to_add = to_add->next()) {
+ if (!to_add->spilled()) {
+ AddToUnhandledUnsorted(to_add);
+ }
}
}
SortUnhandled();
@@ -2327,28 +2453,6 @@ void LinearScanAllocator::AllocateRegisters() {
TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
current->relative_id(), position.value());
- if (current->IsTopLevel() && !current->TopLevel()->HasNoSpillType()) {
- TRACE("Live range %d:%d already has a spill operand\n",
- current->TopLevel()->vreg(), current->relative_id());
- auto next_pos = position;
- if (next_pos.IsGapPosition()) {
- next_pos = next_pos.NextStart();
- }
- auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == nullptr) {
- Spill(current);
- continue;
- } else if (pos->pos() > current->Start().NextStart()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- SpillBetween(current, current->Start(), pos->pos());
- DCHECK(UnhandledIsSorted());
- continue;
- }
- }
-
if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
continue;
@@ -2510,6 +2614,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
for (auto cur_active : active_live_ranges()) {
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::GapFromInstructionIndex(0);
+ TRACE("Register %s is free until pos %d (1)\n",
+ RegisterName(cur_active->assigned_register()),
+ LifetimePosition::GapFromInstructionIndex(0).value());
}
for (auto cur_inactive : inactive_live_ranges()) {
@@ -2518,6 +2625,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ Min(free_until_pos[cur_reg], next_intersection).value());
}
int hint_register;
@@ -2539,10 +2648,11 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
}
// Find the register which stays free for the longest time.
- int reg = 0;
- for (int i = 1; i < num_registers(); ++i) {
- if (free_until_pos[i] > free_until_pos[reg]) {
- reg = i;
+ int reg = allocatable_register_code(0);
+ for (int i = 1; i < num_allocatable_registers(); ++i) {
+ int code = allocatable_register_code(i);
+ if (free_until_pos[code] > free_until_pos[reg]) {
+ reg = code;
}
}
@@ -2617,10 +2727,11 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- int reg = 0;
- for (int i = 1; i < num_registers(); ++i) {
- if (use_pos[i] > use_pos[reg]) {
- reg = i;
+ int reg = allocatable_register_code(0);
+ for (int i = 1; i < num_allocatable_registers(); ++i) {
+ int code = allocatable_register_code(i);
+ if (use_pos[code] > use_pos[reg]) {
+ reg = code;
}
}
@@ -2839,10 +2950,20 @@ void SpillSlotLocator::LocateSpillSlots() {
if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
if (!range->HasSpillRange()) continue;
- auto spills = range->spills_at_definition();
- DCHECK_NOT_NULL(spills);
- for (; spills != nullptr; spills = spills->next) {
- code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
+ range->MarkSpilledInDeferredBlock(data()->code());
+ if (range->IsSpilledOnlyInDeferredBlocks()) {
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ if (child->spilled()) {
+ code->GetInstructionBlock(child->Start().ToInstructionIndex())
+ ->mark_needs_frame();
+ }
+ }
+ } else {
+ auto spills = range->spills_at_definition();
+ DCHECK_NOT_NULL(spills);
+ for (; spills != nullptr; spills = spills->next) {
+ code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
+ }
}
}
}
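
The largest change in this file is TopLevelLiveRange::Merge, which replaces the single-insertion-point splice with a full sorted merge of the two child chains, splitting first wherever the chains overlap. The invariant is easiest to see on plain linked lists; a hedged illustration in ordinary C++ (not V8 types), with the overlap-splitting step reduced to a comment:

#include <utility>

struct R { int start; R* next; };

// Splice two chains ordered by start into one ordered chain. The real Merge
// additionally calls SplitAt on the current range when the two overlap.
R* MergeChains(R* first, R* second) {
  R dummy{0, nullptr};
  R* tail = &dummy;
  while (first != nullptr && second != nullptr) {
    if (second->start < first->start) std::swap(first, second);
    tail->next = first;  // first now starts no later than second
    tail = first;
    first = first->next;
  }
  tail->next = (first != nullptr) ? first : second;
  return dummy.next;
}
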
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 117ddedbcd..443232abb1 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -7,6 +7,7 @@
#include "src/compiler/instruction.h"
#include "src/ostreams.h"
+#include "src/register-configuration.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -57,6 +58,8 @@ class LifetimePosition final {
// Returns true if this lifetime position corresponds to a START value
bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
+ // Returns true if this lifetime position corresponds to an END value
+ bool IsEnd() const { return (value_ & (kHalfStep - 1)) == 1; }
// Returns true if this lifetime position corresponds to a gap START value
bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }
@@ -241,15 +244,15 @@ class UsePosition final : public ZoneObject {
void set_next(UsePosition* next) { next_ = next; }
// For hinting only.
- void set_assigned_register(int register_index) {
- flags_ = AssignedRegisterField::update(flags_, register_index);
+ void set_assigned_register(int register_code) {
+ flags_ = AssignedRegisterField::update(flags_, register_code);
}
UsePositionHintType hint_type() const {
return HintTypeField::decode(flags_);
}
bool HasHint() const;
- bool HintRegister(int* register_index) const;
+ bool HintRegister(int* register_code) const;
void ResolveHint(UsePosition* use_pos);
bool IsResolved() const {
return hint_type() != UsePositionHintType::kUnresolved;
@@ -341,7 +344,8 @@ class LiveRange : public ZoneObject {
// live range to the result live range.
// The current range will terminate at position, while result will start from
// position.
- void DetachAt(LifetimePosition position, LiveRange* result, Zone* zone);
+ UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone);
// Detaches at position, and then links the resulting ranges. Returns the
// child, which starts at position.
@@ -424,7 +428,8 @@ class LiveRange : public ZoneObject {
mutable UsePosition* last_processed_use_;
// This is used as a cache, it's invalid outside of BuildLiveRanges.
mutable UsePosition* current_hint_position_;
-
+ // Cache the last position splintering stopped at.
+ mutable UsePosition* splitting_pointer_;
// greedy: the number of LifetimePositions covered by this range. Used to
// prioritize selecting live ranges for register assignment, as well as
// in weight calculations.
@@ -490,8 +495,7 @@ class TopLevelLiveRange final : public LiveRange {
// result.
// The current range is pointed to as "splintered_from". No parent/child
// relationship is established between this and result.
- void Splinter(LifetimePosition start, LifetimePosition end,
- TopLevelLiveRange* result, Zone* zone);
+ void Splinter(LifetimePosition start, LifetimePosition end, Zone* zone);
// Assuming other was splintered from this range, embeds other and its
// children as part of the children sequence of this range.
@@ -535,7 +539,6 @@ class TopLevelLiveRange final : public LiveRange {
spill_start_index_ = Min(start, spill_start_index_);
}
- void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
void CommitSpillsAtDefinition(InstructionSequence* sequence,
const InstructionOperand& operand,
bool might_be_duplicated);
@@ -547,6 +550,7 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
+ void MarkSpilledInDeferredBlock(const InstructionSequence* code);
bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
const InstructionOperand& spill_operand);
@@ -559,6 +563,10 @@ class TopLevelLiveRange final : public LiveRange {
void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
int vreg() const { return vreg_; }
+#if DEBUG
+ int debug_virt_reg() const;
+#endif
+
int GetNextChildId() {
return IsSplinter() ? splintered_from()->GetNextChildId()
: ++last_child_id_;
@@ -575,8 +583,20 @@ class TopLevelLiveRange final : public LiveRange {
}
void set_last_child(LiveRange* range) { last_child_ = range; }
LiveRange* last_child() const { return last_child_; }
+ TopLevelLiveRange* splinter() const { return splinter_; }
+ void SetSplinter(TopLevelLiveRange* splinter) {
+ DCHECK_NULL(splinter_);
+ DCHECK_NOT_NULL(splinter);
+
+ splinter_ = splinter;
+ splinter->relative_id_ = GetNextChildId();
+ splinter->set_spill_type(spill_type());
+ splinter->SetSplinteredFrom(this);
+ }
private:
+ void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
+
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
typedef BitField<bool, 3, 1> IsNonLoopPhiField;
@@ -596,7 +616,8 @@ class TopLevelLiveRange final : public LiveRange {
bool spilled_in_deferred_blocks_;
int spill_start_index_;
LiveRange* last_child_;
- LiveRange* last_insertion_point_;
+ UsePosition* last_pos_;
+ TopLevelLiveRange* splinter_;
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
@@ -666,9 +687,9 @@ class RegisterAllocationData final : public ZoneObject {
// For hinting.
int assigned_register() const { return assigned_register_; }
- void set_assigned_register(int register_index) {
+ void set_assigned_register(int register_code) {
DCHECK_EQ(assigned_register_, kUnassignedRegister);
- assigned_register_ = register_index;
+ assigned_register_ = register_code;
}
void UnsetAssignedRegister() { assigned_register_ = kUnassignedRegister; }
@@ -769,6 +790,8 @@ class RegisterAllocationData final : public ZoneObject {
const char* const debug_name_;
const RegisterConfiguration* const config_;
PhiMap phi_map_;
+ ZoneVector<int> allocatable_codes_;
+ ZoneVector<int> allocatable_double_codes_;
ZoneVector<BitVector*> live_in_sets_;
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
@@ -886,9 +909,21 @@ class RegisterAllocator : public ZoneObject {
InstructionSequence* code() const { return data()->code(); }
RegisterKind mode() const { return mode_; }
int num_registers() const { return num_registers_; }
+ int num_allocatable_registers() const { return num_allocatable_registers_; }
+ int allocatable_register_code(int allocatable_index) const {
+ return allocatable_register_codes_[allocatable_index];
+ }
+
+ // TODO(mtrofin): explain why splitting in gap START is always OK.
+ LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ int instruction_index);
Zone* allocation_zone() const { return data()->allocation_zone(); }
+ // Find the optimal split for ranges defined by a memory operand, e.g.
+ // constants or function parameters passed on the stack.
+ void SplitAndSpillRangesDefinedByMemoryOperand(bool operands_only);
+
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
@@ -897,6 +932,11 @@ class RegisterAllocator : public ZoneObject {
// still be owned by the original range after splitting.
LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+ bool CanProcessRange(LiveRange* range) const {
+ return range != nullptr && !range->IsEmpty() && range->kind() == mode();
+ }
+
+
// Split the given range in a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end);
@@ -921,6 +961,8 @@ class RegisterAllocator : public ZoneObject {
RegisterAllocationData* const data_;
const RegisterKind mode_;
const int num_registers_;
+ int num_allocatable_registers_;
+ const int* allocatable_register_codes_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
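
The new num_allocatable_registers_/allocatable_register_codes_ pair reflects the switch from dense allocation indices to real register codes: the allocator iterates only the allocatable codes the RegisterConfiguration hands it, while arrays such as free_until_pos stay indexed by code. A sketch factoring out the loop shape repeated in TryAllocateFreeReg and AllocateBlockedReg above:

// Pick the best register among the *allocatable* codes only; `score` is
// indexed by register code (e.g. free_until_pos or use_pos above).
int PickBestRegister(const LifetimePosition* score) const {
  int best = allocatable_register_code(0);
  for (int i = 1; i < num_allocatable_registers(); ++i) {
    int code = allocatable_register_code(i);
    if (score[code] > score[best]) best = code;
  }
  return best;
}
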
diff --git a/deps/v8/src/compiler/register-configuration.cc b/deps/v8/src/compiler/register-configuration.cc
deleted file mode 100644
index ebe6cfe23c..0000000000
--- a/deps/v8/src/compiler/register-configuration.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/register-configuration.h"
-#include "src/globals.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
- Register::kNumRegisters);
-STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
- DoubleRegister::kMaxNumRegisters);
-
-class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
- public:
- ArchDefaultRegisterConfiguration()
- : RegisterConfiguration(Register::kMaxNumAllocatableRegisters,
-#if V8_TARGET_ARCH_X87
- 1,
- 1,
-#else
- DoubleRegister::NumAllocatableRegisters(),
- DoubleRegister::NumAllocatableAliasedRegisters(),
-#endif
- general_register_name_table_,
- double_register_name_table_) {
- DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
- Register::NumAllocatableRegisters());
- for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
- general_register_name_table_[i] = Register::AllocationIndexToString(i);
- }
- DCHECK_GE(DoubleRegister::kMaxNumAllocatableRegisters,
- DoubleRegister::NumAllocatableRegisters());
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_name_table_[i] =
- DoubleRegister::AllocationIndexToString(i);
- }
- }
-
- const char*
- general_register_name_table_[Register::kMaxNumAllocatableRegisters];
- const char*
- double_register_name_table_[DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-static base::LazyInstance<ArchDefaultRegisterConfiguration>::type
- kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-
-const RegisterConfiguration* RegisterConfiguration::ArchDefault() {
- return &kDefaultRegisterConfiguration.Get();
-}
-
-RegisterConfiguration::RegisterConfiguration(
- int num_general_registers, int num_double_registers,
- int num_aliased_double_registers, const char* const* general_register_names,
- const char* const* double_register_names)
- : num_general_registers_(num_general_registers),
- num_double_registers_(num_double_registers),
- num_aliased_double_registers_(num_aliased_double_registers),
- general_register_names_(general_register_names),
- double_register_names_(double_register_names) {}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/register-configuration.h b/deps/v8/src/compiler/register-configuration.h
deleted file mode 100644
index f0d58735ba..0000000000
--- a/deps/v8/src/compiler/register-configuration.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
-#define V8_COMPILER_REGISTER_CONFIGURATION_H_
-
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// An architecture independent representation of the sets of registers available
-// for instruction creation.
-class RegisterConfiguration {
- public:
- // Architecture independent maxes.
- static const int kMaxGeneralRegisters = 32;
- static const int kMaxDoubleRegisters = 32;
-
- static const RegisterConfiguration* ArchDefault();
-
- RegisterConfiguration(int num_general_registers, int num_double_registers,
- int num_aliased_double_registers,
- const char* const* general_register_name,
- const char* const* double_register_name);
-
- int num_general_registers() const { return num_general_registers_; }
- int num_double_registers() const { return num_double_registers_; }
- int num_aliased_double_registers() const {
- return num_aliased_double_registers_;
- }
-
- const char* general_register_name(int offset) const {
- DCHECK(offset >= 0 && offset < kMaxGeneralRegisters);
- return general_register_names_[offset];
- }
- const char* double_register_name(int offset) const {
- DCHECK(offset >= 0 && offset < kMaxDoubleRegisters);
- return double_register_names_[offset];
- }
-
- private:
- const int num_general_registers_;
- const int num_double_registers_;
- const int num_aliased_double_registers_;
- const char* const* general_register_names_;
- const char* const* double_register_names_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 0c38e020ad..98de04d3a5 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -21,10 +21,8 @@ namespace compiler {
// Eagerly folds any representation changes for constants.
class RepresentationChanger {
public:
- RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
- Isolate* isolate)
+ RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
: jsgraph_(jsgraph),
- simplified_(simplified),
isolate_(isolate),
testing_type_errors_(false),
type_error_(false) {}
@@ -333,6 +331,12 @@ class RepresentationChanger {
return machine()->Int32Div();
case IrOpcode::kNumberModulus:
return machine()->Int32Mod();
+ case IrOpcode::kNumberBitwiseOr:
+ return machine()->Word32Or();
+ case IrOpcode::kNumberBitwiseXor:
+ return machine()->Word32Xor();
+ case IrOpcode::kNumberBitwiseAnd:
+ return machine()->Word32And();
case IrOpcode::kNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
@@ -413,7 +417,6 @@ class RepresentationChanger {
private:
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder* simplified_;
Isolate* isolate_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -453,7 +456,7 @@ class RepresentationChanger {
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate()->factory(); }
- SimplifiedOperatorBuilder* simplified() { return simplified_; }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index aa9a7cfdb2..2c53acf1e5 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -1394,6 +1394,8 @@ class ScheduleLateNodeVisitor {
// Schedule the node or a floating control structure.
if (IrOpcode::IsMergeOpcode(node->opcode())) {
ScheduleFloatingControl(block, node);
+ } else if (node->opcode() == IrOpcode::kFinishRegion) {
+ ScheduleRegion(block, node);
} else {
ScheduleNode(block, node);
}
@@ -1572,6 +1574,34 @@ class ScheduleLateNodeVisitor {
scheduler_->FuseFloatingControl(block, node);
}
+ void ScheduleRegion(BasicBlock* block, Node* region_end) {
+ // We only allow regions of instructions connected into a linear
+ // effect chain. The only value a node in the chain may produce is
+ // the value consumed by the FinishRegion node.
+
+ // We schedule back to front; we first schedule FinishRegion.
+ CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
+ ScheduleNode(block, region_end);
+
+ // Schedule the chain.
+ Node* node = NodeProperties::GetEffectInput(region_end);
+ while (node->opcode() != IrOpcode::kBeginRegion) {
+ DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ DCHECK_EQ(0, node->op()->ControlOutputCount());
+ // The value output (if there is any) must be consumed
+ // by the FinishRegion node.
+ DCHECK(node->op()->ValueOutputCount() == 0 ||
+ node == region_end->InputAt(0));
+ ScheduleNode(block, node);
+ node = NodeProperties::GetEffectInput(node);
+ }
+ // Schedule the BeginRegion node.
+ DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+ ScheduleNode(block, node);
+ }
+
void ScheduleNode(BasicBlock* block, Node* node) {
schedule_->PlanNode(block, node);
scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
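
ScheduleRegion relies on the region being a straight line in the effect graph, so it can be scheduled back to front with nothing but effect-input hops. A standalone model of that walk, with Node reduced to an opcode plus a single effect edge (a stand-in; real TurboFan nodes carry full operator and edge machinery):

#include <cassert>
#include <vector>

enum class Op { kBeginRegion, kFinishRegion, kOther };
struct Node {
  Op op;
  Node* effect_input;  // the single incoming effect edge
};

// Schedule a region back to front: start at FinishRegion and walk the
// linear effect chain until the BeginRegion node is reached.
void ScheduleRegion(std::vector<Node*>* block, Node* region_end) {
  assert(region_end->op == Op::kFinishRegion);
  block->push_back(region_end);
  Node* node = region_end->effect_input;
  while (node->op != Op::kBeginRegion) {
    block->push_back(node);
    node = node->effect_input;
  }
  block->push_back(node);  // finally the BeginRegion itself
}
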
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 7d495bf983..80c7ff5a94 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -6,6 +6,7 @@
#include <limits>
+#include "src/address-map.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
@@ -331,9 +332,7 @@ class RepresentationSelector {
if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
// multiple uses, but we are within 32 bits range => pick kRepWord32.
return kRepWord32;
- } else if (((use & kRepMask) == kRepWord32 &&
- !CanObserveNonWord32(use)) ||
- (use & kTypeMask) == kTypeInt32 ||
+ } else if ((use & kTypeMask) == kTypeInt32 ||
(use & kTypeMask) == kTypeUint32) {
// We only use 32 bits or we use the result consistently.
return kRepWord32;
@@ -474,47 +473,29 @@ class RepresentationSelector {
bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
return BothInputsAre(node, Type::Signed32()) &&
- (!CanObserveNonInt32(use) ||
+ (!CanObserveNonWord32(use) ||
NodeProperties::GetType(node)->Is(Type::Signed32()));
}
- bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
+ bool CanLowerToWord32AdditiveBinop(Node* node, MachineTypeUnion use) {
return BothInputsAre(node, safe_int_additive_range_) &&
- !CanObserveNonInt32(use);
+ !CanObserveNonWord32(use);
}
bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
return BothInputsAre(node, Type::Unsigned32()) &&
- (!CanObserveNonUint32(use) ||
+ (!CanObserveNonWord32(use) ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()));
}
- bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, safe_int_additive_range_) &&
- !CanObserveNonUint32(use);
- }
-
bool CanObserveNonWord32(MachineTypeUnion use) {
- return (use & ~(kTypeUint32 | kTypeInt32)) != 0;
- }
-
- bool CanObserveNonInt32(MachineTypeUnion use) {
- return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveMinusZero(MachineTypeUnion use) {
- // TODO(turbofan): technically Uint32 cannot observe minus zero either.
- return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
+ return (use & kTypeMask & ~(kTypeInt32 | kTypeUint32)) != 0;
}
bool CanObserveNaN(MachineTypeUnion use) {
return (use & (kTypeNumber | kTypeAny)) != 0;
}
- bool CanObserveNonUint32(MachineTypeUnion use) {
- return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
- }
-
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, MachineTypeUnion use,
@@ -644,22 +625,16 @@ class RepresentationSelector {
// => signed Int32Add/Sub
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (CanLowerToInt32AdditiveBinop(node, use)) {
- // => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeInt32);
- ProcessTruncateWord32Input(node, 1, kTypeInt32);
- SetOutput(node, kMachInt32);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else if (CanLowerToUint32Binop(node, use)) {
// => unsigned Int32Add/Sub
VisitUint32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- } else if (CanLowerToUint32AdditiveBinop(node, use)) {
+ } else if (CanLowerToWord32AdditiveBinop(node, use)) {
// => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeUint32);
- ProcessTruncateWord32Input(node, 1, kTypeUint32);
- SetOutput(node, kMachUint32);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ ProcessTruncateWord32Input(node, 0, kTypeInt32);
+ ProcessTruncateWord32Input(node, 1, kTypeInt32);
+ SetOutput(node, kMachInt32);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Add/Sub
VisitFloat64Binop(node);
@@ -718,6 +693,13 @@ class RepresentationSelector {
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
+ case IrOpcode::kNumberBitwiseOr:
+ case IrOpcode::kNumberBitwiseXor:
+ case IrOpcode::kNumberBitwiseAnd: {
+ VisitInt32Binop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ break;
+ }
case IrOpcode::kNumberShiftLeft: {
VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shl());
@@ -914,18 +896,16 @@ class RepresentationSelector {
if (lower()) lowering->DoStoreElement(node);
break;
}
+ case IrOpcode::kObjectIsNumber: {
+ ProcessInput(node, 0, kMachAnyTagged);
+ SetOutput(node, kRepBit | kTypeBool);
+ if (lower()) lowering->DoObjectIsNumber(node);
+ break;
+ }
case IrOpcode::kObjectIsSmi: {
ProcessInput(node, 0, kMachAnyTagged);
SetOutput(node, kRepBit | kTypeBool);
- if (lower()) {
- Node* is_tagged = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->IntPtrConstant(kSmiTagMask));
- Node* is_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->IntPtrConstant(kSmiTag));
- DeferReplacement(node, is_smi);
- }
+ if (lower()) lowering->DoObjectIsSmi(node);
break;
}
@@ -1146,14 +1126,6 @@ class RepresentationSelector {
};
-Node* SimplifiedLowering::IsTagged(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
- return graph()->NewNode(machine()->WordAnd(), node,
- jsgraph()->Int32Constant(kSmiTagMask));
-}
-
-
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
@@ -1163,44 +1135,52 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
void SimplifiedLowering::LowerAllNodes() {
- SimplifiedOperatorBuilder simplified(graph()->zone());
- RepresentationChanger changer(jsgraph(), &simplified, jsgraph()->isolate());
+ RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
RepresentationSelector selector(jsgraph(), zone_, &changer,
source_positions_);
selector.Run(this);
}
-Node* SimplifiedLowering::Untag(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
- return graph()->NewNode(machine()->WordSar(), node, shift_amount);
-}
-
-
-Node* SimplifiedLowering::SmiTag(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
- return graph()->NewNode(machine()->WordShl(), node, shift_amount);
-}
-
-
-Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
- return jsgraph()->Int32Constant(offset - kHeapObjectTag);
-}
-
-
namespace {
WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
MachineType representation,
- Type* type) {
- if (type->Is(Type::TaggedSigned())) {
+ Type* field_type, Type* input_type) {
+ if (field_type->Is(Type::TaggedSigned()) ||
+ input_type->Is(Type::TaggedSigned())) {
// Write barriers are only for writes of heap objects.
return kNoWriteBarrier;
}
+ if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // Write barriers are not necessary when storing true, false, null or
+ // undefined, because these special oddballs are always in the root set.
+ return kNoWriteBarrier;
+ }
if (base_is_tagged == kTaggedBase &&
RepresentationOf(representation) == kRepTagged) {
+ if (input_type->IsConstant() &&
+ input_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<HeapObject> input =
+ Handle<HeapObject>::cast(input_type->AsConstant()->Value());
+ if (input->IsMap()) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ Isolate* const isolate = input->GetIsolate();
+ RootIndexMap root_index_map(isolate);
+ int root_index = root_index_map.Lookup(*input);
+ if (root_index != RootIndexMap::kInvalidRootIndex &&
+ isolate->heap()->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
+ }
+ if (field_type->Is(Type::TaggedPointer()) ||
+ input_type->Is(Type::TaggedPointer())) {
+ // Write barriers for heap objects don't need a Smi check.
+ return kPointerWriteBarrier;
+ }
// Write barriers are only for writes into heap objects (i.e. tagged base).
return kFullWriteBarrier;
}
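
The elision rules above form a short decision ladder: Smis and the oddballs need no barrier, maps get the cheaper map barrier, immortal immovable roots need none, statically known heap pointers skip the Smi check, and everything else pays for the full barrier. A standalone sketch with the Type-lattice and heap queries flattened to booleans (an assumption; the real code consults field and input types and the root list):

enum class WriteBarrierKind { kNo, kMap, kPointer, kFull };

// Flattened inputs; in V8 these come from Type predicates and heap lookups.
struct StoreFacts {
  bool value_is_smi;            // field or input type is TaggedSigned
  bool value_is_oddball;        // true, false, null or undefined
  bool value_is_map;            // a constant Map object is being stored
  bool value_is_immortal_root;  // immortal immovable root-set object
  bool value_is_heap_pointer;   // statically known TaggedPointer
};

WriteBarrierKind ComputeKind(const StoreFacts& f) {
  if (f.value_is_smi || f.value_is_oddball) return WriteBarrierKind::kNo;
  if (f.value_is_map) return WriteBarrierKind::kMap;
  if (f.value_is_immortal_root) return WriteBarrierKind::kNo;
  if (f.value_is_heap_pointer) return WriteBarrierKind::kPointer;
  return WriteBarrierKind::kFull;
}
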
@@ -1212,18 +1192,32 @@ WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
void SimplifiedLowering::DoAllocate(Node* node) {
PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
- AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
- Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
- Operator::Properties props = node->op()->properties();
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
- ExternalReference ref(f, jsgraph()->isolate());
- int32_t flags = AllocateTargetSpace::encode(space);
- node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
- node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
- node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
- node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
- node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ if (pretenure == NOT_TENURED) {
+ Callable callable = CodeFactory::AllocateInNewSpace(isolate());
+ Node* target = jsgraph()->HeapConstant(callable.code());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ const Operator* op = common()->Call(descriptor);
+ node->InsertInput(graph()->zone(), 0, target);
+ node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, op);
+ } else {
+ DCHECK_EQ(TENURED, pretenure);
+ AllocationSpace space = OLD_SPACE;
+ Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
+ Operator::Properties props = node->op()->properties();
+ CallDescriptor* desc =
+ Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
+ ExternalReference ref(f, jsgraph()->isolate());
+ int32_t flags = AllocateTargetSpace::encode(space);
+ node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
+ node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ }
}
@@ -1238,8 +1232,8 @@ void SimplifiedLowering::DoLoadField(Node* node) {
void SimplifiedLowering::DoStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Type* type = NodeProperties::GetType(node->InputAt(1));
- WriteBarrierKind kind =
- ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type, type);
+ WriteBarrierKind kind = ComputeWriteBarrierKind(
+ access.base_is_tagged, access.machine_type, access.type, type);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
@@ -1347,10 +1341,47 @@ void SimplifiedLowering::DoStoreElement(Node* node) {
Type* type = NodeProperties::GetType(node->InputAt(2));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type,
- ComputeWriteBarrierKind(access.base_is_tagged,
- access.machine_type, type))));
+ node,
+ machine()->Store(StoreRepresentation(
+ access.machine_type,
+ ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
+ access.type, type))));
+}
+
+
+void SimplifiedLowering::DoObjectIsNumber(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // TODO(bmeurer): Optimize somewhat based on input type.
+ Node* check =
+ graph()->NewNode(machine()->WordEqual(),
+ graph()->NewNode(machine()->WordAnd(), input,
+ jsgraph()->IntPtrConstant(kSmiTagMask)),
+ jsgraph()->IntPtrConstant(kSmiTag));
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->Int32Constant(1);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(
+ machine()->WordEqual(),
+ graph()->NewNode(
+ machine()->Load(kMachAnyTagged), input,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ graph()->start(), if_false),
+ jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
+ Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ node->ReplaceInput(0, vtrue);
+ node->AppendInput(graph()->zone(), vfalse);
+ node->AppendInput(graph()->zone(), control);
+ NodeProperties::ChangeOp(node, common()->Phi(kMachBool, 2));
+}
+
+
+void SimplifiedLowering::DoObjectIsSmi(Node* node) {
+ node->ReplaceInput(0,
+ graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
+ jsgraph()->IntPtrConstant(kSmiTagMask)));
+ node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
+ NodeProperties::ChangeOp(node, machine()->WordEqual());
}
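
DoObjectIsSmi rewrites the node in place into the raw tag test: mask off the low tag bits and compare against the Smi tag. Assuming V8's usual scheme where the low bit is 0 for Smis and 1 for heap pointers, the emitted WordAnd/WordEqual pair is equivalent to this plain function:

#include <cstdint>

constexpr uintptr_t kSmiTagMask = 1;  // assumed tagging: low bit selects Smi
constexpr uintptr_t kSmiTag = 0;      // Smis carry tag 0, heap pointers tag 1

// The WordAnd + WordEqual pair that DoObjectIsSmi emits, as plain C++.
bool ObjectIsSmi(uintptr_t tagged_value) {
  return (tagged_value & kSmiTagMask) == kSmiTag;
}
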
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 4b9e86b786..49662a60c6 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -37,6 +37,8 @@ class SimplifiedLowering final {
void DoStoreBuffer(Node* node);
void DoLoadElement(Node* node);
void DoStoreElement(Node* node);
+ void DoObjectIsNumber(Node* node);
+ void DoObjectIsSmi(Node* node);
void DoShift(Node* node, Operator const* op);
void DoStringEqual(Node* node);
void DoStringLessThan(Node* node);
@@ -54,10 +56,6 @@ class SimplifiedLowering final {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Node* SmiTag(Node* node);
- Node* IsTagged(Node* node);
- Node* Untag(Node* node);
- Node* OffsetMinusTagConstant(int32_t offset);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Node* StringComparison(Node* node);
Node* Int32Div(Node* const node);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index a7f790563e..acd0f66ef6 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -15,7 +15,7 @@ namespace internal {
namespace compiler {
SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+ : jsgraph_(jsgraph) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index c302250d26..831090ac9f 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -15,6 +14,7 @@ namespace compiler {
// Forward declarations.
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
class SimplifiedOperatorReducer final : public Reducer {
@@ -37,10 +37,9 @@ class SimplifiedOperatorReducer final : public Reducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
- SimplifiedOperatorBuilder simplified_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 8432d21d95..62dc8df621 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -168,6 +168,9 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(NumberMultiply, Operator::kCommutative, 2) \
V(NumberDivide, Operator::kNoProperties, 2) \
V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2) \
V(NumberShiftLeft, Operator::kNoProperties, 2) \
V(NumberShiftRight, Operator::kNoProperties, 2) \
V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
@@ -182,6 +185,7 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
V(ChangeBoolToBit, Operator::kNoProperties, 1) \
V(ChangeBitToBool, Operator::kNoProperties, 1) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1) \
V(ObjectIsSmi, Operator::kNoProperties, 1)
#define NO_THROW_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 53b6b044a1..ee6b8930b9 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -125,7 +125,7 @@ ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// - Bool: a tagged pointer to either the canonical JS #false or
// the canonical JS #true object
// - Bit: an untagged integer 0 or 1, but word-sized
-class SimplifiedOperatorBuilder final {
+class SimplifiedOperatorBuilder final : public ZoneObject {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
@@ -140,6 +140,9 @@ class SimplifiedOperatorBuilder final {
const Operator* NumberMultiply();
const Operator* NumberDivide();
const Operator* NumberModulus();
+ const Operator* NumberBitwiseOr();
+ const Operator* NumberBitwiseXor();
+ const Operator* NumberBitwiseAnd();
const Operator* NumberShiftLeft();
const Operator* NumberShiftRight();
const Operator* NumberShiftRightLogical();
@@ -163,6 +166,7 @@ class SimplifiedOperatorBuilder final {
const Operator* ChangeBoolToBit();
const Operator* ChangeBitToBool();
+ const Operator* ObjectIsNumber();
const Operator* ObjectIsSmi();
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 4707aef1e5..d6a4a58fa0 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -5,8 +5,8 @@
#include "src/compiler/typer.h"
#include "src/base/flags.h"
-#include "src/base/lazy-instance.h"
#include "src/bootstrapper.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -14,19 +14,12 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
-#include "src/zone-type-cache.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-
class Typer::Decorator final : public GraphDecorator {
public:
explicit Decorator(Typer* typer) : typer_(typer) {}
@@ -37,12 +30,16 @@ class Typer::Decorator final : public GraphDecorator {
};
-Typer::Typer(Isolate* isolate, Graph* graph, Type::FunctionType* function_type)
+Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
+ CompilationDependencies* dependencies,
+ Type::FunctionType* function_type)
: isolate_(isolate),
graph_(graph),
+ flags_(flags),
+ dependencies_(dependencies),
function_type_(function_type),
decorator_(nullptr),
- cache_(kCache.Get()) {
+ cache_(TypeCache::Get()) {
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
@@ -204,6 +201,10 @@ class Typer::Visitor : public Reducer {
Zone* zone() { return typer_->zone(); }
Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
+ Typer::Flags flags() const { return typer_->flags(); }
+ CompilationDependencies* dependencies() const {
+ return typer_->dependencies();
+ }
void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
bool IsWeakened(NodeId node_id) {
@@ -230,7 +231,11 @@ class Typer::Visitor : public Reducer {
static Type* ToPrimitive(Type*, Typer*);
static Type* ToBoolean(Type*, Typer*);
+ static Type* ToInteger(Type*, Typer*);
+ static Type* ToLength(Type*, Typer*);
+ static Type* ToName(Type*, Typer*);
static Type* ToNumber(Type*, Typer*);
+ static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
static Type* NumberToInt32(Type*, Typer*);
static Type* NumberToUint32(Type*, Typer*);
@@ -252,6 +257,8 @@ class Typer::Visitor : public Reducer {
static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
+ static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+
Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
// Widen the type of a previously typed node.
@@ -261,10 +268,10 @@ class Typer::Visitor : public Reducer {
current = Weaken(node, current, previous);
}
- DCHECK(previous->Is(current));
+ CHECK(previous->Is(current));
NodeProperties::SetType(node, current);
- if (!(previous->Is(current) && current->Is(previous))) {
+ if (!current->Is(previous)) {
// If something changed, revisit all uses.
return Changed(node);
}
@@ -399,6 +406,39 @@ Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
}
+// static
+Type* Typer::Visitor::ToInteger(Type* type, Typer* t) {
+ // ES6 section 7.1.4 ToInteger ( argument )
+ type = ToNumber(type, t);
+ if (type->Is(t->cache_.kIntegerOrMinusZero)) return type;
+ return t->cache_.kIntegerOrMinusZero;
+}
+
+
+// static
+Type* Typer::Visitor::ToLength(Type* type, Typer* t) {
+ // ES6 section 7.1.15 ToLength ( argument )
+ type = ToInteger(type, t);
+ double min = type->Min();
+ double max = type->Max();
+ if (min <= 0.0) min = 0.0;
+ if (max > kMaxSafeInteger) max = kMaxSafeInteger;
+ if (max <= min) max = min;
+ return Type::Range(min, max, t->zone());
+}
+
+
+// static
+Type* Typer::Visitor::ToName(Type* type, Typer* t) {
+ // ES6 section 7.1.14 ToPropertyKey ( argument )
+ type = ToPrimitive(type, t);
+ if (type->Is(Type::Name())) return type;
+ if (type->Maybe(Type::Symbol())) return Type::Name();
+ return ToString(type, t);
+}
+
+
+// static
Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
if (type->Is(Type::Number())) return type;
if (type->Is(Type::NullOrUndefined())) {
@@ -421,7 +461,20 @@ Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
}
+// static
+Type* Typer::Visitor::ToObject(Type* type, Typer* t) {
+ // ES6 section 7.1.13 ToObject ( argument )
+ if (type->Is(Type::Receiver())) return type;
+ if (type->Is(Type::Primitive())) return Type::OtherObject();
+ if (!type->Maybe(Type::Undetectable())) return Type::DetectableReceiver();
+ return Type::Receiver();
+}
+
+
+// static
Type* Typer::Visitor::ToString(Type* type, Typer* t) {
+ // ES6 section 7.1.12 ToString ( argument )
+ type = ToPrimitive(type, t);
if (type->Is(Type::String())) return type;
return Type::String();
}
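
ToLength above is pure interval arithmetic: run ToInteger, then clamp the range endpoints into [0, 2^53 - 1]. The same clamp as a standalone function over a [min, max] pair:

#include <utility>

// ES6 7.1.15 ToLength on a numeric range, assuming the input is already an
// integer range (i.e. ToInteger has run). kMaxSafeInteger is 2^53 - 1.
constexpr double kMaxSafeInteger = 9007199254740991.0;

std::pair<double, double> ToLengthRange(double min, double max) {
  if (min <= 0.0) min = 0.0;
  if (max > kMaxSafeInteger) max = kMaxSafeInteger;
  if (max <= min) max = min;  // keep the range well-formed
  return {min, max};
}

For example, an input range of [-5, 10] comes out as [0, 10], matching the typer's Range(0, 10).
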
@@ -485,7 +538,7 @@ Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeInt32Constant(Node* node) {
double number = OpParameter<int32_t>(node);
return Type::Intersect(Type::Range(number, number, zone()),
- Type::UntaggedSigned32(), zone());
+ Type::UntaggedIntegral32(), zone());
}
@@ -554,13 +607,20 @@ Type* Typer::Visitor::TypeEffectSet(Node* node) {
}
-Type* Typer::Visitor::TypeValueEffect(Node* node) {
+Type* Typer::Visitor::TypeGuard(Node* node) {
+ Type* input_type = Operand(node, 0);
+ Type* guard_type = OpParameter<Type*>(node);
+ return Type::Intersect(input_type, guard_type, zone());
+}
+
+
+Type* Typer::Visitor::TypeBeginRegion(Node* node) {
UNREACHABLE();
return nullptr;
}
-Type* Typer::Visitor::TypeFinish(Node* node) { return Operand(node, 0); }
+Type* Typer::Visitor::TypeFinishRegion(Node* node) { return Operand(node, 0); }
Type* Typer::Visitor::TypeFrameState(Node* node) {
@@ -985,7 +1045,7 @@ Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
(rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
(rhs->Maybe(t->cache_.kSingletonZero) &&
(lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->cache_.kWeakint; // Giving up.
+ if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
(rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
Type* range =
@@ -1092,6 +1152,8 @@ Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
return Type::Constant(f->boolean_string(), t->zone());
} else if (type->Is(Type::Number())) {
return Type::Constant(f->number_string(), t->zone());
+ } else if (type->Is(Type::String())) {
+ return Type::Constant(f->string_string(), t->zone());
} else if (type->Is(Type::Symbol())) {
return Type::Constant(f->symbol_string(), t->zone());
} else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
@@ -1099,6 +1161,11 @@ Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
return Type::Constant(f->undefined_string(), t->zone());
} else if (type->Is(Type::Null())) {
return Type::Constant(f->object_string(), t->zone());
+ } else if (type->Is(Type::Function())) {
+ return Type::Constant(f->function_string(), t->zone());
+ } else if (type->IsConstant()) {
+ return Type::Constant(
+ Object::TypeOf(t->isolate(), type->AsConstant()->Value()), t->zone());
}
return Type::InternalizedString();
}
@@ -1127,10 +1194,14 @@ Type* Typer::Visitor::TypeJSToString(Node* node) {
}
-Type* Typer::Visitor::TypeJSToName(Node* node) { return Type::Name(); }
+Type* Typer::Visitor::TypeJSToName(Node* node) {
+ return TypeUnaryOp(node, ToName);
+}
-Type* Typer::Visitor::TypeJSToObject(Node* node) { return Type::Receiver(); }
+Type* Typer::Visitor::TypeJSToObject(Node* node) {
+ return TypeUnaryOp(node, ToObject);
+}
// JS object operators.
@@ -1309,14 +1380,7 @@ Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
}
-Type* Typer::Visitor::TypeJSLoadDynamicGlobal(Node* node) {
- return Type::Any();
-}
-
-
-Type* Typer::Visitor::TypeJSLoadDynamicContext(Node* node) {
- return Type::Any();
-}
+Type* Typer::Visitor::TypeJSLoadDynamic(Node* node) { return Type::Any(); }
Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
@@ -1373,12 +1437,64 @@ Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
- return fun->IsFunction() ? fun->AsFunction()->Result() : Type::Any();
+ if (fun->IsFunction()) {
+ return fun->AsFunction()->Result();
+ }
+ if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(fun->AsConstant()->Value());
+ if (function->shared()->HasBuiltinFunctionId()) {
+ switch (function->shared()->builtin_function_id()) {
+ case kMathRandom:
+ return Type::OrderedNumber();
+ case kMathFloor:
+ case kMathRound:
+ case kMathCeil:
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+ // Unary math functions.
+ case kMathAbs:
+ case kMathLog:
+ case kMathExp:
+ case kMathSqrt:
+ case kMathCos:
+ case kMathSin:
+ case kMathTan:
+ case kMathAcos:
+ case kMathAsin:
+ case kMathAtan:
+ case kMathFround:
+ return Type::Number();
+ // Binary math functions.
+ case kMathAtan2:
+ case kMathPow:
+ case kMathMax:
+ case kMathMin:
+ return Type::Number();
+ case kMathImul:
+ return Type::Signed32();
+ case kMathClz32:
+ return t->cache_.kZeroToThirtyTwo;
+ // String functions.
+ case kStringCharAt:
+ case kStringFromCharCode:
+ return Type::String();
+ // Array functions.
+ case kArrayIndexOf:
+ case kArrayLastIndexOf:
+ return Type::Number();
+ default:
+ break;
+ }
+ }
+ }
+ return Type::Any();
}
Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
- return TypeUnaryOp(node, JSCallFunctionTyper); // We ignore argument types.
+ // TODO(bmeurer): We could infer better types if we didn't ignore the
+ // argument types for the JSCallFunctionTyper above.
+ return TypeUnaryOp(node, JSCallFunctionTyper);
}
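
The constant-callee case above is effectively a result-type table keyed by builtin id: a call whose target is a known builtin gets that builtin's result type instead of Any. A toy version of the table, with three representative ids standing in for the full switch:

enum class BuiltinId { kMathRandom, kMathImul, kMathSqrt, kUnknown };
enum class ResultType { kOrderedNumber, kSigned32, kNumber, kAny };

// Result-type lookup for a call to a constant builtin callee.
ResultType TypeOfCall(BuiltinId id) {
  switch (id) {
    case BuiltinId::kMathRandom: return ResultType::kOrderedNumber;
    case BuiltinId::kMathImul:   return ResultType::kSigned32;
    case BuiltinId::kMathSqrt:   return ResultType::kNumber;
    default:                     return ResultType::kAny;
  }
}
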
@@ -1408,8 +1524,22 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return Type::Range(0, 32, zone());
case Runtime::kInlineStringGetLength:
return Type::Range(0, String::kMaxLength, zone());
+ case Runtime::kInlineToInteger:
+ return TypeUnaryOp(node, ToInteger);
+ case Runtime::kInlineToLength:
+ return TypeUnaryOp(node, ToLength);
+ case Runtime::kInlineToName:
+ return TypeUnaryOp(node, ToName);
+ case Runtime::kInlineToNumber:
+ return TypeUnaryOp(node, ToNumber);
case Runtime::kInlineToObject:
- return Type::Receiver();
+ return TypeUnaryOp(node, ToObject);
+ case Runtime::kInlineToPrimitive:
+ case Runtime::kInlineToPrimitive_Number:
+ case Runtime::kInlineToPrimitive_String:
+ return TypeUnaryOp(node, ToPrimitive);
+ case Runtime::kInlineToString:
+ return TypeUnaryOp(node, ToString);
default:
break;
}
@@ -1417,6 +1547,11 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
}
+Type* Typer::Visitor::TypeJSConvertReceiver(Node* node) {
+ return Type::Receiver();
+}
+
+
Type* Typer::Visitor::TypeJSForInNext(Node* node) {
return Type::Union(Type::Name(), Type::Undefined(), zone());
}
@@ -1439,6 +1574,15 @@ Type* Typer::Visitor::TypeJSForInStep(Node* node) {
}
+Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
@@ -1493,6 +1637,21 @@ Type* Typer::Visitor::TypeNumberModulus(Node* node) {
}
+Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
return Type::Signed32(zone());
}
@@ -1523,8 +1682,17 @@ Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
}
+// static
+Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+ if (lhs->IsConstant() && rhs->Is(lhs)) {
+ return t->singleton_true_;
+ }
+ return Type::Boolean();
+}
+
+
Type* Typer::Visitor::TypeReferenceEqual(Node* node) {
- return Type::Boolean(zone());
+ return TypeBinaryOp(node, ReferenceEqualTyper);
}
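
ReferenceEqualTyper folds the comparison to the singleton true type when one side is a known constant and the other side's type is contained in it, since reference equality against a single possible object can only succeed. A toy lattice showing just that containment test (the constant_id field is a stand-in for V8's constant identity):

#include <string>

// A two-level toy lattice: a type is either one constant or "Any".
struct Type {
  bool is_constant;
  std::string constant_id;  // identity of the constant, if any
  bool Is(const Type& other) const {
    if (!other.is_constant) return true;  // everything is a subtype of Any
    return is_constant && constant_id == other.constant_id;
  }
};

// True when the comparison can be constant-folded to the singleton true.
bool ReferenceEqualFoldsToTrue(const Type& lhs, const Type& rhs) {
  return lhs.is_constant && rhs.Is(lhs);
}
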
@@ -1556,14 +1724,14 @@ Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg->Is(Type::Signed32()));
- return ChangeRepresentation(arg, Type::UntaggedSigned32(), zone());
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
- return ChangeRepresentation(arg, Type::UntaggedUnsigned32(), zone());
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
@@ -1614,8 +1782,53 @@ Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
+namespace {
+
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+ if (object_type->IsConstant() &&
+ object_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<Map> object_map(
+ Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+ if (object_map->is_stable()) return object_map;
+ } else if (object_type->IsClass()) {
+ Handle<Map> object_map = object_type->AsClass()->Map();
+ if (object_map->is_stable()) return object_map;
+ }
+ return MaybeHandle<Map>();
+}
+
+} // namespace
+
+
Type* Typer::Visitor::TypeLoadField(Node* node) {
- return FieldAccessOf(node->op()).type;
+ FieldAccess const& access = FieldAccessOf(node->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ // The type of LoadField[Map](o) is Constant(map) if map is stable and
+ // either
+ // (a) o has type Constant(object) and map == object->map, or
+ // (b) o has type Class(map),
+ // and either
+ // (1) map cannot transition further, or
+ // (2) deoptimization is enabled and we can add a code dependency on the
+ // stability of map (to guard the Constant type information).
+ Type* const object = Operand(node, 0);
+ if (object->Is(Type::None())) return Type::None();
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object).ToHandle(&object_map)) {
+ if (object_map->CanTransition()) {
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeMapStable(object_map);
+ } else {
+ return access.type;
+ }
+ }
+ Type* object_map_type = Type::Constant(object_map, zone());
+ DCHECK(object_map_type->Is(access.type));
+ return object_map_type;
+ }
+ }
+ return access.type;
}
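
The map-load typing above has two safe cases: a stable map that cannot transition is constant outright, and a stable-but-transitionable map can still be treated as constant if deoptimization is enabled and a stability dependency is recorded. Flattened into a standalone helper (the names and the MapInfo shape are assumptions for illustration):

#include <optional>
#include <string>

struct MapInfo {
  std::string name;     // stands in for the map's identity
  bool is_stable;
  bool can_transition;
};

// Returns the map's identity as the load's type when that is safe,
// std::nullopt when only the declared field type can be used.
std::optional<std::string> TypeOfMapLoad(const MapInfo& map, bool deopt_enabled,
                                         bool* record_stability_dependency) {
  if (!map.is_stable) return std::nullopt;
  if (map.can_transition) {
    if (!deopt_enabled) return std::nullopt;  // cannot guard the constant
    *record_stability_dependency = true;      // AssumeMapStable in the diff
  }
  return map.name;  // Constant(map)
}
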
@@ -1657,7 +1870,22 @@ Type* Typer::Visitor::TypeStoreElement(Node* node) {
}
-Type* Typer::Visitor::TypeObjectIsSmi(Node* node) { return Type::Boolean(); }
+Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
+ Type* arg = Operand(node, 0);
+ if (arg->Is(Type::None())) return Type::None();
+ if (arg->Is(Type::Number())) return typer_->singleton_true_;
+ if (!arg->Maybe(Type::Number())) return typer_->singleton_false_;
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
+ Type* arg = Operand(node, 0);
+ if (arg->Is(Type::None())) return Type::None();
+ if (arg->Is(Type::TaggedSigned())) return typer_->singleton_true_;
+ if (arg->Is(Type::TaggedPointer())) return typer_->singleton_false_;
+ return Type::Boolean();
+}
// Machine operators.
@@ -1698,6 +1926,14 @@ Type* Typer::Visitor::TypeWord32Equal(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
+Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
+ return Type::Integral32();
+}
+
+
Type* Typer::Visitor::TypeWord64And(Node* node) { return Type::Internal(); }
@@ -1719,6 +1955,15 @@ Type* Typer::Visitor::TypeWord64Sar(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeWord64Ror(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeWord64Clz(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
+
+
Type* Typer::Visitor::TypeWord64Equal(Node* node) { return Type::Boolean(); }
@@ -1820,12 +2065,12 @@ Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
- return Type::Intersect(Type::Unsigned32(), Type::UntaggedUnsigned32(),
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
zone());
}
@@ -1856,12 +2101,22 @@ Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
}
@@ -2029,64 +2284,7 @@ Type* Typer::Visitor::TypeCheckedStore(Node* node) {
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
- if (value->IsJSFunction()) {
- if (JSFunction::cast(*value)->shared()->HasBuiltinFunctionId()) {
- switch (JSFunction::cast(*value)->shared()->builtin_function_id()) {
- case kMathRandom:
- return typer_->cache_.kRandomFunc0;
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- return typer_->cache_.kWeakintFunc1;
- // Unary math functions.
- case kMathAbs: // TODO(rossberg): can't express overloading
- case kMathLog:
- case kMathExp:
- case kMathSqrt:
- case kMathCos:
- case kMathSin:
- case kMathTan:
- case kMathAcos:
- case kMathAsin:
- case kMathAtan:
- case kMathFround:
- return typer_->cache_.kNumberFunc1;
- // Binary math functions.
- case kMathAtan2:
- case kMathPow:
- case kMathMax:
- case kMathMin:
- return typer_->cache_.kNumberFunc2;
- case kMathImul:
- return typer_->cache_.kImulFunc;
- case kMathClz32:
- return typer_->cache_.kClz32Func;
- default:
- break;
- }
- }
- int const arity =
- JSFunction::cast(*value)->shared()->internal_formal_parameter_count();
- switch (arity) {
- case SharedFunctionInfo::kDontAdaptArgumentsSentinel:
- // Some smart optimization at work... &%$!&@+$!
- break;
- case 0:
- return typer_->cache_.kAnyFunc0;
- case 1:
- return typer_->cache_.kAnyFunc1;
- case 2:
- return typer_->cache_.kAnyFunc2;
- case 3:
- return typer_->cache_.kAnyFunc3;
- default: {
- DCHECK_LT(3, arity);
- Type** const params = zone()->NewArray<Type*>(arity);
- std::fill(&params[0], &params[arity], Type::Any(zone()));
- return Type::Function(Type::Any(zone()), arity, params, zone());
- }
- }
- } else if (value->IsJSTypedArray()) {
+ if (value->IsJSTypedArray()) {
switch (JSTypedArray::cast(*value)->type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 065262907b..3b9b31b77f 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TYPER_H_
#define V8_COMPILER_TYPER_H_
+#include "src/base/flags.h"
#include "src/compiler/graph.h"
#include "src/types.h"
@@ -12,14 +13,23 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class ZoneTypeCache;
+class CompilationDependencies;
+class TypeCache;
namespace compiler {
class Typer {
public:
- Typer(Isolate* isolate, Graph* graph,
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
+ CompilationDependencies* dependencies = nullptr,
Type::FunctionType* function_type = nullptr);
~Typer();
@@ -34,13 +44,17 @@ class Typer {
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
Isolate* isolate() const { return isolate_; }
+ Flags flags() const { return flags_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
Type::FunctionType* function_type() const { return function_type_; }
Isolate* const isolate_;
Graph* const graph_;
+ Flags const flags_;
+ CompilationDependencies* const dependencies_;
Type::FunctionType* function_type_;
Decorator* decorator_;
- ZoneTypeCache const& cache_;
+ TypeCache const& cache_;
Type* singleton_false_;
Type* singleton_true_;
@@ -52,6 +66,8 @@ class Typer {
DISALLOW_COPY_AND_ASSIGN(Typer);
};
+DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
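
DEFINE_OPERATORS_FOR_FLAGS is what lets call sites write flags() & kDeoptimizationEnabled without casts: base::Flags wraps the enum in a type-safe bitset. A minimal stand-in for that wrapper, under the assumption that only the test-and-combine operators matter here:

#include <cstdint>

enum Flag : uint32_t { kNoFlags = 0u, kDeoptimizationEnabled = 1u << 0 };

// Type-safe bitset over Flag, modeled on base::Flags<Flag>.
struct Flags {
  uint32_t bits;
  constexpr bool operator&(Flag f) const { return (bits & f) != 0; }
};

constexpr Flags operator|(Flag a, Flag b) {
  return Flags{static_cast<uint32_t>(a) | static_cast<uint32_t>(b)};
}

// Usage mirroring TypeLoadField:
//   Flags flags{kDeoptimizationEnabled};
//   if (flags & kDeoptimizationEnabled) { /* record dependency */ }
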
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 57bcef16a0..2b4bb9d092 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -413,10 +413,13 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_LT(1, effect_count);
break;
}
- case IrOpcode::kValueEffect:
+ case IrOpcode::kGuard:
+ // TODO(bmeurer): what are the constraints on these?
+ break;
+ case IrOpcode::kBeginRegion:
// TODO(rossberg): what are the constraints on these?
break;
- case IrOpcode::kFinish: {
+ case IrOpcode::kFinishRegion: {
// TODO(rossberg): what are the constraints on these?
// Type must be subsumed by input type.
if (typing == TYPED) {
@@ -541,8 +544,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSLoadContext:
- case IrOpcode::kJSLoadDynamicGlobal:
- case IrOpcode::kJSLoadDynamicContext:
+ case IrOpcode::kJSLoadDynamic:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -566,6 +568,7 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckUpperIs(node, Type::Receiver());
break;
@@ -599,6 +602,10 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
+ case IrOpcode::kJSLoadMessage:
+ case IrOpcode::kJSStoreMessage:
+ break;
+
case IrOpcode::kJSStackCheck:
// Type is empty.
CheckNotTyped(node);
@@ -635,6 +642,14 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(rossberg): activate once we retype after opcode changes.
// CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kNumberBitwiseOr:
+ case IrOpcode::kNumberBitwiseXor:
+ case IrOpcode::kNumberBitwiseAnd:
+ // (Signed32, Signed32) -> Signed32
+ CheckValueInputIs(node, 0, Type::Signed32());
+ CheckValueInputIs(node, 1, Type::Signed32());
+ CheckUpperIs(node, Type::Signed32());
+ break;
case IrOpcode::kNumberShiftLeft:
case IrOpcode::kNumberShiftRight:
// (Signed32, Unsigned32) -> Signed32
@@ -677,6 +692,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckUpperIs(node, Type::Boolean());
break;
}
+ case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsSmi:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
@@ -803,6 +819,8 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
case IrOpcode::kWord32Clz:
+ case IrOpcode::kWord32Ctz:
+ case IrOpcode::kWord32Popcnt:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
case IrOpcode::kWord64Xor:
@@ -810,6 +828,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Ror:
+ case IrOpcode::kWord64Clz:
+ case IrOpcode::kWord64Popcnt:
+ case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
@@ -864,6 +885,8 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kRoundInt64ToFloat32:
+ case IrOpcode::kRoundInt64ToFloat64:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
case IrOpcode::kBitcastFloat32ToInt32:
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index cee323e480..428558d42d 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -56,8 +56,8 @@ class ScheduleVerifier {
public:
static void Run(Schedule* schedule);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_VERIFIER_H_
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 4c991718f8..eab8fe3233 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -166,7 +166,7 @@ class OutOfLineLoadNaN final : public OutOfLineCode {
OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() final { __ pcmpeqd(result_, result_); }
+ void Generate() final { __ Pcmpeqd(result_, result_); }
private:
XMMRegister const result_;
@@ -181,7 +181,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_);
+ __ Movsd(MemOperand(rsp, 0), input_);
__ SlowTruncateToI(result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
}
@@ -191,6 +191,46 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
XMMRegister const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ leap(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
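
The out-of-line class above is the slow half of a two-sided filter. The inline store first checks the object's page for the "pointers from here are interesting" bit; only then does control reach this code, which filters the value (Smi check, then the value page's "pointers to here are interesting" bit) before calling the RecordWriteStub. A standalone model of the fully general mode (an assumption; the real code skips filters depending on RecordWriteMode):

struct PageFlags {
  bool pointers_from_here_are_interesting;  // checked inline, on the object
  bool pointers_to_here_are_interesting;    // checked out of line, on the value
};

bool NeedsRecordWrite(bool value_is_smi, PageFlags object_page,
                      PageFlags value_page) {
  if (!object_page.pointers_from_here_are_interesting) return false;  // inline
  if (value_is_smi) return false;                      // OOL: JumpIfSmi
  if (!value_page.pointers_to_here_are_interesting) return false;     // OOL
  return true;  // fall through to the RecordWriteStub call
}
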
@@ -335,7 +375,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ pcmpeqd(result_, result_); \
+ __ Pcmpeqd(result_, result_); \
__ cmpl(kScratchRegister, Immediate(length_)); \
__ j(above_equal, exit()); \
__ asm_instr(result_, \
@@ -555,8 +595,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ Call(Operand(reg, entry));
+ __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
RecordCallPosition(instr);
break;
@@ -596,6 +636,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters);
@@ -643,12 +688,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
- __ cvttsd2siq(result, input);
+ __ Cvttsd2siq(result, input);
__ cmpq(result, Immediate(1));
__ j(overflow, ool->entry());
__ bind(ool->exit());
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ movp(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -763,6 +826,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Ror:
ASSEMBLE_SHIFT(rorq, 6);
break;
+ case kX64Lzcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kX64Lzcnt32:
if (instr->InputAt(0)->IsRegister()) {
__ Lzcntl(i.OutputRegister(), i.InputRegister(0));
@@ -770,8 +840,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Lzcntl(i.OutputRegister(), i.InputOperand(0));
}
break;
+ case kX64Tzcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Tzcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Popcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Popcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Popcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Popcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Popcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Popcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kSSEFloat32Cmp:
- ASSEMBLE_SSE_BINOP(ucomiss);
+ ASSEMBLE_SSE_BINOP(Ucomiss);
break;
case kSSEFloat32Add:
ASSEMBLE_SSE_BINOP(addss);
@@ -812,10 +910,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SSE_BINOP(minss);
break;
case kSSEFloat32ToFloat64:
- ASSEMBLE_SSE_UNOP(cvtss2sd);
+ ASSEMBLE_SSE_UNOP(Cvtss2sd);
break;
case kSSEFloat64Cmp:
- ASSEMBLE_SSE_BINOP(ucomisd);
+ ASSEMBLE_SSE_BINOP(Ucomisd);
break;
case kSSEFloat64Add:
ASSEMBLE_SSE_BINOP(addsd);
@@ -830,14 +928,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SSE_BINOP(divsd);
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat64Mod: {
__ subq(rsp, Immediate(kDoubleSize));
// Move values to st(0) and st(1).
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(rsp, 0));
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(rsp, 0));
// Loop while fprem isn't done.
Label mod_loop;
@@ -860,7 +958,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(rsp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+ __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
break;
}
@@ -891,33 +989,47 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSEFloat64ToFloat32:
- ASSEMBLE_SSE_UNOP(cvtsd2ss);
+ ASSEMBLE_SSE_UNOP(Cvtsd2ss);
break;
case kSSEFloat64ToInt32:
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat64ToUint32: {
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
+ __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
}
__ AssertZeroExtended(i.OutputRegister());
break;
}
case kSSEInt32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
- __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEInt64ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEInt64ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kSSEUint32ToFloat64:
@@ -926,13 +1038,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ movl(kScratchRegister, i.InputOperand(0));
}
- __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
@@ -958,9 +1070,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kSSEFloat64LoadLowWord32:
if (instr->InputAt(0)->IsRegister()) {
- __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kAVXFloat32Cmp: {
@@ -985,7 +1097,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_AVX_BINOP(vdivss);
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat32Max:
ASSEMBLE_AVX_BINOP(vmaxss);
@@ -1015,7 +1127,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_AVX_BINOP(vdivsd);
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat64Max:
ASSEMBLE_AVX_BINOP(vmaxsd);
@@ -1025,9 +1137,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1039,9 +1151,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1053,9 +1165,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1067,9 +1179,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
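The four Abs/Neg cases above share one trick: vpcmpeqd of a register with itself yields all-one bits, and a logical shift then carves out either an all-but-sign mask (ANDed in for absolute value) or a sign-only mask (XORed in for negation). The Float32 variants shift by 33 and 31 to target the 32-bit sign bit instead. A scalar C++ illustration of the double case, not V8 code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t all_ones = ~0ULL;           // vpcmpeqd x, x, x
      uint64_t abs_mask = all_ones >> 1;   // vpsrlq ..., 1: clears the sign bit
      uint64_t neg_mask = all_ones << 63;  // vpsllq ..., 63: keeps only the sign bit

      double d = -2.5;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);

      double abs_d, neg_d;
      uint64_t masked = bits & abs_mask;   // vandpd
      std::memcpy(&abs_d, &masked, sizeof masked);
      masked = bits ^ neg_mask;            // vxorpd
      std::memcpy(&neg_d, &masked, sizeof masked);
      assert(abs_d == 2.5 && neg_d == 2.5);
      return 0;
    }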
@@ -1164,39 +1276,39 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kX64Movsd:
if (instr->HasOutput()) {
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
+ __ Movsd(operand, i.InputDoubleRegister(index));
}
break;
case kX64BitcastFI:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastDL:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movq(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movq(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastIF:
if (instr->InputAt(0)->IsRegister()) {
- __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64BitcastLD:
if (instr->InputAt(0)->IsRegister()) {
- __ movq(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64Lea32: {
@@ -1251,7 +1363,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else if (instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
__ pushq(i.InputOperand(0));
}
@@ -1266,24 +1378,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kX64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register value = i.InputRegister(2);
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (HasImmediateInput(instr, 1)) {
- int index = i.InputInt32(1);
- Register scratch = i.TempRegister(1);
- __ movq(Operand(object, index), value);
- __ RecordWriteContextSlot(object, index, value, scratch, mode);
- } else {
- Register index = i.InputRegister(1);
- __ movq(Operand(object, index, times_1, 0), value);
- __ leaq(index, Operand(object, index, times_1, 0));
- __ RecordWrite(object, index, value, mode);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -1303,10 +1397,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(movb);
@@ -1321,10 +1415,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_INTEGER(movq);
break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
break;
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -1618,7 +1712,7 @@ void CodeGenerator::AssembleReturn() {
}
size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
// Might need rcx for scratch if pop_size is too big.
- DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & rcx.bit());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
__ Ret(static_cast<int>(pop_size), rcx);
}
@@ -1720,23 +1814,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(dst, src);
+ __ Movapd(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
- __ movsd(dst, src);
+ __ Movsd(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ __ Movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(dst, xmm0);
}
} else {
UNREACHABLE();
@@ -1771,17 +1865,17 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(xmm0, src);
- __ movaps(src, dst);
- __ movaps(dst, xmm0);
+ __ Movapd(xmm0, src);
+ __ Movapd(src, dst);
+ __ Movapd(dst, xmm0);
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(src, dst);
- __ movsd(dst, xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1816,6 +1910,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
#undef __
-} // namespace internal
} // namespace compiler
+} // namespace internal
} // namespace v8
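This file's closing braces, like several later hunks in this diff (context-measure, contexts, conversions, counters), are being normalized to one annotated closing brace per namespace. The target shape, for reference:

    namespace v8 {
    namespace internal {
    namespace compiler {
    // ...
    }  // namespace compiler
    }  // namespace internal
    }  // namespace v8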
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 7d3b434d15..a9165cfaca 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -46,7 +46,12 @@ namespace compiler {
V(X64Sar32) \
V(X64Ror) \
V(X64Ror32) \
+ V(X64Lzcnt) \
V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -74,6 +79,8 @@ namespace compiler {
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
@@ -119,7 +126,6 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
- V(X64StoreWriteBarrier) \
V(X64StackCheck)
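The V(...) lists edited above are X-macros: each backend keeps a single opcode list that is expanded once to declare the enum and again elsewhere to produce matching strings or dispatch tables, so adding kX64Popcnt is a one-line change. A self-contained sketch of the pattern (the demo names are invented for illustration):

    #include <cstdio>

    #define DEMO_OPCODE_LIST(V) \
      V(X64Lzcnt)               \
      V(X64Tzcnt)               \
      V(X64Popcnt)

    enum DemoOpcode {
    #define DECLARE(Name) k##Name,
      DEMO_OPCODE_LIST(DECLARE)
    #undef DECLARE
    };

    static const char* kDemoNames[] = {
    #define STRINGIFY(Name) #Name,
        DEMO_OPCODE_LIST(STRINGIFY)
    #undef STRINGIFY
    };

    int main() {
      std::printf("%s\n", kDemoNames[kX64Popcnt]);  // prints X64Popcnt
      return 0;
    }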
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 516a9a7691..0f1dc816a1 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -157,61 +157,84 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+
+ if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
- InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister()};
- Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
- g.UseImmediate(index), g.UseFixed(value, rcx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
} else {
- InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
- Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
- g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
}
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kX64Movss;
- break;
- case kRepFloat64:
- opcode = kX64Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kX64Movb;
- break;
- case kRepWord16:
- opcode = kX64Movw;
- break;
- case kRepWord32:
- opcode = kX64Movl;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kX64Movq;
- break;
- default:
- UNREACHABLE();
- return;
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kX64Movss;
+ break;
+ case kRepFloat64:
+ opcode = kX64Movsd;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kX64Movb;
+ break;
+ case kRepWord16:
+ opcode = kX64Movw;
+ break;
+ case kRepWord32:
+ opcode = kX64Movl;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kX64Movq;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+ inputs[input_count++] = value_operand;
+ Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- inputs[input_count++] = value_operand;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
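VisitStore above now folds all barrier kinds into a single kArchStoreWithWriteBarrier instruction, carrying the RecordWriteMode in MiscField instead of pinning fixed registers as the deleted kX64StoreWriteBarrier did. The kind-to-mode mapping in isolation, a sketch mirroring the switch above:

    enum WriteBarrierKind {
      kNoWriteBarrier,
      kMapWriteBarrier,
      kPointerWriteBarrier,
      kFullWriteBarrier
    };
    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

    RecordWriteMode ModeFor(WriteBarrierKind kind) {
      switch (kind) {
        case kMapWriteBarrier:     return RecordWriteMode::kValueIsMap;
        case kPointerWriteBarrier: return RecordWriteMode::kValueIsPointer;
        case kFullWriteBarrier:    return RecordWriteMode::kValueIsAny;
        case kNoWriteBarrier:      break;  // takes the plain-store path instead
      }
      return RecordWriteMode::kValueIsAny;  // conservative default
    }

    int main() {
      return ModeFor(kMapWriteBarrier) == RecordWriteMode::kValueIsMap ? 0 : 1;
    }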
@@ -572,12 +595,42 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitWord32Clz(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -906,6 +959,18 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1060,20 +1125,10 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
X64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -1082,8 +1137,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ if (Node* input = (*arguments)[n]) {
int slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(input)
? g.UseImmediate(input)
@@ -1093,7 +1148,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(*arguments)) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
InstructionOperand value =
@@ -1106,123 +1161,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kX64Push, g.NoOutput(), value);
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- X64OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): Handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
- Emit(kX64Push, g.NoOutput(), value);
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
namespace {
@@ -1704,7 +1646,12 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
+ }
if (CpuFeatures::IsSupported(SSE4_1)) {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
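The flag computation above gates kWord32Popcnt/kWord64Popcnt on CpuFeatures::IsSupported(POPCNT), a runtime CPUID check. A standalone sketch of the underlying test, assuming a GCC/Clang toolchain (the cpuid.h helper is an illustration, not the probe V8 itself uses):

    #include <cstdio>
    #if defined(__GNUC__)
    #include <cpuid.h>
    #endif

    static bool HasPopcnt() {
    #if defined(__GNUC__)
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
      return (ecx & (1u << 23)) != 0;  // CPUID.01H:ECX.POPCNT[bit 23]
    #else
      return false;  // conservative fallback on other toolchains
    #endif
    }

    int main() {
      std::printf("POPCNT supported: %d\n", HasPopcnt() ? 1 : 0);
      return 0;
    }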
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index 9ca9a3076f..bda59bb139 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -218,6 +218,46 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
X87Register const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ lea(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
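OutOfLineRecordWrite above is the slow path of a generational write barrier: the inline store checks the object's page, and this stub additionally filters smis and values on uninteresting pages before calling RecordWriteStub. The combined filter, paraphrased in plain C++ and ignoring the RecordWriteMode gating for brevity (the Page fields are stand-ins, not the real heap API):

    struct Page {
      bool pointers_from_here_are_interesting;  // object side, checked inline
      bool pointers_to_here_are_interesting;    // value side, checked in the stub
    };

    static bool NeedsRecordWriteStub(bool value_is_smi, const Page& object_page,
                                     const Page& value_page) {
      if (value_is_smi) return false;  // __ JumpIfSmi(value_, exit())
      if (!object_page.pointers_from_here_are_interesting) return false;
      if (!value_page.pointers_to_here_are_interesting) return false;
      return true;  // fall through to the RecordWriteStub call
    }

    int main() {
      Page object_page{true, false}, value_page{false, true};
      return NeedsRecordWriteStub(false, object_page, value_page) ? 0 : 1;
    }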
@@ -312,7 +352,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
RecordCallPosition(instr);
bool double_result =
@@ -378,6 +419,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
@@ -409,6 +455,25 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ int double_register_param_count = 0;
+ int x87_layout = 0;
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ if (instr->InputAt(i)->IsDoubleRegister()) {
+ double_register_param_count++;
+ }
+ }
+ // Currently we use only one X87 register. If double_register_param_count
+ // is bigger than 1, a duplicated double register has been added to this
+ // instruction's input.
+ if (double_register_param_count > 0) {
+ x87_layout = (0 << 3) | 1;
+ }
+ // The layout of the x87 register stack is loaded onto the top of the FPU
+ // register stack for deoptimization.
+ __ push(Immediate(x87_layout));
+ __ fild_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kPointerSize));
+
AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
break;
}
@@ -431,6 +496,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ mov(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -538,6 +621,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kX87Popcnt:
+ __ Popcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
case kX87LoadFloat64Constant: {
InstructionOperand* source = instr->InputAt(0);
InstructionOperand* destination = instr->Output();
@@ -963,10 +1049,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Sqrt: {
+ __ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ fsqrt();
__ lea(esp, Operand(esp, kDoubleSize));
+ __ X87SetFPUCW(0x037F);
break;
}
case kX87Float64Round: {
@@ -1186,24 +1274,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNREACHABLE();
}
break;
- case kX87StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register value = i.InputRegister(2);
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (HasImmediateInput(instr, 1)) {
- int index = i.InputInt32(1);
- Register scratch = i.TempRegister(1);
- __ mov(Operand(object, index), value);
- __ RecordWriteContextSlot(object, index, value, scratch, mode);
- } else {
- Register index = i.InputRegister(1);
- __ mov(Operand(object, index, times_1, 0), value);
- __ lea(index, Operand(object, index, times_1, 0));
- __ RecordWrite(object, index, value, mode);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
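One x87 detail in the hunks above deserves a note: kX87Float64Sqrt now brackets fsqrt with X87SetFPUCW(0x027F) and X87SetFPUCW(0x037F). Bits 8-9 of the FPU control word select precision, so the first word rounds to 53 bits (matching SSE doubles) and the second restores the 64-bit x87 default. A decoding sketch, illustrative only:

    #include <cstdio>

    static void DescribeControlWord(unsigned cw) {
      unsigned pc = (cw >> 8) & 0x3;  // precision-control field, bits 8-9
      const char* precision = pc == 0 ? "single (24-bit)"
                            : pc == 2 ? "double (53-bit)"
                            : pc == 3 ? "extended (64-bit)"
                                      : "reserved";
      std::printf("cw=0x%04X -> precision: %s\n", cw, precision);
    }

    int main() {
      DescribeControlWord(0x027F);  // set before fsqrt
      DescribeControlWord(0x037F);  // restored afterwards
      return 0;
    }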
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
index 9408e41724..24871e94ca 100644
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -33,6 +33,7 @@ namespace compiler {
V(X87Sar) \
V(X87Ror) \
V(X87Lzcnt) \
+ V(X87Popcnt) \
V(X87Float32Cmp) \
V(X87Float32Add) \
V(X87Float32Sub) \
@@ -80,7 +81,6 @@ namespace compiler {
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87Poke) \
- V(X87StoreWriteBarrier) \
V(X87StackCheck)
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index ac868fb932..5c6f10255f 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -176,66 +176,89 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+
+ if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister()};
- Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseImmediate(index), g.UseFixed(value, ecx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
} else {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
- Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kX87Movss;
+ break;
+ case kRepFloat64:
+ opcode = kX87Movsd;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kX87Movb;
+ break;
+ case kRepWord16:
+ opcode = kX87Movw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kX87Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
}
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kX87Movss;
- break;
- case kRepFloat64:
- opcode = kX87Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kX87Movb;
- break;
- case kRepWord16:
- opcode = kX87Movw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kX87Movl;
- break;
- default:
- UNREACHABLE();
- return;
- }
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == kRepWord8 || rep == kRepBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
- InstructionOperand val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
@@ -514,6 +537,15 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
X87OperandGenerator g(this);
@@ -829,20 +861,10 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
+ const CallDescriptor* descriptor,
+ Node* node) {
X87OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -853,8 +875,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ if (Node* input = (*arguments)[n]) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(input)
? g.UseImmediate(input)
@@ -864,7 +886,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(*arguments)) {
// TODO(titzer): handle pushing double parameters.
if (input == nullptr) continue;
InstructionOperand value =
@@ -877,124 +899,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kX87Push, g.NoOutput(), value);
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- X87OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
-
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): Handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
- Emit(kX87Push, g.NoOutput(), value);
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
namespace {
@@ -1365,6 +1273,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt;
+ }
return flags;
}
diff --git a/deps/v8/src/context-measure.cc b/deps/v8/src/context-measure.cc
index da4aae498b..0b87e39614 100644
--- a/deps/v8/src/context-measure.cc
+++ b/deps/v8/src/context-measure.cc
@@ -74,5 +74,5 @@ void ContextMeasure::VisitPointers(Object** start, Object** end) {
MeasureObject(HeapObject::cast(*current));
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/context-measure.h b/deps/v8/src/context-measure.h
index f01c37418f..665c547912 100644
--- a/deps/v8/src/context-measure.h
+++ b/deps/v8/src/context-measure.h
@@ -5,7 +5,9 @@
#ifndef V8_CONTEXT_MEASURE_H_
#define V8_CONTEXT_MEASURE_H_
-#include "src/snapshot/serialize.h"
+#include "src/address-map.h"
+#include "src/assert-scope.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -17,7 +19,7 @@ class ContextMeasure : public ObjectVisitor {
int Size() { return size_; }
int Count() { return count_; }
- void VisitPointers(Object** start, Object** end);
+ void VisitPointers(Object** start, Object** end) override;
private:
void MeasureObject(HeapObject* object);
@@ -41,7 +43,7 @@ class ContextMeasure : public ObjectVisitor {
DISALLOW_COPY_AND_ASSIGN(ContextMeasure);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CONTEXT_MEASURE_H_
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index e88cd33ad3..437aacf4af 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -65,14 +65,14 @@ JSModule* Context::module() { return JSModule::cast(get(EXTENSION_INDEX)); }
void Context::set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
-GlobalObject* Context::global_object() {
+JSGlobalObject* Context::global_object() {
Object* result = get(GLOBAL_OBJECT_INDEX);
DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
- return reinterpret_cast<GlobalObject*>(result);
+ return reinterpret_cast<JSGlobalObject*>(result);
}
-void Context::set_global_object(GlobalObject* object) {
+void Context::set_global_object(JSGlobalObject* object) {
set(GLOBAL_OBJECT_INDEX, object);
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index a008d49ac3..67d19a1eff 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -118,17 +118,6 @@ String* Context::catch_name() {
}
-JSBuiltinsObject* Context::builtins() {
- GlobalObject* object = global_object();
- if (object->IsJSGlobalObject()) {
- return JSGlobalObject::cast(object)->builtins();
- } else {
- DCHECK(object->IsJSBuiltinsObject());
- return JSBuiltinsObject::cast(object);
- }
-}
-
-
Context* Context::script_context() {
Context* current = this;
while (!current->IsScriptContext()) {
@@ -144,7 +133,7 @@ Context* Context::native_context() {
// The global object has a direct pointer to the native context. If the
// following DCHECK fails, the native context is probably being accessed
// indirectly during bootstrapping. This is unsupported.
- DCHECK(global_object()->IsGlobalObject());
+ DCHECK(global_object()->IsJSGlobalObject());
return global_object()->native_context();
}
@@ -264,7 +253,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 1. Check global objects, subjects of with, and extension objects.
- if ((context->IsNativeContext() || context->IsWithContext() ||
+ if ((context->IsNativeContext() ||
+ (context->IsWithContext() && ((flags & SKIP_WITH_CONTEXT) == 0)) ||
context->IsFunctionContext() || context->IsBlockContext()) &&
context->extension_receiver() != nullptr) {
Handle<JSReceiver> object(context->extension_receiver());
@@ -384,7 +374,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 3. Prepare to continue with the previous (next outermost) context.
- if (context->IsNativeContext()) {
+ if (context->IsNativeContext() ||
+ ((flags & STOP_AT_DECLARATION_SCOPE) != 0 &&
+ context->is_declaration_context())) {
follow_context_chain = false;
} else {
context = Handle<Context>(context->previous(), isolate);
@@ -581,10 +573,20 @@ bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
- isolate->bootstrapper()->IsActive() ||
- object->IsGlobalObject();
+ isolate->bootstrapper()->IsActive() || object->IsJSGlobalObject();
}
#endif
+
+void Context::IncrementErrorsThrown() {
+ DCHECK(IsNativeContext());
+
+ int previous_value = errors_thrown()->value();
+ set_errors_thrown(Smi::FromInt(previous_value + 1));
+}
+
+
+int Context::GetErrorsThrown() { return errors_thrown()->value(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 1ca572576e..c0d7a20069 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -13,11 +13,15 @@ namespace internal {
enum ContextLookupFlags {
- FOLLOW_CONTEXT_CHAIN = 1,
- FOLLOW_PROTOTYPE_CHAIN = 2,
+ FOLLOW_CONTEXT_CHAIN = 1 << 0,
+ FOLLOW_PROTOTYPE_CHAIN = 1 << 1,
+ STOP_AT_DECLARATION_SCOPE = 1 << 2,
+ SKIP_WITH_CONTEXT = 1 << 3,
DONT_FOLLOW_CHAINS = 0,
- FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
+ FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN,
+ LEXICAL_TEST =
+ FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT,
};
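With the flags rewritten as explicit bit positions, composites like LEXICAL_TEST read as what they are: follow the context chain, stop at declaration scopes, skip with-contexts. A small check of the composition (sketch; the enum is copied from the hunk above):

    #include <cassert>

    enum ContextLookupFlags {
      FOLLOW_CONTEXT_CHAIN = 1 << 0,
      FOLLOW_PROTOTYPE_CHAIN = 1 << 1,
      STOP_AT_DECLARATION_SCOPE = 1 << 2,
      SKIP_WITH_CONTEXT = 1 << 3,
      LEXICAL_TEST =
          FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT,
    };

    int main() {
      int flags = LEXICAL_TEST;
      assert((flags & SKIP_WITH_CONTEXT) != 0);       // with-contexts skipped
      assert((flags & FOLLOW_PROTOTYPE_CHAIN) == 0);  // prototypes not walked
      return 0;
    }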
@@ -78,12 +82,10 @@ enum BindingFlags {
V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(NON_NUMBER_TO_NUMBER_INDEX, JSFunction, non_number_to_number) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun)
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)
#define NATIVE_CONTEXT_JS_BUILTINS(V) \
@@ -131,7 +133,6 @@ enum BindingFlags {
no_side_effect_to_string_fun) \
V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(OBJECT_DEFINE_OWN_PROPERTY_INDEX, JSFunction, object_define_own_property) \
V(OBJECT_GET_OWN_PROPERTY_DESCROPTOR_INDEX, JSFunction, \
object_get_own_property_descriptor) \
V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
@@ -185,6 +186,7 @@ enum BindingFlags {
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
+ V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \
V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \
V(EXTRAS_UTILS_OBJECT_INDEX, JSObject, extras_utils_object) \
V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
@@ -395,6 +397,9 @@ class Context: public FixedArray {
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
};
+ void IncrementErrorsThrown();
+ int GetErrorsThrown();
+
// Direct slot access.
inline JSFunction* closure();
inline void set_closure(JSFunction* closure);
@@ -418,16 +423,13 @@ class Context: public FixedArray {
Context* declaration_context();
bool is_declaration_context();
- inline GlobalObject* global_object();
- inline void set_global_object(GlobalObject* object);
+ inline JSGlobalObject* global_object();
+ inline void set_global_object(JSGlobalObject* object);
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
void set_global_proxy(JSObject* global);
- // The builtins object.
- JSBuiltinsObject* builtins();
-
// Get the script context by traversing the context chain.
Context* script_context();
@@ -555,6 +557,7 @@ class Context: public FixedArray {
STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CONTEXTS_H_
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 4b3ac27cf1..c05c0644da 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -70,7 +70,7 @@ inline unsigned int FastD2UI(double x) {
inline float DoubleToFloat32(double x) {
- // TODO(yanggou): This static_cast is implementation-defined behaviour in C++,
+ // TODO(yangguo): This static_cast is implementation-defined behaviour in C++,
// so we may need to do the conversion manually instead to match the spec.
volatile float f = static_cast<float>(x);
return f;
@@ -758,6 +758,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
return (sign == NEGATIVE) ? -converted : converted;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CONVERSIONS_INL_H_
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 740b2a86e4..87eca24498 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -483,16 +483,25 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
HR(gc_idle_time_limit_overshot, V8.GCIdleTimeLimit.Overshot, 0, 10000, 101) \
HR(gc_idle_time_limit_undershot, V8.GCIdleTimeLimit.Undershot, 0, 10000, \
101) \
- HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)
+ HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
+ HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
+ HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
+ HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
+ HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
+ MILLISECOND) \
HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
HT(gc_context, V8.GCContext, 10000, \
MILLISECOND) /* GC context cleanup time */ \
HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
+ HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
+ MILLISECOND) \
+ HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
+ MILLISECOND) \
HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
MILLISECOND) \
/* Parsing timers. */ \
@@ -886,6 +895,7 @@ class Counters {
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_COUNTERS_H_
diff --git a/deps/v8/src/crankshaft/OWNERS b/deps/v8/src/crankshaft/OWNERS
new file mode 100644
index 0000000000..2918dddc4c
--- /dev/null
+++ b/deps/v8/src/crankshaft/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+bmeurer@chromium.org
+danno@chromium.org
+jarin@chromium.org
+jkummerow@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/crankshaft/arm/OWNERS b/deps/v8/src/crankshaft/arm/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/crankshaft/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index 4ccb020995..09aece0b20 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/lithium-arm.h"
+#include "src/crankshaft/arm/lithium-arm.h"
#include <sstream>
-#include "src/arm/lithium-codegen-arm.h"
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -323,11 +323,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -346,12 +341,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -444,14 +433,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2141,15 +2129,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2226,7 +2205,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2234,7 +2213,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2299,7 +2280,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2311,7 +2292,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2433,19 +2415,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index 8954710e53..e534179ec8 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_LITHIUM_ARM_H_
-#define V8_ARM_LITHIUM_ARM_H_
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -143,7 +142,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1607,15 +1605,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1672,22 +1672,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2189,34 +2173,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2225,6 +2189,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2885,6 +2850,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM_LITHIUM_ARM_H_
+#endif // V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
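
LLoadKeyed and LStoreKeyed change their bases from LTemplateInstruction<1, 2, 0> and <0, 3, 0> to <1, 3, 0> and <0, 4, 0> because the template parameters size the operand arrays at compile time: the backing-store owner can only become visible to the register allocator as a real input slot. A reduced sketch of that fixed-arity pattern (class names are ours, not V8's):

```cpp
// Sketch: a fixed-arity instruction whose input count is a template
// parameter, in the spirit of LTemplateInstruction<R, I, T>. Only the
// input array is modeled here.
struct LOperand;  // opaque for the sketch

template <int I>
class FixedArityInstruction {
 protected:
  LOperand* inputs_[I];  // sized at compile time; no dynamic growth
};

// Going from 3 inputs to 4 is the only way to add an operand:
class StoreKeyedSketch : public FixedArityInstruction<4> {
 public:
  StoreKeyedSketch(LOperand* elements, LOperand* key, LOperand* value,
                   LOperand* backing_store_owner) {
    inputs_[0] = elements;
    inputs_[1] = key;
    inputs_[2] = value;
    inputs_[3] = backing_store_owner;  // the new slot
  }
  LOperand* backing_store_owner() { return inputs_[3]; }
};
```

Declaring the owner as an input is what keeps the owning object alive, and its external backing store with it, for the duration of the keyed access.
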
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index d958405e82..c1b155af49 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/lithium-codegen-arm.h"
-#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
@@ -72,7 +73,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ __ vstr(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -88,8 +89,8 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
+ __ vldr(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
@@ -114,24 +115,6 @@ bool LCodeGen::GeneratePrologue() {
// pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ ldr(r2, MemOperand(sp, receiver_offset));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(ne, &ok);
-
- __ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-
- __ str(r2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -142,7 +125,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsCodePreAgingActive());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -405,13 +387,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
- return DwVfpRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
+ return DwVfpRegister::from_code(code);
}
@@ -2759,9 +2741,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
- no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
if (instr->has_constant_parameter_count()) {
@@ -2779,10 +2760,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(lr);
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
}
@@ -2798,7 +2775,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2812,7 +2789,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2833,24 +2810,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3335,7 +3294,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ ldr(result,
ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ ldr(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3857,7 +3816,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3871,15 +3830,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Move(vector_register, vector);
__ mov(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ mov(r0, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4076,30 +4032,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
@@ -4570,7 +4502,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5311,11 +5244,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
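
In DoCallFunction above, CallFunctionStub is gone: the hydrogen node now carries a ConvertReceiverMode instead of CallFunctionFlags, the arity moves into r0, and the call goes through a shared Call builtin selected by that mode. The sketch below shows the dispatch shape; the enumerator names follow V8 4.8's ConvertReceiverMode, but the Builtins table is a stand-in:

```cpp
// Illustrative only: one prebuilt entry point per receiver-conversion
// mode, replacing a stub minted per call site.
enum class ConvertReceiverMode {
  kNullOrUndefined,     // receiver statically known to be null/undefined
  kNotNullOrUndefined,  // receiver statically known to be an object
  kAny                  // no static information; convert dynamically
};

struct Code {};  // stand-in for a code-object handle

struct Builtins {
  Code call_any, call_null_or_undefined, call_not_null_or_undefined;
  Code* Call(ConvertReceiverMode mode) {
    switch (mode) {
      case ConvertReceiverMode::kNullOrUndefined:
        return &call_null_or_undefined;
      case ConvertReceiverMode::kNotNullOrUndefined:
        return &call_not_null_or_undefined;
      case ConvertReceiverMode::kAny:
        return &call_any;
    }
    return &call_any;  // unreachable; keeps the sketch warning-free
  }
};
```

Selecting one of a few shared entry points per mode replaces instantiating a CallFunctionStub for every call site.
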
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index dc58479047..35887c1bef 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
-#include "src/arm/lithium-arm.h"
-
-#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/arm/lithium-arm.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
@@ -393,6 +392,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#endif // V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
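
The header edits repeat two conventions that recur through the rest of this diff: the include guard is derived mechanically from the new path, and the old combined `} }  // namespace v8::internal` closer becomes one annotated brace per namespace. For a hypothetical new header the full pattern is:

```cpp
// src/crankshaft/example.h -- hypothetical file, pattern only.
#ifndef V8_CRANKSHAFT_EXAMPLE_H_
#define V8_CRANKSHAFT_EXAMPLE_H_

namespace v8 {
namespace internal {

class Example;  // declarations live here

}  // namespace internal
}  // namespace v8

#endif  // V8_CRANKSHAFT_EXAMPLE_H_
```
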
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc
index e1bd47b2ec..066db7dc54 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/lithium-codegen-arm.h"
-#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h
index 88f1a7bb67..59413c5772 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -57,6 +57,7 @@ class LGapResolver final BASE_EMBEDDED {
bool need_to_restore_root_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#endif // V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
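
The Save/RestoreCallerDoubles hunks in lithium-codegen-arm.cc above show the same allocation-index removal applied to doubles: the BitVector of allocated double registers is now walked by register code and resolved with DoubleRegister::from_code. A reduced model of that loop, with the masm and BitVector replaced by standard-library stand-ins:

```cpp
#include <vector>

// Sketch of SaveCallerDoubles: walk the set bits of an
// allocated-registers bitmap and spill each double to the stack.
void SaveCallerDoubles(const std::vector<bool>& allocated_doubles,
                       std::vector<double>& stack, const double* regs) {
  int count = 0;
  for (int code = 0; code < static_cast<int>(allocated_doubles.size());
       ++code) {
    if (!allocated_doubles[code]) continue;
    // DoubleRegister::from_code(code) in the real loop; regs[code] here.
    stack[count++] = regs[code];  // __ vstr(..., MemOperand(sp, count * 8))
  }
}
```
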
diff --git a/deps/v8/src/crankshaft/arm64/OWNERS b/deps/v8/src/crankshaft/arm64/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/crankshaft/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm64/delayed-masm-arm64-inl.h b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h
index 2c44630371..503fd88ded 100644
--- a/deps/v8/src/arm64/delayed-masm-arm64-inl.h
+++ b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DELAYED_MASM_ARM64_INL_H_
-#define V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
+#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
-#include "src/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
namespace v8 {
namespace internal {
@@ -50,6 +50,7 @@ void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.cc b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc
index e86f10262f..6124706cb3 100644
--- a/deps/v8/src/arm64/delayed-masm-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc
@@ -4,8 +4,8 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/delayed-masm-arm64.h"
-#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.h b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h
index 76227a3898..5da2b72903 100644
--- a/deps/v8/src/arm64/delayed-masm-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DELAYED_MASM_ARM64_H_
-#define V8_ARM64_DELAYED_MASM_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -159,6 +159,7 @@ class DelayedMasm BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_DELAYED_MASM_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index e623718a1a..635c6dd251 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/lithium-arm64.h"
+#include "src/crankshaft/arm64/lithium-arm64.h"
#include <sstream>
-#include "src/arm64/lithium-codegen-arm64.h"
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -289,11 +289,6 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -313,12 +308,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
left()->PrintTo(stream);
@@ -375,14 +364,13 @@ const char* LArithmeticT::Mnemonic() const {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1688,15 +1676,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
@@ -1735,8 +1714,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
IsDoubleOrFloatElementsKind(instr->elements_kind())));
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LLoadKeyedExternal(elements, key, temp));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ LInstruction* result = DefineAsRegister(new (zone()) LLoadKeyedExternal(
+ elements, key, backing_store_owner, temp));
if (elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32)) {
result = AssignEnvironment(result);
@@ -2362,7 +2342,9 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
DCHECK(instr->elements()->representation().IsExternal());
- return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone())
+ LStoreKeyedExternal(elements, key, val, backing_store_owner, temp);
} else if (instr->value()->representation().IsDouble()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2449,19 +2431,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
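
Note that the arm64 builder (like the arm one above) wires the owner in with UseAny rather than UseRegister: the generated code never reads the value, it only has to stay live, and visible to the allocator, across the access, so any location (register, stack slot, or constant) is acceptable. A sketch of that policy distinction, with invented names:

```cpp
// Sketch: operand-use policies in the spirit of Lithium's Use* helpers.
// The names and the Use struct are invented for illustration.
enum class UsePolicy {
  kAny,            // must be live, but may sit anywhere
  kRegister,       // must be materialized in some register
  kFixedRegister,  // must be in one specific register
};

struct Use {
  int vreg;
  UsePolicy policy;
};

// UseAny(owner): "keep this alive across the instruction, nothing more".
Use UseAny(int vreg) { return {vreg, UsePolicy::kAny}; }
// UseRegister(elements): the code actually addresses through this value.
Use UseRegister(int vreg) { return {vreg, UsePolicy::kRegister}; }
```
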
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index a77a6da38f..52c94b4534 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_LITHIUM_ARM64_H_
-#define V8_ARM64_LITHIUM_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -102,7 +102,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
V(LoadKeyedFixedDouble) \
@@ -152,7 +151,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -1650,22 +1648,6 @@ class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1732,16 +1714,18 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-template<int T>
-class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+template <int T>
+class LLoadKeyed : public LTemplateInstruction<1, 3, T> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
this->inputs_[0] = elements;
this->inputs_[1] = key;
+ this->inputs_[2] = backing_store_owner;
}
LOperand* elements() { return this->inputs_[0]; }
LOperand* key() { return this->inputs_[1]; }
+ LOperand* backing_store_owner() { return this->inputs_[2]; }
ElementsKind elements_kind() const {
return this->hydrogen()->elements_kind();
}
@@ -1774,8 +1758,9 @@ class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
class LLoadKeyedExternal: public LLoadKeyed<1> {
public:
- LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
+ LLoadKeyedExternal(LOperand* elements, LOperand* key,
+ LOperand* backing_store_owner, LOperand* temp)
+ : LLoadKeyed<1>(elements, key, backing_store_owner) {
temps_[0] = temp;
}
@@ -1787,8 +1772,8 @@ class LLoadKeyedExternal: public LLoadKeyed<1> {
class LLoadKeyedFixed: public LLoadKeyed<1> {
public:
- LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
+ LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key, nullptr) {
temps_[0] = temp;
}
@@ -1800,8 +1785,8 @@ class LLoadKeyedFixed: public LLoadKeyed<1> {
class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
public:
- LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
+ LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key, nullptr) {
temps_[0] = temp;
}
@@ -2435,35 +2420,15 @@ class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-template<int T>
-class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+template <int T>
+class LStoreKeyed : public LTemplateInstruction<0, 4, T> {
public:
- LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
this->inputs_[0] = elements;
this->inputs_[1] = key;
this->inputs_[2] = value;
+ this->inputs_[3] = backing_store_owner;
}
bool is_external() const { return this->hydrogen()->is_external(); }
@@ -2476,6 +2441,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
LOperand* elements() { return this->inputs_[0]; }
LOperand* key() { return this->inputs_[1]; }
LOperand* value() { return this->inputs_[2]; }
+ LOperand* backing_store_owner() { return this->inputs_[3]; }
ElementsKind elements_kind() const {
return this->hydrogen()->elements_kind();
}
@@ -2515,8 +2481,8 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
class LStoreKeyedExternal final : public LStoreKeyed<1> {
public:
LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
+ LOperand* backing_store_owner, LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value, backing_store_owner) {
temps_[0] = temp;
}
@@ -2529,8 +2495,8 @@ class LStoreKeyedExternal final : public LStoreKeyed<1> {
class LStoreKeyedFixed final : public LStoreKeyed<1> {
public:
LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value, nullptr) {
temps_[0] = temp;
}
@@ -2543,8 +2509,8 @@ class LStoreKeyedFixed final : public LStoreKeyed<1> {
class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
public:
LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value, nullptr) {
temps_[0] = temp;
}
@@ -3254,6 +3220,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_LITHIUM_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
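
The arm64 header makes the split explicit: LLoadKeyedExternal and LStoreKeyedExternal take a real backing_store_owner, while the Fixed and FixedDouble variants pass nullptr up to the shared base. The likely reading (our inference, not stated in the diff): an on-heap FixedArray is itself the store and the elements input already keeps it alive, whereas typed-array data lives off-heap, so the owning object must be pinned as a separate input. A usage sketch under that assumption:

```cpp
// Sketch: pick the owner operand by elements kind. Everything here is a
// stand-in shaped after the constructors in this header.
struct LOperand {};

LOperand* BuildKeyedLoad(bool is_external, LOperand* elements, LOperand* key,
                         LOperand* owner, LOperand* temp) {
  (void)key;
  (void)temp;
  // Real code would allocate LLoadKeyedExternal or LLoadKeyedFixed here.
  return is_external ? owner : elements;  // stub body for the sketch
}

void Example(LOperand* elements, LOperand* key, LOperand* owner,
             LOperand* temp) {
  // Off-heap data: thread the owner through so the allocator sees it.
  BuildKeyedLoad(/*is_external=*/true, elements, key, owner, temp);
  // On-heap FixedArray: the elements input already pins the store.
  BuildKeyedLoad(/*is_external=*/false, elements, key, /*owner=*/nullptr,
                 temp);
}
```
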
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 108698a9ad..5c90beb68e 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+
#include "src/arm64/frames-arm64.h"
-#include "src/arm64/lithium-codegen-arm64.h"
-#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
@@ -370,7 +371,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -384,15 +385,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Mov(vector_register, vector);
__ Mov(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Mov(x0, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -593,7 +591,7 @@ void LCodeGen::SaveCallerDoubles() {
while (!iterator.Done()) {
// TODO(all): Is this supposed to save just the callee-saved doubles? It
// looks like it's saving all of them.
- FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ FPRegister value = FPRegister::from_code(iterator.Current());
__ Poke(value, count * kDoubleSize);
iterator.Advance();
count++;
@@ -611,7 +609,7 @@ void LCodeGen::RestoreCallerDoubles() {
while (!iterator.Done()) {
// TODO(all): Is this supposed to restore just the callee-saved doubles? It
// looks like it's restoring all of them.
- FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ FPRegister value = FPRegister::from_code(iterator.Current());
__ Peek(value, count * kDoubleSize);
iterator.Advance();
count++;
@@ -625,23 +623,12 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
- // TODO(all): Add support for stop_t FLAG in DEBUG mode.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
- __ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info()->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ Debug("stop-at", __LINE__, BREAK);
}
+#endif
}
DCHECK(__ StackPointer().Is(jssp));
@@ -653,7 +640,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsCodePreAgingActive());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -1158,7 +1144,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
Register LCodeGen::ToRegister(LOperand* op) const {
// TODO(all): support zero register results, as ToRegister32.
DCHECK((op != NULL) && op->IsRegister());
- return Register::FromAllocationIndex(op->index());
+ return Register::from_code(op->index());
}
@@ -1182,7 +1168,7 @@ Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
DCHECK((op != NULL) && op->IsDoubleRegister());
- return DoubleRegister::FromAllocationIndex(op->index());
+ return DoubleRegister::from_code(op->index());
}
@@ -1558,11 +1544,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
- } else {
- __ B(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
} else {
Register size = ToRegister32(instr->size());
__ Sxtw(size.X(), size);
@@ -3203,7 +3186,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
@@ -3217,7 +3200,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
@@ -3237,24 +3220,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(x0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
Register key,
Register base,
@@ -4624,11 +4589,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
Register stack_pointer = masm()->StackPointer();
__ Mov(stack_pointer, fp);
- no_frame_start = masm_->pc_offset();
__ Pop(fp, lr);
}
@@ -4641,10 +4604,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ DropBySMI(parameter_count);
}
__ Ret();
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -5371,30 +5330,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).Is(x1));
@@ -5493,7 +5428,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTagAndPush(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(x0, result);
}
@@ -5919,7 +5855,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
__ B(&done);
__ Bind(&copy_receiver);
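
Both DoAllocate hunks (here and in the arm file above) replace the slow-path jump for over-large constant sizes with CHECK(size <= Page::kMaxRegularHeapObjectSize): by this version the graph builder only emits inline allocations for constants that fit a regular page, so the branch was dead code and becomes an asserted invariant instead. A reduced model of that shift; the bound is illustrative, not the constant's real value:

```cpp
#include <cassert>
#include <cstdint>

// Reduced model of the DoAllocate change.
constexpr int32_t kMaxRegularHeapObjectSize = 512 * 1024;  // illustrative

void EmitInlineAllocation(int32_t /*size*/) { /* bump-pointer fast path */ }

void DoAllocateConstant(int32_t size) {
  // Before: if (size <= bound) inline; else jump to the deferred runtime
  // entry. After: the front end guarantees the bound, so assert it.
  assert(size <= kMaxRegularHeapObjectSize);  // CHECK(...) in the diff
  EmitInlineAllocation(size);
}
```
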
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index 20e572c65c..f5eac7c88b 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#include "src/arm64/lithium-arm64.h"
+#include "src/crankshaft/arm64/lithium-arm64.h"
-#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
@@ -460,6 +460,7 @@ class BranchGenerator BASE_EMBEDDED {
LCodeGen* codegen_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
index 1520fa1888..3ef9f63ab5 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/delayed-masm-arm64-inl.h"
-#include "src/arm64/lithium-codegen-arm64.h"
-#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64-inl.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
index 8866db4c94..4f5eb223d4 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#include "src/arm64/delayed-masm-arm64.h"
-#include "src/lithium.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -92,6 +92,7 @@ class LGapResolver BASE_EMBEDDED {
LOperand* saved_destination_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
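
A quieter rename runs through both codegen files above: FeedbackVectorICSlot becomes FeedbackVectorSlot in EmitVectorLoadICRegisters and EmitVectorStoreICRegisters, reflecting the unified slot type. The emitted sequence is unchanged: resolve the slot to an integer index in the vector, then tag it as a Smi for the stub to read. A stand-in version (the Smi shift assumes 32-bit tagging, purely for illustration):

```cpp
#include <cstdint>

// Stand-in for the EmitVector*ICRegisters pattern.
struct FeedbackVectorSlot { int id; };

struct TypeFeedbackVector {
  int GetIndex(FeedbackVectorSlot slot) const { return slot.id; }  // stub
};

intptr_t SmiFromInt(int value) { return static_cast<intptr_t>(value) << 1; }

intptr_t MaterializeSlot(const TypeFeedbackVector& vector,
                         FeedbackVectorSlot slot) {
  int index = vector.GetIndex(slot);
  // __ mov(slot_register, Operand(Smi::FromInt(index))) in the diff.
  return SmiFromInt(index);
}
```
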
diff --git a/deps/v8/src/hydrogen-alias-analysis.h b/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
index 368dd5f020..de8d0bdbe5 100644
--- a/deps/v8/src/hydrogen-alias-analysis.h
+++ b/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_
-#define V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -68,6 +68,7 @@ class HAliasAnalyzer : public ZoneObject {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/crankshaft/hydrogen-bce.cc
index 30c218f82a..d00d8ce25c 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-bce.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-bce.h"
+#include "src/crankshaft/hydrogen-bce.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-bce.h b/deps/v8/src/crankshaft/hydrogen-bce.h
index 70c0a07d06..e819ffc403 100644
--- a/deps/v8/src/hydrogen-bce.h
+++ b/deps/v8/src/crankshaft/hydrogen-bce.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_BCE_H_
-#define V8_HYDROGEN_BCE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_BCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_BCE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -46,6 +46,7 @@ class HBoundsCheckEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_BCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_BCE_H_
diff --git a/deps/v8/src/hydrogen-bch.cc b/deps/v8/src/crankshaft/hydrogen-bch.cc
index a4c0ae4e25..060e0bcdab 100644
--- a/deps/v8/src/hydrogen-bch.cc
+++ b/deps/v8/src/crankshaft/hydrogen-bch.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-bch.h"
+#include "src/crankshaft/hydrogen-bch.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-bch.h b/deps/v8/src/crankshaft/hydrogen-bch.h
index 852c264c4f..cdcd407a09 100644
--- a/deps/v8/src/hydrogen-bch.h
+++ b/deps/v8/src/crankshaft/hydrogen-bch.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_BCH_H_
-#define V8_HYDROGEN_BCH_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_BCH_H_
+#define V8_CRANKSHAFT_HYDROGEN_BCH_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -27,6 +27,7 @@ class HBoundsCheckHoistingPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_BCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_BCH_H_
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
index 25911eb353..4a07357d58 100644
--- a/deps/v8/src/hydrogen-canonicalize.cc
+++ b/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-canonicalize.h"
-#include "src/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-canonicalize.h"
+
+#include "src/crankshaft/hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
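
A recurring mechanical change in these .cc hunks (here, and in hydrogen-dehoist.cc and hydrogen-gvn.cc below) is include ordering: the file's own header comes first, separated by a blank line, then the remaining includes sorted under the new src/crankshaft/ paths. The shape, using this file as the model:

```cpp
// hydrogen-canonicalize.cc (shape only)
#include "src/crankshaft/hydrogen-canonicalize.h"  // own header first

#include "src/crankshaft/hydrogen-redundant-phi.h"  // then the rest, sorted
```
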
diff --git a/deps/v8/src/hydrogen-canonicalize.h b/deps/v8/src/crankshaft/hydrogen-canonicalize.h
index eb230332fd..a17557ac8b 100644
--- a/deps/v8/src/hydrogen-canonicalize.h
+++ b/deps/v8/src/crankshaft/hydrogen-canonicalize.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_CANONICALIZE_H_
-#define V8_HYDROGEN_CANONICALIZE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
+#define V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class HCanonicalizePhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_CANONICALIZE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
index 74be2e42f4..548e4cd8bd 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-check-elimination.h"
+#include "src/crankshaft/hydrogen-check-elimination.h"
-#include "src/hydrogen-alias-analysis.h"
-#include "src/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen-flow-engine.h"
#define GLOBAL 1
diff --git a/deps/v8/src/hydrogen-check-elimination.h b/deps/v8/src/crankshaft/hydrogen-check-elimination.h
index 7102a439f3..d6339df34c 100644
--- a/deps/v8/src/hydrogen-check-elimination.h
+++ b/deps/v8/src/crankshaft/hydrogen-check-elimination.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_
-#define V8_HYDROGEN_CHECK_ELIMINATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
-#include "src/hydrogen.h"
-#include "src/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
@@ -68,6 +68,7 @@ class HCheckEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_CHECK_ELIMINATION_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/crankshaft/hydrogen-dce.cc
index 50a300bd94..3cb9cf4a07 100644
--- a/deps/v8/src/hydrogen-dce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-dce.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-dce.h"
+#include "src/crankshaft/hydrogen-dce.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-dce.h b/deps/v8/src/crankshaft/hydrogen-dce.h
index af3679d9d3..f620a3cfa8 100644
--- a/deps/v8/src/hydrogen-dce.h
+++ b/deps/v8/src/crankshaft/hydrogen-dce.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_DCE_H_
-#define V8_HYDROGEN_DCE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_DCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_DCE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -29,6 +29,7 @@ class HDeadCodeEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_DCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_DCE_H_
diff --git a/deps/v8/src/hydrogen-dehoist.cc b/deps/v8/src/crankshaft/hydrogen-dehoist.cc
index e521c25cda..34de94afc5 100644
--- a/deps/v8/src/hydrogen-dehoist.cc
+++ b/deps/v8/src/crankshaft/hydrogen-dehoist.cc
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-dehoist.h"
+#include "src/crankshaft/hydrogen-dehoist.h"
+
#include "src/base/safe_math.h"
namespace v8 {
diff --git a/deps/v8/src/hydrogen-dehoist.h b/deps/v8/src/crankshaft/hydrogen-dehoist.h
index 4aab30fafa..d68f62cf7b 100644
--- a/deps/v8/src/hydrogen-dehoist.h
+++ b/deps/v8/src/crankshaft/hydrogen-dehoist.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_DEHOIST_H_
-#define V8_HYDROGEN_DEHOIST_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
+#define V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class HDehoistIndexComputationsPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_DEHOIST_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
index 7cc4dc04a8..ae0bd08837 100644
--- a/deps/v8/src/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
-#include "src/hydrogen-environment-liveness.h"
+#include "src/crankshaft/hydrogen-environment-liveness.h"
namespace v8 {
diff --git a/deps/v8/src/hydrogen-environment-liveness.h b/deps/v8/src/crankshaft/hydrogen-environment-liveness.h
index e595927f9d..d9e156b7e9 100644
--- a/deps/v8/src/hydrogen-environment-liveness.h
+++ b/deps/v8/src/crankshaft/hydrogen-environment-liveness.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-#define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -63,6 +62,7 @@ class HEnvironmentLivenessAnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif /* V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ */
+#endif // V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
index 3613737192..6128fbc973 100644
--- a/deps/v8/src/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-escape-analysis.h"
+#include "src/crankshaft/hydrogen-escape-analysis.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-escape-analysis.h b/deps/v8/src/crankshaft/hydrogen-escape-analysis.h
index 0726b8edbe..7dac6debe0 100644
--- a/deps/v8/src/hydrogen-escape-analysis.h
+++ b/deps/v8/src/crankshaft/hydrogen-escape-analysis.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_ESCAPE_ANALYSIS_H_
-#define V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
#include "src/allocation.h"
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -65,6 +65,7 @@ class HEscapeAnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/crankshaft/hydrogen-flow-engine.h
index 257ab466a1..3a488ddc18 100644
--- a/deps/v8/src/hydrogen-flow-engine.h
+++ b/deps/v8/src/crankshaft/hydrogen-flow-engine.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_FLOW_ENGINE_H_
-#define V8_HYDROGEN_FLOW_ENGINE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
+#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
-#include "src/hydrogen.h"
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
namespace v8 {
@@ -214,6 +214,7 @@ class HFlowEngine {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_FLOW_ENGINE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/crankshaft/hydrogen-gvn.cc
index 31a2cd68a5..07bfabc79a 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen.h"
-#include "src/hydrogen-gvn.h"
+#include "src/crankshaft/hydrogen-gvn.h"
+
+#include "src/crankshaft/hydrogen.h"
#include "src/v8.h"
namespace v8 {
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/crankshaft/hydrogen-gvn.h
index fc7f27368e..a5e2168603 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_GVN_H_
-#define V8_HYDROGEN_GVN_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_GVN_H_
+#define V8_CRANKSHAFT_HYDROGEN_GVN_H_
#include <iosfwd>
-#include "src/hydrogen.h"
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
namespace v8 {
@@ -148,6 +148,7 @@ class HGlobalValueNumberingPhase final : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_GVN_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/hydrogen-infer-representation.cc b/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
index 6687aefed8..74f264e17a 100644
--- a/deps/v8/src/hydrogen-infer-representation.cc
+++ b/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-infer-representation.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-infer-representation.h b/deps/v8/src/crankshaft/hydrogen-infer-representation.h
index d07f89d973..92f2bc8c59 100644
--- a/deps/v8/src/hydrogen-infer-representation.h
+++ b/deps/v8/src/crankshaft/hydrogen-infer-representation.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_
-#define V8_HYDROGEN_INFER_REPRESENTATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -29,6 +29,7 @@ class HInferRepresentationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_INFER_REPRESENTATION_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
diff --git a/deps/v8/src/hydrogen-infer-types.cc b/deps/v8/src/crankshaft/hydrogen-infer-types.cc
index ea69662b40..bfd3dd2281 100644
--- a/deps/v8/src/hydrogen-infer-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-infer-types.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-infer-types.h"
+#include "src/crankshaft/hydrogen-infer-types.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-infer-types.h b/deps/v8/src/crankshaft/hydrogen-infer-types.h
index 41337ac5c0..8acfeabd60 100644
--- a/deps/v8/src/hydrogen-infer-types.h
+++ b/deps/v8/src/crankshaft/hydrogen-infer-types.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_INFER_TYPES_H_
-#define V8_HYDROGEN_INFER_TYPES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
+#define V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -31,6 +31,7 @@ class HInferTypesPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_INFER_TYPES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index 4482155fbe..1e49202fe0 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -2,36 +2,35 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/base/bits.h"
+#include "src/base/safe_math.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
#include "src/double.h"
#include "src/elements.h"
#include "src/factory.h"
-#include "src/hydrogen-infer-representation.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
-#include "src/base/safe_math.h"
-
namespace v8 {
namespace internal {
@@ -812,7 +811,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kLeaveInlined:
case HValue::kLoadFieldByIndex:
case HValue::kLoadGlobalGeneric:
- case HValue::kLoadGlobalViaContext:
case HValue::kLoadNamedField:
case HValue::kLoadNamedGeneric:
case HValue::kLoadRoot:
@@ -826,7 +824,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
case HValue::kStoreFrameContext:
- case HValue::kStoreGlobalViaContext:
case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
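A note on why dropping LoadGlobalViaContext/StoreGlobalViaContext has to touch this switch at all: the HValue::k* tags are stamped out of the concrete-instruction list macro, so removing the two V(...) entries (see the list hunks in hydrogen-instructions.h further down) deletes the corresponding opcodes, and every exhaustive switch such as CanDeoptimize() must drop its cases in the same patch. A minimal sketch of that plumbing, assuming the list/enum shape used elsewhere in this header:

// Sketch only: each V(Name) entry becomes an HValue::kName opcode, which is
// why the instruction removal also edits the switch above.
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
  V(LoadGlobalGeneric)                        \
  V(LoadKeyed) /* LoadGlobalViaContext and StoreGlobalViaContext are gone */

enum Opcode {
#define DECLARE_OPCODE(type) k##type,
  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
  kPhi  // phis are not part of the concrete-instruction list
};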
@@ -952,6 +949,7 @@ std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const { // NOLINT
if (HasVectorAndSlot()) {
os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
}
+ os << " (convert mode" << convert_mode() << ")";
return os;
}
@@ -1584,9 +1582,10 @@ HValue* HUnaryMathOperation::Canonicalize() {
HDiv* hdiv = HDiv::cast(value());
HValue* left = hdiv->left();
- if (left->representation().IsInteger32()) {
+ if (left->representation().IsInteger32() && !left->CheckFlag(kUint32)) {
// A value with an integer representation does not need to be transformed.
- } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32()) {
+ } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32() &&
+ !HChange::cast(left)->value()->CheckFlag(kUint32)) {
// A change from an integer32 can be replaced by the integer32 value.
left = HChange::cast(left)->value();
} else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
@@ -1600,10 +1599,12 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (right->IsInteger32Constant()) {
right = Prepend(HConstant::cast(right)->CopyToRepresentation(
Representation::Integer32(), right->block()->zone()));
- } else if (right->representation().IsInteger32()) {
+ } else if (right->representation().IsInteger32() &&
+ !right->CheckFlag(kUint32)) {
// A value with an integer representation does not need to be transformed.
} else if (right->IsChange() &&
- HChange::cast(right)->from().IsInteger32()) {
+ HChange::cast(right)->from().IsInteger32() &&
+ !HChange::cast(right)->value()->CheckFlag(kUint32)) {
// A change from an integer32 can be replaced by the integer32 value.
right = HChange::cast(right)->value();
} else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
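The new !CheckFlag(kUint32) guards matter because a value flagged kUint32 lives in an int32 register but denotes an unsigned number; folding Math.floor(a / b) into a flooring integer division is only sound for genuinely signed inputs. A standalone illustration of the hazard (plain C++, not V8 code):

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  uint32_t u = 0xFFFFFFFFu;  // the JS number 4294967295, flagged kUint32
  int32_t as_int32 = static_cast<int32_t>(u);  // the same bits read as -1
  // Correct result: Math.floor(4294967295 / 2) == 2147483647.
  std::cout << static_cast<int64_t>(std::floor(static_cast<double>(u) / 2.0))
            << "\n";  // prints 2147483647
  // An integer division over the reinterpreted bits computes -1 / 2 instead,
  // so the canonicalization must bail out for uint32-flagged operands.
  std::cout << as_int32 / 2 << "\n";  // prints 0 (C++ truncates); floor is -1
  return 0;
}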
@@ -3545,7 +3546,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
HInstruction* index = HLoadKeyed::New(
block()->graph()->isolate(), block()->graph()->zone(),
block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
- key_load->key(), key_load->elements_kind());
+ key_load->key(), nullptr, key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
return Prepend(new(block()->zone()) HLoadFieldByIndex(
@@ -3566,13 +3567,6 @@ std::ostream& HStoreNamedGeneric::PrintDataTo(
}
-std::ostream& HStoreGlobalViaContext::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << " depth:" << depth() << " slot:" << slot_index() << " = "
- << NameOf(value());
-}
-
-
std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(object()) << access_ << " = " << NameOf(value());
if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3623,12 +3617,6 @@ std::ostream& HLoadGlobalGeneric::PrintDataTo(
}
-std::ostream& HLoadGlobalViaContext::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << "depth:" << depth() << " slot:" << slot_index();
-}
-
-
std::ostream& HInnerAllocatedObject::PrintDataTo(
std::ostream& os) const { // NOLINT
os << NameOf(base_object()) << " offset ";
@@ -4693,13 +4681,13 @@ std::ostream& operator<<(std::ostream& os, const HObjectAccess& access) {
break;
case HObjectAccess::kDouble: // fall through
case HObjectAccess::kInobject:
- if (!access.name().is_null()) {
+ if (!access.name().is_null() && access.name()->IsString()) {
os << Handle<String>::cast(access.name())->ToCString().get();
}
os << "[in-object]";
break;
case HObjectAccess::kBackingStore:
- if (!access.name().is_null()) {
+ if (!access.name().is_null() && access.name()->IsString()) {
os << Handle<String>::cast(access.name())->ToCString().get();
}
os << "[backing-store]";
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 9f5bc2099c..dfed6e32ce 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
-#define V8_HYDROGEN_INSTRUCTIONS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
+#define V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
#include <cstring>
#include <iosfwd>
@@ -13,10 +13,10 @@
#include "src/bit-vector.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
+#include "src/crankshaft/hydrogen-types.h"
+#include "src/crankshaft/unique.h"
#include "src/deoptimizer.h"
-#include "src/hydrogen-types.h"
#include "src/small-pointer-list.h"
-#include "src/unique.h"
#include "src/utils.h"
#include "src/zone.h"
@@ -115,7 +115,6 @@ class LChunkBuilder;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -145,7 +144,6 @@ class LChunkBuilder;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -931,6 +929,12 @@ std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
return new (zone) I(p1, p2, p3, p4, p5, p6); \
}
+#define DECLARE_INSTRUCTION_FACTORY_P7(I, P1, P2, P3, P4, P5, P6, P7) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { \
+ return new (zone) I(p1, p2, p3, p4, p5, p6, p7); \
+ }
+
#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
return new (zone) I(context); \
@@ -2396,21 +2400,20 @@ class HInvokeFunction final : public HBinaryCall {
class HCallFunction final : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
- HCallFunction, HValue*, int, CallFunctionFlags);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallFunction, HValue*, int,
+ ConvertReceiverMode);
HValue* context() const { return first(); }
HValue* function() const { return second(); }
- CallFunctionFlags function_flags() const { return function_flags_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ ConvertReceiverMode convert_mode() const { return convert_mode_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -2423,13 +2426,12 @@ class HCallFunction final : public HBinaryCall {
private:
HCallFunction(HValue* context, HValue* function, int argument_count,
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS)
+ ConvertReceiverMode convert_mode)
: HBinaryCall(context, function, argument_count),
- function_flags_(flags),
- slot_(FeedbackVectorICSlot::Invalid()) {}
- CallFunctionFlags function_flags_;
+ convert_mode_(convert_mode) {}
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
+ ConvertReceiverMode convert_mode_;
};
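Two independent type migrations meet in HCallFunction: the slot unification replaces FeedbackVectorICSlot with plain FeedbackVectorSlot throughout this header, and the bitset-style CallFunctionFlags gives way to ConvertReceiverMode, which records what is statically known about the call's receiver. For reference, the replacement enum this diff relies on (defined in src/globals.h of this V8 version) has roughly this shape:

// Assumed shape of ConvertReceiverMode; the values describe receiver
// knowledge so the call stub can skip ToObject conversion when possible.
enum class ConvertReceiverMode : unsigned {
  kNullOrUndefined,     // receiver is known to be null or undefined
  kNotNullOrUndefined,  // receiver is known to be neither
  kAny                  // nothing is known about the receiver
};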
@@ -3272,7 +3274,7 @@ class HPhi final : public HValue {
Representation RepresentationFromInputs() override;
Range* InferRange(Zone* zone) override;
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
}
@@ -3828,15 +3830,15 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return observed_input_representation_[index - 1];
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
? Representation::Integer32() : new_rep;
HValue::UpdateRepresentation(rep, h_infer, reason);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RepresentationFromInputs() override;
Representation RepresentationFromOutput();
void AssumeRepresentation(Representation r) override;
@@ -4067,7 +4069,7 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
}
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
HValue* index() const { return OperandAt(0); }
HValue* length() const { return OperandAt(1); }
@@ -4161,9 +4163,9 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
// We only generate either int32 or generic tagged bitwise operations.
if (new_rep.IsDouble()) new_rep = Representation::Integer32();
HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
@@ -4175,8 +4177,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
return r;
}
- virtual void initialize_output_representation(
- Representation observed) override {
+ void initialize_output_representation(Representation observed) override {
if (observed.IsDouble()) observed = Representation::Integer32();
HBinaryOperation::initialize_output_representation(observed);
}
@@ -4310,7 +4311,7 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
observed_input_representation_[1] = right;
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
@@ -4358,7 +4359,7 @@ class HCompareHoleAndBranch final : public HUnaryControlInstruction {
DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
HBasicBlock*, HBasicBlock*);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
@@ -4381,7 +4382,7 @@ class HCompareMinusZeroAndBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
@@ -4954,9 +4955,9 @@ class HMul final : public HArithmeticBinaryOperation {
// Only commutative if it is certain that no two objects are multiplied.
bool IsCommutative() const override { return !representation().IsTagged(); }
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4985,9 +4986,9 @@ class HMod final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5017,9 +5018,9 @@ class HDiv final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5051,7 +5052,7 @@ class HMathMinMax final : public HArithmeticBinaryOperation {
return RequiredInputRepresentation(index);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RepresentationFromInputs() override {
Representation left_rep = left()->representation();
@@ -5151,9 +5152,9 @@ class HShl final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi() &&
!(right()->IsInteger32Constant() &&
right()->GetInteger32Constant() >= 0)) {
@@ -5193,9 +5194,9 @@ class HShr final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5231,9 +5232,9 @@ class HSar final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5257,9 +5258,9 @@ class HRor final : public HBitwiseBinaryOperation {
return new (zone) HRor(context, left, right, strength);
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5415,13 +5416,13 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
HValue* global_object() { return OperandAt(1); }
Handle<String> name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -5437,9 +5438,7 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
private:
HLoadGlobalGeneric(HValue* context, HValue* global_object,
Handle<String> name, TypeofMode typeof_mode)
- : name_(name),
- typeof_mode_(typeof_mode),
- slot_(FeedbackVectorICSlot::Invalid()) {
+ : name_(name), typeof_mode_(typeof_mode) {
SetOperandAt(0, context);
SetOperandAt(1, global_object);
set_representation(Representation::Tagged());
@@ -5449,36 +5448,7 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
Handle<String> name_;
TypeofMode typeof_mode_;
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
-};
-
-
-class HLoadGlobalViaContext final : public HTemplateInstruction<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadGlobalViaContext, int, int);
-
- HValue* context() { return OperandAt(0); }
- int depth() const { return depth_; }
- int slot_index() const { return slot_index_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext)
-
- private:
- HLoadGlobalViaContext(HValue* context, int depth, int slot_index)
- : depth_(depth), slot_index_(slot_index) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- int const depth_;
- int const slot_index_;
+ FeedbackVectorSlot slot_;
};
@@ -5555,8 +5525,8 @@ class HAllocate final : public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override;
+ bool HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) override;
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
@@ -6226,8 +6196,16 @@ class HObjectAccess final {
JSArrayBufferView::kByteLengthOffset);
}
- static HObjectAccess ForGlobalObjectNativeContext() {
- return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
+ static HObjectAccess ForJSGlobalObjectNativeContext() {
+ return HObjectAccess(kInobject, JSGlobalObject::kNativeContextOffset);
+ }
+
+ static HObjectAccess ForJSRegExpFlags() {
+ return HObjectAccess(kInobject, JSRegExp::kFlagsOffset);
+ }
+
+ static HObjectAccess ForJSRegExpSource() {
+ return HObjectAccess(kInobject, JSRegExp::kSourceOffset);
}
static HObjectAccess ForJSCollectionTable() {
@@ -6475,13 +6453,13 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
InlineCacheState initialization_state() const {
return initialization_state_;
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6501,7 +6479,6 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
- slot_(FeedbackVectorICSlot::Invalid()),
language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, context);
@@ -6512,7 +6489,7 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -6569,14 +6546,14 @@ enum LoadKeyedHoleMode {
};
-class HLoadKeyed final : public HTemplateInstruction<3>,
+class HLoadKeyed final : public HTemplateInstruction<4>,
public ArrayInstructionInterface {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
+ DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
ElementsKind);
- DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*,
+ DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode);
- DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*,
+ DECLARE_INSTRUCTION_FACTORY_P7(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode, int);
bool is_fixed_typed_array() const {
@@ -6589,6 +6566,11 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
return OperandAt(2);
}
bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
+ HValue* backing_store_owner() const {
+ DCHECK(HasBackingStoreOwner());
+ return OperandAt(3);
+ }
+ bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); }
bool TryIncreaseBaseOffset(uint32_t increase_by_value) override;
HValue* GetKey() override { return key(); }
@@ -6619,7 +6601,12 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
return ArrayInstructionInterface::KeyedAccessIndexRequirement(
OperandAt(1)->representation());
}
- return Representation::None();
+ if (index == 2) {
+ return Representation::None();
+ }
+ DCHECK_EQ(3, index);
+ return HasBackingStoreOwner() ? Representation::Tagged()
+ : Representation::None();
}
Representation observed_input_representation(int index) override {
@@ -6647,7 +6634,7 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
private:
HLoadKeyed(HValue* obj, HValue* key, HValue* dependency,
- ElementsKind elements_kind,
+ HValue* backing_store_owner, ElementsKind elements_kind,
LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
int offset = kDefaultKeyedHeaderOffsetSentinel)
: bit_field_(0) {
@@ -6660,7 +6647,9 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
SetOperandAt(0, obj);
SetOperandAt(1, key);
- SetOperandAt(2, dependency != NULL ? dependency : obj);
+ SetOperandAt(2, dependency != nullptr ? dependency : obj);
+ SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
+ DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
if (!is_fixed_typed_array()) {
// We can distinguish between storing double (holey and fast) and
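Note the encoding trick the new operand reuses from the dependency slot: an optional operand is never null; when absent it is aliased to operand 0, so presence checks stay cheap pointer comparisons and the operand count stays fixed:

// From the class above: absence is encoded as "same as operand 0".
bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }

The DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array()) then pins down the invariant: exactly the fixed-typed-array accesses carry a distinct owner.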
@@ -6753,7 +6742,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
InlineCacheState initialization_state() const {
return initialization_state_;
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
@@ -6762,7 +6751,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
return !feedback_vector_.is_null();
}
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6784,8 +6773,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
LanguageMode language_mode,
InlineCacheState initialization_state)
- : slot_(FeedbackVectorICSlot::Invalid()),
- initialization_state_(initialization_state),
+ : initialization_state_(initialization_state),
language_mode_(language_mode) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
@@ -6795,7 +6783,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
}
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
InlineCacheState initialization_state_;
LanguageMode language_mode_;
};
@@ -6850,8 +6838,8 @@ class HStoreNamedField final : public HTemplateInstruction<3> {
}
return Representation::Tagged();
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override {
+ bool HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) override {
DCHECK(side_effect == kNewSpacePromotion);
if (!FLAG_use_write_barrier_elimination) return false;
dominator_ = dominator;
@@ -6980,13 +6968,13 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
return Representation::Tagged();
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return FLAG_vector_stores; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6998,7 +6986,6 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
HValue* value, LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
- slot_(FeedbackVectorICSlot::Invalid()),
language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, object);
@@ -7009,54 +6996,22 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
-class HStoreGlobalViaContext final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreGlobalViaContext, HValue*,
- int, int, LanguageMode);
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
- int depth() const { return depth_; }
- int slot_index() const { return slot_index_; }
- LanguageMode language_mode() const { return language_mode_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext)
-
- private:
- HStoreGlobalViaContext(HValue* context, HValue* value, int depth,
- int slot_index, LanguageMode language_mode)
- : depth_(depth), slot_index_(slot_index), language_mode_(language_mode) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetAllSideEffects();
- }
-
- int const depth_;
- int const slot_index_;
- LanguageMode const language_mode_;
-};
-
-
-class HStoreKeyed final : public HTemplateInstruction<3>,
+class HStoreKeyed final : public HTemplateInstruction<4>,
public ArrayInstructionInterface {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
- ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
- ElementsKind, StoreFieldOrKeyedMode);
+ HValue*, ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
- ElementsKind, StoreFieldOrKeyedMode, int);
+ HValue*, ElementsKind, StoreFieldOrKeyedMode);
+ DECLARE_INSTRUCTION_FACTORY_P7(HStoreKeyed, HValue*, HValue*, HValue*,
+ HValue*, ElementsKind, StoreFieldOrKeyedMode,
+ int);
Representation RequiredInputRepresentation(int index) override {
// kind_fast: tagged[int32] = tagged
@@ -7070,10 +7025,13 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
} else if (index == 1) {
return ArrayInstructionInterface::KeyedAccessIndexRequirement(
OperandAt(1)->representation());
+ } else if (index == 2) {
+ return RequiredValueRepresentation(elements_kind(), store_mode());
}
- DCHECK_EQ(index, 2);
- return RequiredValueRepresentation(elements_kind(), store_mode());
+ DCHECK_EQ(3, index);
+ return HasBackingStoreOwner() ? Representation::Tagged()
+ : Representation::None();
}
static Representation RequiredValueRepresentation(
@@ -7102,7 +7060,7 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
}
Representation observed_input_representation(int index) override {
- if (index < 2) return RequiredInputRepresentation(index);
+ if (index != 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
}
@@ -7116,6 +7074,11 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
HValue* elements() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* value() const { return OperandAt(2); }
+ HValue* backing_store_owner() const {
+ DCHECK(HasBackingStoreOwner());
+ return OperandAt(3);
+ }
+ bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
bool value_is_smi() const { return IsFastSmiElementsKind(elements_kind()); }
StoreFieldOrKeyedMode store_mode() const {
return StoreModeField::decode(bit_field_);
@@ -7142,8 +7105,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override {
+ bool HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) override {
DCHECK(side_effect == kNewSpacePromotion);
dominator_ = dominator;
return false;
@@ -7171,7 +7134,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
private:
- HStoreKeyed(HValue* obj, HValue* key, HValue* val, ElementsKind elements_kind,
+ HStoreKeyed(HValue* obj, HValue* key, HValue* val,
+ HValue* backing_store_owner, ElementsKind elements_kind,
StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
int offset = kDefaultKeyedHeaderOffsetSentinel)
: base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
@@ -7185,6 +7149,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
+ SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
+ DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
@@ -7239,7 +7205,7 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
return Representation::Tagged();
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
@@ -7249,7 +7215,7 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
return !feedback_vector_.is_null();
}
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -7262,8 +7228,7 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
HValue* value, LanguageMode language_mode,
InlineCacheState initialization_state)
- : slot_(FeedbackVectorICSlot::Invalid()),
- language_mode_(language_mode),
+ : language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, object);
SetOperandAt(1, key);
@@ -7273,7 +7238,7 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
}
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -7983,6 +7948,7 @@ class HAllocateBlockContext: public HTemplateInstruction<2> {
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_INSTRUCTIONS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
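The net effect of the HTemplateInstruction<3> → <4> change for HLoadKeyed and HStoreKeyed: element accesses on fixed typed arrays read a backing store that is not itself reachable as a heap object from the elements operand, so the instruction now threads the owning object through as operand 3, keeping it live across the access (and visible to representation inference, which reports Tagged only when a real owner is present). Call sites change as in this hedged sketch, where backing_store and typed_array_view are placeholder names:

// Ordinary FAST_ELEMENTS access: no separate owner, pass nullptr and the
// constructor aliases operand 3 to the elements operand.
HValue* load = Add<HLoadKeyed>(elements, key, /* dependency */ nullptr,
                               /* backing_store_owner */ nullptr,
                               FAST_ELEMENTS);

// Fixed-typed-array access: the view owning the external backing store is
// passed explicitly so it stays alive across the load.
HValue* elem = Add<HLoadKeyed>(backing_store, key, nullptr,
                               typed_array_view, elements_kind);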
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
index a4536fd750..da8d1864a6 100644
--- a/deps/v8/src/hydrogen-load-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-alias-analysis.h"
-#include "src/hydrogen-flow-engine.h"
-#include "src/hydrogen-instructions.h"
-#include "src/hydrogen-load-elimination.h"
+#include "src/crankshaft/hydrogen-load-elimination.h"
+
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-instructions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-load-elimination.h b/deps/v8/src/crankshaft/hydrogen-load-elimination.h
index e6b432c6ac..e5656459c9 100644
--- a/deps/v8/src/hydrogen-load-elimination.h
+++ b/deps/v8/src/crankshaft/hydrogen-load-elimination.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
-#define V8_HYDROGEN_LOAD_ELIMINATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -22,6 +22,7 @@ class HLoadEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-mark-deoptimize.cc b/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc
index fe7a88614c..a706d91323 100644
--- a/deps/v8/src/hydrogen-mark-deoptimize.cc
+++ b/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-mark-deoptimize.h"
+#include "src/crankshaft/hydrogen-mark-deoptimize.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-mark-deoptimize.h b/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h
index 52a6ef96c9..45d40acd95 100644
--- a/deps/v8/src/hydrogen-mark-deoptimize.h
+++ b/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_MARK_DEOPTIMIZE_H_
-#define V8_HYDROGEN_MARK_DEOPTIMIZE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
+#define V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -47,6 +47,7 @@ class HComputeChangeUndefinedToNaN : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_MARK_DEOPTIMIZE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
diff --git a/deps/v8/src/hydrogen-mark-unreachable.cc b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
index affe7ce205..4e1dd689ee 100644
--- a/deps/v8/src/hydrogen-mark-unreachable.cc
+++ b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-mark-unreachable.h"
+#include "src/crankshaft/hydrogen-mark-unreachable.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-mark-unreachable.h b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.h
index d43d22bbba..1243b1fcbe 100644
--- a/deps/v8/src/hydrogen-mark-unreachable.h
+++ b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_
-#define V8_HYDROGEN_MARK_UNREACHABLE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
+#define V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -25,6 +25,7 @@ class HMarkUnreachableBlocksPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_MARK_UNREACHABLE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/crankshaft/hydrogen-osr.cc
index 8a4780c3d7..c98bbf627f 100644
--- a/deps/v8/src/hydrogen-osr.cc
+++ b/deps/v8/src/crankshaft/hydrogen-osr.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-osr.h b/deps/v8/src/crankshaft/hydrogen-osr.h
index 433548c1a8..52c94dcad3 100644
--- a/deps/v8/src/hydrogen-osr.h
+++ b/deps/v8/src/crankshaft/hydrogen-osr.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_OSR_H_
-#define V8_HYDROGEN_OSR_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_OSR_H_
+#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
-#include "src/hydrogen.h"
#include "src/ast.h"
+#include "src/crankshaft/hydrogen.h"
#include "src/zone.h"
namespace v8 {
@@ -49,6 +49,7 @@ class HOsrBuilder : public ZoneObject {
ZoneList<HUnknownOSRValue*>* osr_values_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_OSR_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_OSR_H_
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
index c81dc1d365..f5eba5e571 100644
--- a/deps/v8/src/hydrogen-range-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-range-analysis.h"
+#include "src/crankshaft/hydrogen-range-analysis.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-range-analysis.h b/deps/v8/src/crankshaft/hydrogen-range-analysis.h
index 1269ec7529..cff7026e14 100644
--- a/deps/v8/src/hydrogen-range-analysis.h
+++ b/deps/v8/src/crankshaft/hydrogen-range-analysis.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_RANGE_ANALYSIS_H_
-#define V8_HYDROGEN_RANGE_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -45,6 +45,7 @@ class HRangeAnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_RANGE_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen-redundant-phi.cc b/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
index 1b3c94a3db..ef8b29159d 100644
--- a/deps/v8/src/hydrogen-redundant-phi.cc
+++ b/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-redundant-phi.h b/deps/v8/src/crankshaft/hydrogen-redundant-phi.h
index 7f5ec4e52d..e8735c82d3 100644
--- a/deps/v8/src/hydrogen-redundant-phi.h
+++ b/deps/v8/src/crankshaft/hydrogen-redundant-phi.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_REDUNDANT_PHI_H_
-#define V8_HYDROGEN_REDUNDANT_PHI_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
+#define V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -28,6 +28,7 @@ class HRedundantPhiEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_REDUNDANT_PHI_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
diff --git a/deps/v8/src/hydrogen-removable-simulates.cc b/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
index eb13cb28bd..ceef7430eb 100644
--- a/deps/v8/src/hydrogen-removable-simulates.cc
+++ b/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-flow-engine.h"
-#include "src/hydrogen-instructions.h"
-#include "src/hydrogen-removable-simulates.h"
+#include "src/crankshaft/hydrogen-removable-simulates.h"
+
+#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-instructions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-removable-simulates.h b/deps/v8/src/crankshaft/hydrogen-removable-simulates.h
index 9bd25056bd..34500012cb 100644
--- a/deps/v8/src/hydrogen-removable-simulates.h
+++ b/deps/v8/src/crankshaft/hydrogen-removable-simulates.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_
-#define V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
+#define V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class HMergeRemovableSimulatesPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
index 4af4e01a5b..32b614c56c 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-representation-changes.h"
+#include "src/crankshaft/hydrogen-representation-changes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-representation-changes.h b/deps/v8/src/crankshaft/hydrogen-representation-changes.h
index 2f5958a70f..d8403947c3 100644
--- a/deps/v8/src/hydrogen-representation-changes.h
+++ b/deps/v8/src/crankshaft/hydrogen-representation-changes.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_REPRESENTATION_CHANGES_H_
-#define V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
+#define V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -27,6 +27,7 @@ class HRepresentationChangesPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
diff --git a/deps/v8/src/hydrogen-sce.cc b/deps/v8/src/crankshaft/hydrogen-sce.cc
index 235a94142d..91e91d2033 100644
--- a/deps/v8/src/hydrogen-sce.cc
+++ b/deps/v8/src/crankshaft/hydrogen-sce.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-sce.h"
+#include "src/crankshaft/hydrogen-sce.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-sce.h b/deps/v8/src/crankshaft/hydrogen-sce.h
index 276d348676..bb896bad6b 100644
--- a/deps/v8/src/hydrogen-sce.h
+++ b/deps/v8/src/crankshaft/hydrogen-sce.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_SCE_H_
-#define V8_HYDROGEN_SCE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_SCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_SCE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -20,6 +20,7 @@ class HStackCheckEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_SCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_SCE_H_
diff --git a/deps/v8/src/hydrogen-store-elimination.cc b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
index f04ec44e44..ba32c8ad6b 100644
--- a/deps/v8/src/hydrogen-store-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-instructions.h"
-#include "src/hydrogen-store-elimination.h"
+#include "src/crankshaft/hydrogen-store-elimination.h"
+
+#include "src/crankshaft/hydrogen-instructions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-store-elimination.h b/deps/v8/src/crankshaft/hydrogen-store-elimination.h
index 35a23a2660..2a9e0c1488 100644
--- a/deps/v8/src/hydrogen-store-elimination.h
+++ b/deps/v8/src/crankshaft/hydrogen-store-elimination.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_STORE_ELIMINATION_H_
-#define V8_HYDROGEN_STORE_ELIMINATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
-#include "src/hydrogen.h"
-#include "src/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
@@ -29,6 +29,7 @@ class HStoreEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif
+#endif // V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 7c50a1d887..9c5e34194e 100644
--- a/deps/v8/src/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-types.h"
+#include "src/crankshaft/hydrogen-types.h"
#include "src/ostreams.h"
#include "src/types-inl.h"
diff --git a/deps/v8/src/hydrogen-types.h b/deps/v8/src/crankshaft/hydrogen-types.h
index fe13345f76..87148ee4cd 100644
--- a/deps/v8/src/hydrogen-types.h
+++ b/deps/v8/src/crankshaft/hydrogen-types.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef HYDROGEN_TYPES_H_
-#define HYDROGEN_TYPES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_TYPES_H_
+#define V8_CRANKSHAFT_HYDROGEN_TYPES_H_
#include <climits>
#include <iosfwd>
@@ -86,6 +86,7 @@ class HType final {
std::ostream& operator<<(std::ostream& os, const HType& t);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // HYDROGEN_TYPES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_TYPES_H_
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
index c6cbc9bc35..ac4a63f8f2 100644
--- a/deps/v8/src/hydrogen-uint32-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-uint32-analysis.h"
+#include "src/crankshaft/hydrogen-uint32-analysis.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-uint32-analysis.h b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.h
index 4d2797fa3a..0d959b5953 100644
--- a/deps/v8/src/hydrogen-uint32-analysis.h
+++ b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_UINT32_ANALYSIS_H_
-#define V8_HYDROGEN_UINT32_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -31,6 +31,7 @@ class HUint32AnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_UINT32_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index 901e10721d..8118aead9a 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -2,63 +2,63 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
#include <sstream>
#include "src/allocation-site-scopes.h"
#include "src/ast-numbering.h"
#include "src/code-factory.h"
+#include "src/crankshaft/hydrogen-bce.h"
+#include "src/crankshaft/hydrogen-bch.h"
+#include "src/crankshaft/hydrogen-canonicalize.h"
+#include "src/crankshaft/hydrogen-check-elimination.h"
+#include "src/crankshaft/hydrogen-dce.h"
+#include "src/crankshaft/hydrogen-dehoist.h"
+#include "src/crankshaft/hydrogen-environment-liveness.h"
+#include "src/crankshaft/hydrogen-escape-analysis.h"
+#include "src/crankshaft/hydrogen-gvn.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
+#include "src/crankshaft/hydrogen-infer-types.h"
+#include "src/crankshaft/hydrogen-load-elimination.h"
+#include "src/crankshaft/hydrogen-mark-deoptimize.h"
+#include "src/crankshaft/hydrogen-mark-unreachable.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-range-analysis.h"
+#include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-removable-simulates.h"
+#include "src/crankshaft/hydrogen-representation-changes.h"
+#include "src/crankshaft/hydrogen-sce.h"
+#include "src/crankshaft/hydrogen-store-elimination.h"
+#include "src/crankshaft/hydrogen-uint32-analysis.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/crankshaft/typing.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/hydrogen-bce.h"
-#include "src/hydrogen-bch.h"
-#include "src/hydrogen-canonicalize.h"
-#include "src/hydrogen-check-elimination.h"
-#include "src/hydrogen-dce.h"
-#include "src/hydrogen-dehoist.h"
-#include "src/hydrogen-environment-liveness.h"
-#include "src/hydrogen-escape-analysis.h"
-#include "src/hydrogen-gvn.h"
-#include "src/hydrogen-infer-representation.h"
-#include "src/hydrogen-infer-types.h"
-#include "src/hydrogen-load-elimination.h"
-#include "src/hydrogen-mark-deoptimize.h"
-#include "src/hydrogen-mark-unreachable.h"
-#include "src/hydrogen-osr.h"
-#include "src/hydrogen-range-analysis.h"
-#include "src/hydrogen-redundant-phi.h"
-#include "src/hydrogen-removable-simulates.h"
-#include "src/hydrogen-representation-changes.h"
-#include "src/hydrogen-sce.h"
-#include "src/hydrogen-store-elimination.h"
-#include "src/hydrogen-uint32-analysis.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
// GetRootConstructor
#include "src/ic/ic-inl.h"
#include "src/isolate-inl.h"
-#include "src/lithium-allocator.h"
#include "src/parser.h"
#include "src/runtime/runtime.h"
#include "src/scopeinfo.h"
-#include "src/typing.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-codegen-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -1372,7 +1372,8 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(
HValue* checked_elements = environment()->Top();
// Write zero to ensure that the new element is initialized with some smi.
- Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), kind);
+ Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), nullptr,
+ kind);
}
length_checker.Else();
@@ -1594,18 +1595,11 @@ void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
- STATIC_ASSERT(JS_BUILTINS_OBJECT_TYPE == JS_GLOBAL_OBJECT_TYPE + 1);
- HValue* min_global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
- HValue* max_global_type = Add<HConstant>(JS_BUILTINS_OBJECT_TYPE);
+ HValue* global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
IfBuilder if_global_object(this);
- if_global_object.If<HCompareNumericAndBranch>(instance_type,
- max_global_type,
- Token::LTE);
- if_global_object.And();
- if_global_object.If<HCompareNumericAndBranch>(instance_type,
- min_global_type,
- Token::GTE);
+ if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
+ Token::EQ);
if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
if_global_object.End();
}
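With JS_BUILTINS_OBJECT_TYPE removed from the instance-type enum, the former range check (two HCompareNumericAndBranch nodes joined with And()) collapses into a single equality compare against JS_GLOBAL_OBJECT_TYPE. A standalone sketch of the simplification; the enum value below is made up, only the shape of the check matters:

    enum InstanceType { JS_GLOBAL_OBJECT_TYPE = 100, SOME_OTHER_TYPE = 101 };

    // Before: JS_GLOBAL_OBJECT_TYPE <= t && t <= JS_BUILTINS_OBJECT_TYPE.
    // After: the global object is the only instance type left in the range,
    // so one equality test suffices.
    bool IsGlobalObjectType(InstanceType t) {
      return t == JS_GLOBAL_OBJECT_TYPE;
    }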
@@ -1683,7 +1677,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
LanguageMode language_mode) {
HValue* capacity =
Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
- nullptr, FAST_ELEMENTS);
+ nullptr, nullptr, FAST_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
@@ -1714,7 +1708,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
key_index->ClearFlag(HValue::kCanOverflow);
HValue* candidate_key =
- Add<HLoadKeyed>(elements, key_index, nullptr, FAST_ELEMENTS);
+ Add<HLoadKeyed>(elements, key_index, nullptr, nullptr, FAST_ELEMENTS);
IfBuilder if_undefined(this);
if_undefined.If<HCompareObjectEqAndBranch>(candidate_key,
graph()->GetConstantUndefined());
@@ -1757,7 +1751,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
if_update_with_internalized.Then();
  // Replace a key that is a non-internalized string with the equivalent
  // internalized string, to speed up subsequent lookups.
- Add<HStoreKeyed>(elements, key_index, key, FAST_ELEMENTS);
+ Add<HStoreKeyed>(elements, key_index, key, nullptr, FAST_ELEMENTS);
if_update_with_internalized.Else();
if_update_with_internalized.JoinContinuation(&found_key_match_continuation);
@@ -1770,8 +1764,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
HValue* details_index =
AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2));
details_index->ClearFlag(HValue::kCanOverflow);
- HValue* details =
- Add<HLoadKeyed>(elements, details_index, nullptr, FAST_ELEMENTS);
+ HValue* details = Add<HLoadKeyed>(elements, details_index, nullptr, nullptr,
+ FAST_ELEMENTS);
int details_mask = PropertyDetails::TypeField::kMask;
details = AddUncasted<HBitwise>(Token::BIT_AND, details,
Add<HConstant>(details_mask));
@@ -1782,7 +1776,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
HValue* result_index =
AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
result_index->ClearFlag(HValue::kCanOverflow);
- Push(Add<HLoadKeyed>(elements, result_index, nullptr, FAST_ELEMENTS));
+ Push(Add<HLoadKeyed>(elements, result_index, nullptr, nullptr,
+ FAST_ELEMENTS));
details_compare.Else();
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
@@ -1852,7 +1847,7 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HValue* index,
HValue* input) {
NoObservableSideEffectsScope scope(this);
- HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
Add<HBoundsCheck>(length, max_length);
// Generate size calculation code here in order to make it dominate
@@ -1870,7 +1865,7 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HValue* native_context = Add<HLoadNamedField>(
- global_object, nullptr, HObjectAccess::ForGlobalObjectNativeContext());
+ global_object, nullptr, HObjectAccess::ForJSGlobalObjectNativeContext());
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
@@ -1954,7 +1949,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Load the key.
HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Check if object == key.
IfBuilder if_objectiskey(this);
@@ -1989,8 +1984,9 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Load the key.
HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
- HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ HValue* key =
+ Add<HLoadKeyed>(number_string_cache, key_index, nullptr, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Check if the key is a heap number and compare it with the object.
IfBuilder if_keyisnotsmi(this);
@@ -2042,7 +2038,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Load the value in case of cache hit.
HValue* key_index = Pop();
HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
- Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr,
+ Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr, nullptr,
FAST_ELEMENTS, ALLOW_RETURN_HOLE));
}
if_found.Else();
@@ -2126,7 +2122,7 @@ HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
// Determine the initial map for the global constructor.
HValue* constructor = Add<HLoadKeyed>(native_context, constructor_index,
- nullptr, FAST_ELEMENTS);
+ nullptr, nullptr, FAST_ELEMENTS);
HValue* constructor_initial_map = Add<HLoadNamedField>(
constructor, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
// Allocate and initialize a JSValue wrapper.
@@ -2411,55 +2407,65 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
- // Allocate the string object. HAllocate does not care whether we pass
- // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
- HAllocate* result = BuildAllocate(
- size, HType::String(), STRING_TYPE, allocation_mode);
- Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
-
- // Initialize the string fields.
- Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
- Add<HConstant>(String::kEmptyHashField));
- Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
-
- // Copy characters to the result string.
- IfBuilder if_twobyte(this);
- if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
- if_twobyte.Then();
+ IfBuilder if_size(this);
+ if_size.If<HCompareNumericAndBranch>(
+ size, Add<HConstant>(Page::kMaxRegularHeapObjectSize), Token::LT);
+ if_size.Then();
{
- // Copy characters from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- result, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- left_length);
-
- // Copy characters from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- result, left_length, String::TWO_BYTE_ENCODING,
- right_length);
- }
- if_twobyte.Else();
- {
- // Copy characters from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- result, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- left_length);
-
- // Copy characters from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- result, left_length, String::ONE_BYTE_ENCODING,
- right_length);
- }
- if_twobyte.End();
+ // Allocate the string object. HAllocate does not care whether we pass
+ // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
+ HAllocate* result =
+ BuildAllocate(size, HType::String(), STRING_TYPE, allocation_mode);
+ Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
+
+ // Initialize the string fields.
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+
+ // Copy characters to the result string.
+ IfBuilder if_twobyte(this);
+ if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
+ if_twobyte.Then();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
+ graph()->GetConstant0(), String::TWO_BYTE_ENCODING, left_length);
+
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
+ left_length, String::TWO_BYTE_ENCODING, right_length);
+ }
+ if_twobyte.Else();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
+ graph()->GetConstant0(), String::ONE_BYTE_ENCODING, left_length);
+
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
+ left_length, String::ONE_BYTE_ENCODING, right_length);
+ }
+ if_twobyte.End();
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
- // Return the sequential string.
- Push(result);
+ // Return the sequential string.
+ Push(result);
+ }
+ if_size.Else();
+ {
+    // Fall back to the runtime to add the two strings. The resulting
+    // string has to be allocated in large-object (LO) space.
+ Add<HPushArguments>(left, right);
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
+ }
+ if_size.End();
}
if_sameencodingandsequential.Else();
{
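The rewritten block above guards the inline sequential-string allocation with a size check: only results below Page::kMaxRegularHeapObjectSize are allocated via HAllocate, and anything larger falls back to Runtime::kStringAdd so the result can be placed in large-object space. The guard pattern in isolation, with a made-up limit standing in for the real page-dependent constant:

    // kMaxRegular is an assumption for illustration; the patch uses
    // Page::kMaxRegularHeapObjectSize.
    constexpr int kMaxRegular = 500 * 1024;

    const char* StringAddPath(int result_size_in_bytes) {
      return result_size_in_bytes < kMaxRegular
                 ? "inline HAllocate of a sequential string"
                 : "Runtime::kStringAdd, allocated in large-object space";
    }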
@@ -2587,7 +2593,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- backing_store, key, val, bounds_check, elements_kind, access_type);
+ backing_store, key, val, bounds_check, checked_object->ActualValue(),
+ elements_kind, access_type);
negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
negative_checker.End();
length_checker.End();
@@ -2595,9 +2602,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
DCHECK(store_mode == STANDARD_STORE);
checked_key = Add<HBoundsCheck>(key, length);
- return AddElementAccess(
- backing_store, checked_key, val,
- checked_object, elements_kind, access_type);
+ return AddElementAccess(backing_store, checked_key, val, checked_object,
+ checked_object->ActualValue(), elements_kind,
+ access_type);
}
}
DCHECK(fast_smi_only_elements ||
@@ -2636,7 +2643,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
}
}
- return AddElementAccess(elements, checked_key, val, checked_object,
+ return AddElementAccess(elements, checked_key, val, checked_object, nullptr,
elements_kind, access_type, load_mode);
}
@@ -2658,7 +2665,7 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
HValue* constant_zero = graph()->GetConstant0();
HConstant* max_alloc_length =
- Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ Add<HConstant>(JSArray::kInitialMaxFastElementArray);
HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
max_alloc_length);
IfBuilder if_builder(this);
@@ -2799,26 +2806,23 @@ void HGraphBuilder::BuildJSArrayHeader(HValue* array,
HInstruction* HGraphBuilder::AddElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- PropertyAccessType access_type,
- LoadKeyedHoleMode load_mode) {
+ HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
+ HValue* backing_store_owner, ElementsKind elements_kind,
+ PropertyAccessType access_type, LoadKeyedHoleMode load_mode) {
if (access_type == STORE) {
DCHECK(val != NULL);
if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
val = Add<HClampToUint8>(val);
}
- return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
- STORE_TO_INITIALIZED_ENTRY);
+ return Add<HStoreKeyed>(elements, checked_key, val, backing_store_owner,
+ elements_kind, STORE_TO_INITIALIZED_ENTRY);
}
DCHECK(access_type == LOAD);
DCHECK(val == NULL);
- HLoadKeyed* load = Add<HLoadKeyed>(
- elements, checked_key, dependency, elements_kind, load_mode);
+ HLoadKeyed* load =
+ Add<HLoadKeyed>(elements, checked_key, dependency, backing_store_owner,
+ elements_kind, load_mode);
if (elements_kind == UINT32_ELEMENTS) {
graph()->RecordUint32Instruction(load);
}
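From here on, every HLoadKeyed and HStoreKeyed construction in the file gains one extra operand, the backing store owner, threaded through the widened AddElementAccess above. Ordinary FixedArray accesses pass nullptr; as the HStoreKeyed in BuildAllocateFixedTypedArray further down suggests (it passes `elements`), accesses into a fixed typed array's backing store pass the object that keeps that store alive. A compile-only sketch of the new call shape, with stand-in types rather than V8's:

    struct HValue {};
    enum ElementsKind { FAST_ELEMENTS, UINT8_CLAMPED_ELEMENTS };
    enum PropertyAccessType { LOAD, STORE };

    // Stand-in for the widened signature; the body is a placeholder. The new
    // backing_store_owner operand ties an element access to the object that
    // owns the backing store, not only to the elements pointer itself.
    HValue* AddElementAccess(HValue* elements, HValue* checked_key,
                             HValue* val, HValue* dependency,
                             HValue* backing_store_owner,  // new operand
                             ElementsKind kind, PropertyAccessType access) {
      return access == STORE ? val : elements;
    }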
@@ -2919,7 +2923,7 @@ void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
if (initial_capacity >= 0) {
for (int i = 0; i < initial_capacity; i++) {
HInstruction* key = Add<HConstant>(i);
- Add<HStoreKeyed>(elements, key, value, elements_kind);
+ Add<HStoreKeyed>(elements, key, value, nullptr, elements_kind);
}
} else {
// Carefully loop backwards so that the "from" remains live through the loop
@@ -2933,7 +2937,7 @@ void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1());
adjusted_key->ClearFlag(HValue::kCanOverflow);
- Add<HStoreKeyed>(elements, adjusted_key, value, elements_kind);
+ Add<HStoreKeyed>(elements, adjusted_key, value, nullptr, elements_kind);
builder.EndBody();
}
@@ -2976,9 +2980,10 @@ void HGraphBuilder::BuildCopyProperties(HValue* from_properties,
key = AddUncasted<HSub>(key, graph()->GetConstant1());
key->ClearFlag(HValue::kCanOverflow);
- HValue* element = Add<HLoadKeyed>(from_properties, key, nullptr, kind);
+ HValue* element =
+ Add<HLoadKeyed>(from_properties, key, nullptr, nullptr, kind);
- Add<HStoreKeyed>(to_properties, key, element, kind);
+ Add<HStoreKeyed>(to_properties, key, element, nullptr, kind);
builder.EndBody();
}
@@ -3015,9 +3020,10 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
// Unroll the loop for small elements kinds.
for (int i = 0; i < constant_capacity; i++) {
HValue* key_constant = Add<HConstant>(i);
- HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant,
- nullptr, from_elements_kind);
- Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind);
+ HInstruction* value = Add<HLoadKeyed>(
+ from_elements, key_constant, nullptr, nullptr, from_elements_kind);
+ Add<HStoreKeyed>(to_elements, key_constant, value, nullptr,
+ to_elements_kind);
}
} else {
if (!pre_fill_with_holes &&
@@ -3034,7 +3040,7 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
key = AddUncasted<HSub>(key, graph()->GetConstant1());
key->ClearFlag(HValue::kCanOverflow);
- HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr,
+ HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr, nullptr,
from_elements_kind, ALLOW_RETURN_HOLE);
ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
@@ -3049,13 +3055,15 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
? Add<HConstant>(HConstant::kHoleNaN)
: graph()->GetConstantHole();
- Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
+ Add<HStoreKeyed>(to_elements, key, hole_constant, nullptr, kind);
if_hole.Else();
- HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ HStoreKeyed* store =
+ Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
if_hole.End();
} else {
- HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ HStoreKeyed* store =
+ Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
@@ -3128,10 +3136,10 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
// This function implicitly relies on the fact that the
// FastCloneShallowArrayStub is called only for literals shorter than
- // JSObject::kInitialMaxFastElementArray.
+ // JSArray::kInitialMaxFastElementArray.
  // Can't add HBoundsCheck here because otherwise the stub would then need
  // to build a frame eagerly.
HConstant* size_upper_bound = EstablishElementsAllocationSize(
- kind, JSObject::kInitialMaxFastElementArray);
+ kind, JSArray::kInitialMaxFastElementArray);
elements->set_size_upper_bound(size_upper_bound);
Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
@@ -3263,7 +3271,7 @@ HInstruction* HGraphBuilder::BuildGetNativeContext() {
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
return Add<HLoadNamedField>(global_object, nullptr,
HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset));
+ JSGlobalObject::kNativeContextOffset));
}
@@ -3275,7 +3283,7 @@ HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
context, nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset);
+ JSGlobalObject::kNativeContextOffset);
return Add<HLoadNamedField>(global_object, nullptr, access);
}
@@ -3342,7 +3350,8 @@ HInstruction* HGraphBuilder::BuildGetArrayFunction() {
HInstruction* native_context = BuildGetNativeContext();
HInstruction* index =
Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
- return Add<HLoadKeyed>(native_context, index, nullptr, FAST_ELEMENTS);
+ return Add<HLoadKeyed>(native_context, index, nullptr, nullptr,
+ FAST_ELEMENTS);
}
@@ -3430,12 +3439,12 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HInstruction* index = builder()->Add<HConstant>(
static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
- HInstruction* map_array =
- builder()->Add<HLoadKeyed>(native_context, index, nullptr, FAST_ELEMENTS);
+ HInstruction* map_array = builder()->Add<HLoadKeyed>(
+ native_context, index, nullptr, nullptr, FAST_ELEMENTS);
HInstruction* kind_index = builder()->Add<HConstant>(kind_);
- return builder()->Add<HLoadKeyed>(map_array, kind_index, nullptr,
+ return builder()->Add<HLoadKeyed>(map_array, kind_index, nullptr, nullptr,
FAST_ELEMENTS);
}
@@ -3504,6 +3513,11 @@ HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* elements_size =
builder()->BuildCalculateElementsSize(kind_, capacity);
+ // Bail out for large objects.
+ HValue* max_regular_heap_object_size =
+ builder()->Add<HConstant>(Page::kMaxRegularHeapObjectSize);
+ builder()->Add<HBoundsCheck>(elements_size, max_regular_heap_object_size);
+
// Allocate (dealing with failure appropriately)
HAllocate* array_object = builder()->AllocateJSArrayObject(mode_);
@@ -3546,7 +3560,7 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset);
+ JSGlobalObject::kNativeContextOffset);
HValue* native_context = Add<HLoadNamedField>(global_object, nullptr, access);
HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
return Add<HLoadNamedField>(native_context, nullptr, function_access);
@@ -3566,7 +3580,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_ = &initial_function_state_;
- InitializeAstVisitor(info->isolate(), info->zone());
+ InitializeAstVisitor(info->isolate());
if (top_info()->is_tracking_positions()) {
SetSourcePosition(info->shared_info()->start_position());
}
@@ -3679,7 +3693,7 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
- start_environment_->set_ast_id(BailoutId::Prologue());
+ start_environment_->set_ast_id(BailoutId::FunctionContext());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
}
@@ -4414,7 +4428,7 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, expr, true_block, false_block);
+ TestContext for_control(this, expr, true_block, false_block);
Visit(expr);
}
@@ -5384,7 +5398,7 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
HValue* key =
Add<HLoadKeyed>(environment()->ExpressionStackAt(2), // Enum cache.
- index, index, FAST_ELEMENTS);
+ index, index, nullptr, FAST_ELEMENTS);
if (fast) {
// Check if the expected map still matches that of the enumerable.
@@ -5520,6 +5534,14 @@ void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
}
+void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ DCHECK(!HasStackOverflow());
+ DCHECK(current_block() != NULL);
+ DCHECK(current_block()->HasPredecessor());
+ return Bailout(kDoExpression);
+}
+
+
void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
@@ -5618,7 +5640,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
- Handle<GlobalObject> global(current_info()->global_object());
+ Handle<JSGlobalObject> global(current_info()->global_object());
// Lookup in script contexts.
{
@@ -5696,16 +5718,6 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
instr->SetDependsOnFlag(kGlobalVars);
return ast_context()->ReturnInstruction(instr, expr->id());
}
- } else if (variable->IsGlobalSlot()) {
- DCHECK(variable->index() > 0);
- DCHECK(variable->IsStaticGlobalObjectProperty());
- int slot_index = variable->index();
- int depth = scope()->ContextChainLength(variable->scope());
-
- HLoadGlobalViaContext* instr =
- New<HLoadGlobalViaContext>(depth, slot_index);
- return ast_context()->ReturnInstruction(instr, expr->id());
-
} else {
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
@@ -5924,7 +5936,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Map> map = property->GetReceiverType();
Handle<String> name = key->AsPropertyName();
HValue* store;
- FeedbackVectorICSlot slot = property->GetSlot();
+ FeedbackVectorSlot slot = property->GetSlot();
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
@@ -5989,7 +6001,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
HInstruction* literal;
@@ -6099,7 +6110,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
- HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value,
+ HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value, nullptr,
boilerplate_elements_kind);
instr->SetUninitialized(uninitialized);
break;
@@ -6498,9 +6509,12 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() {
- JSFunction* ctor = IC::GetRootConstructor(
- *map_, current_info()->closure()->context()->native_context());
- if (ctor != NULL) return handle(ctor->initial_map());
+ Handle<JSFunction> ctor;
+ if (Map::GetConstructorFunction(
+ map_, handle(current_info()->closure()->context()->native_context()))
+ .ToHandle(&ctor)) {
+ return handle(ctor->initial_map());
+ }
return map_;
}
@@ -6581,7 +6595,8 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (info->NeedsWrappingFor(info->accessor())) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
- return New<HCallFunction>(function, argument_count, WRAP_AND_CALL);
+ return New<HCallFunction>(function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined);
} else if (FLAG_inline_accessors && can_inline_accessor) {
bool success = info->IsLoad()
? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
@@ -6604,9 +6619,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
- SmallMapList* maps, Handle<String> name) {
+ SmallMapList* maps, Handle<Name> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
@@ -6774,7 +6789,7 @@ static bool AreStringTypes(SmallMapList* maps) {
void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
- FeedbackVectorICSlot slot,
+ FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id,
bool is_uninitialized) {
if (!prop->key()->IsPropertyName()) {
@@ -6834,9 +6849,8 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
- Variable* var, HValue* value, FeedbackVectorICSlot ic_slot,
- BailoutId ast_id) {
- Handle<GlobalObject> global(current_info()->global_object());
+ Variable* var, HValue* value, FeedbackVectorSlot slot, BailoutId ast_id) {
+ Handle<JSGlobalObject> global(current_info()->global_object());
// Lookup in script contexts.
{
@@ -6923,18 +6937,6 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
- } else if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int slot_index = var->index();
- int depth = scope()->ContextChainLength(var->scope());
-
- HStoreGlobalViaContext* instr = Add<HStoreGlobalViaContext>(
- value, depth, slot_index, function_language_mode());
- USE(instr);
- DCHECK(instr->HasObservableSideEffects());
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
-
} else {
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
@@ -6945,7 +6947,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (FLAG_vector_stores) {
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- instr->SetVectorAndSlot(vector, ic_slot);
+ instr->SetVectorAndSlot(vector, slot);
}
USE(instr);
DCHECK(instr->HasObservableSideEffects());
@@ -7248,7 +7250,7 @@ HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) {
HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
@@ -7305,7 +7307,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value) {
if (access_type == LOAD) {
InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
@@ -7373,6 +7375,8 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
PrototypeIterator iter(map);
JSObject* holder = NULL;
while (!iter.IsAtEnd()) {
+ // JSProxies can't occur here because we wouldn't have installed a
+ // non-generic IC if there were any.
holder = *PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
}
@@ -7499,7 +7503,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
- Expression* expr, FeedbackVectorICSlot slot, HValue* object, HValue* key,
+ Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
HValue* val, SmallMapList* maps, PropertyAccessType access_type,
KeyedAccessStoreMode store_mode, bool* has_side_effects) {
*has_side_effects = false;
@@ -7632,21 +7636,44 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* obj, HValue* key, HValue* val, Expression* expr,
- FeedbackVectorICSlot slot, BailoutId ast_id, BailoutId return_id,
+ FeedbackVectorSlot slot, BailoutId ast_id, BailoutId return_id,
PropertyAccessType access_type, bool* has_side_effects) {
- if (key->ActualValue()->IsConstant()) {
+  // The type feedback for a keyed access may record the property name.
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ HValue* expected_key = key;
+ if (!key->ActualValue()->IsConstant()) {
+ Name* name = nullptr;
+ if (access_type == LOAD) {
+ KeyedLoadICNexus nexus(vector, slot);
+ name = nexus.FindFirstName();
+ } else if (FLAG_vector_stores) {
+ KeyedStoreICNexus nexus(vector, slot);
+ name = nexus.FindFirstName();
+ }
+ if (name != nullptr) {
+ Handle<Name> handle_name(name);
+ expected_key = Add<HConstant>(handle_name);
+ // We need a check against the key.
+ bool in_new_space = isolate()->heap()->InNewSpace(*handle_name);
+ Unique<Name> unique_name = Unique<Name>::CreateUninitialized(handle_name);
+ Add<HCheckValue>(key, unique_name, in_new_space);
+ }
+ }
+ if (expected_key->ActualValue()->IsConstant()) {
Handle<Object> constant =
- HConstant::cast(key->ActualValue())->handle(isolate());
+ HConstant::cast(expected_key->ActualValue())->handle(isolate());
uint32_t array_index;
- if (constant->IsString() &&
- !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) {
+ if ((constant->IsString() &&
+ !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) ||
+ constant->IsSymbol()) {
if (!constant->IsUniqueName()) {
constant = isolate()->factory()->InternalizeString(
Handle<String>::cast(constant));
}
HValue* access =
BuildNamedAccess(access_type, ast_id, return_id, expr, slot, obj,
- Handle<String>::cast(constant), val, false);
+ Handle<Name>::cast(constant), val, false);
if (access == NULL || access->IsPhi() ||
HInstruction::cast(access)->IsLinked()) {
*has_side_effects = false;
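The new block consults the IC feedback before treating a non-constant key as generic: if KeyedLoadICNexus / KeyedStoreICNexus recorded a single name via FindFirstName(), the access is compiled as a named access guarded by an HCheckValue on the key, which deoptimizes if the key ever differs. The decision in stand-in form, with std::optional playing the role of the feedback nexus (none of this is V8 API):

    #include <optional>
    #include <string>

    std::string PlanKeyedAccess(bool key_is_constant,
                                const std::optional<std::string>& feedback_name) {
      if (!key_is_constant && feedback_name.has_value()) {
        // Mirrors Add<HCheckValue>(key, unique_name, in_new_space) followed
        // by BuildNamedAccess: a fast named path, guarded by the key check.
        return "check key == '" + *feedback_name + "', then named access";
      }
      return "generic keyed access";
    }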
@@ -7817,8 +7844,8 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HValue* HOptimizedGraphBuilder::BuildNamedAccess(
PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
- Expression* expr, FeedbackVectorICSlot slot, HValue* object,
- Handle<String> name, HValue* value, bool is_uninitialized) {
+ Expression* expr, FeedbackVectorSlot slot, HValue* object,
+ Handle<Name> name, HValue* value, bool is_uninitialized) {
SmallMapList* maps;
ComputeReceiverTypes(expr, object, &maps, zone());
DCHECK(maps != NULL);
@@ -8149,10 +8176,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// use the regular CallFunctionStub for method calls to wrap the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
- HInstruction* call = needs_wrapping
- ? NewUncasted<HCallFunction>(
- function, argument_count, WRAP_AND_CALL)
- : BuildCallConstantFunction(target, argument_count);
+ HInstruction* call =
+ needs_wrapping ? NewUncasted<HCallFunction>(
+ function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined)
+ : BuildCallConstantFunction(target, argument_count);
PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
Drop(1); // Drop the function.
@@ -8181,10 +8209,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
environment()->SetExpressionStackAt(0, receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- CallFunctionFlags flags = receiver->type().IsJSObject()
- ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
HInstruction* call = New<HCallFunction>(
- function, argument_count, flags);
+ function, argument_count, ConvertReceiverMode::kNotNullOrUndefined);
PushArgumentsFromEnvironment(argument_count);
@@ -8247,7 +8273,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (target_shared->force_inline()) {
return 0;
}
- if (target->IsBuiltin()) {
+ if (target->shared()->IsBuiltin()) {
return kNotInlinable;
}
@@ -8265,11 +8291,12 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
// Target must be inlineable.
- if (!target_shared->IsInlineable()) {
+ BailoutReason noopt_reason = target_shared->disable_optimization_reason();
+ if (!target_shared->IsInlineable() && noopt_reason != kHydrogenFilter) {
TraceInline(target, caller, "target not inlineable");
return kNotInlinable;
}
- if (target_shared->disable_optimization_reason() != kNoReason) {
+ if (noopt_reason != kNoReason && noopt_reason != kHydrogenFilter) {
TraceInline(target, caller, "target contains unsupported syntax [early]");
return kNotInlinable;
}
@@ -8340,6 +8367,11 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
CompilationInfo target_info(&parse_info);
Handle<SharedFunctionInfo> target_shared(target->shared());
+
+ if (IsClassConstructor(target_shared->kind())) {
+ TraceInline(target, caller, "target is classConstructor");
+ return false;
+ }
if (target_shared->HasDebugInfo()) {
TraceInline(target, caller, "target is being debugged");
return false;
@@ -8392,13 +8424,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
}
- // Generate the deoptimization data for the unoptimized version of
- // the target function if we don't already have it.
- if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
- TraceInline(target, caller, "could not generate deoptimization info");
- return false;
- }
-
// In strong mode it is an error to call a function with too few arguments.
// In that case do not inline because then the arity check would be skipped.
if (is_strong(function->language_mode()) &&
@@ -8408,6 +8433,17 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
return false;
}
+ // Generate the deoptimization data for the unoptimized version of
+ // the target function if we don't already have it.
+ if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
+ TraceInline(target, caller, "could not generate deoptimization info");
+ return false;
+ }
+  // Remember that we inlined this function. This call must come right
+  // after the EnsureDeoptimizationSupport call above, so that the code
+  // flusher does not discard the unoptimized code that carries the
+  // deoptimization support.
+ top_info()->AddInlinedFunction(target_info.shared_info());
+
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
@@ -8840,16 +8876,16 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
elements_kind, length);
}
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
- result = AddElementAccess(elements, reduced_length, NULL,
- bounds_check, elements_kind, LOAD);
+ result = AddElementAccess(elements, reduced_length, nullptr,
+ bounds_check, nullptr, elements_kind, LOAD);
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
? graph()->GetConstantHole()
: Add<HConstant>(HConstant::kHoleNaN);
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
}
- AddElementAccess(
- elements, reduced_length, hole, bounds_check, elements_kind, STORE);
+ AddElementAccess(elements, reduced_length, hole, bounds_check, nullptr,
+ elements_kind, STORE);
Add<HStoreNamedField>(
checked_object, HObjectAccess::ForArrayLength(elements_kind),
reduced_length, STORE_TO_INITIALIZED_ENTRY);
@@ -8974,8 +9010,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
{
// Remember the result.
if (!ast_context()->IsEffect()) {
- Push(AddElementAccess(elements, graph()->GetConstant0(), NULL,
- lengthiszero, kind, LOAD));
+ Push(AddElementAccess(elements, graph()->GetConstant0(), nullptr,
+ lengthiszero, nullptr, kind, LOAD));
}
// Compute the new length.
@@ -8992,10 +9028,11 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
key->ClearFlag(HValue::kCanOverflow);
ElementsKind copy_kind =
kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
- HValue* element = AddUncasted<HLoadKeyed>(
- elements, key, lengthiszero, copy_kind, ALLOW_RETURN_HOLE);
- HStoreKeyed* store =
- Add<HStoreKeyed>(elements, new_key, element, copy_kind);
+ HValue* element =
+ AddUncasted<HLoadKeyed>(elements, key, lengthiszero, nullptr,
+ copy_kind, ALLOW_RETURN_HOLE);
+ HStoreKeyed* store = Add<HStoreKeyed>(elements, new_key, element,
+ nullptr, copy_kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
loop.EndBody();
@@ -9005,8 +9042,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
? graph()->GetConstantHole()
: Add<HConstant>(HConstant::kHoleNaN);
if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
- Add<HStoreKeyed>(
- elements, new_length, hole, kind, INITIALIZING_STORE);
+ Add<HStoreKeyed>(elements, new_length, hole, nullptr, kind,
+ INITIALIZING_STORE);
// Remember new length.
Add<HStoreNamedField>(
@@ -9332,6 +9369,7 @@ bool HOptimizedGraphBuilder::TryIndirectCall(Call* expr) {
}
+// f.apply(...)
void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
CHECK_ALIVE(VisitForValue(args->at(0)));
@@ -9467,8 +9505,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr, kind,
- ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HCompareNumericAndBranch>(element, search_element,
Token::EQ_STRICT);
@@ -9489,8 +9527,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
- kind, ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HIsStringAndBranch>(element);
if_issame.AndIf<HStringCompareAndBranch>(
@@ -9519,8 +9557,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
- kind, ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_element_isnumber(this);
if_element_isnumber.If<HIsSmiAndBranch>(element);
@@ -9551,8 +9589,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
- kind, ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HCompareObjectEqAndBranch>(
element, search_element);
@@ -9682,8 +9720,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
- call = New<HCallFunction>(
- function, argument_count, WRAP_AND_CALL);
+ call = New<HCallFunction>(function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined);
} else if (TryInlineCall(expr)) {
return;
} else {
@@ -9706,9 +9744,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
- CallFunctionFlags flags = receiver->type().IsJSObject()
- ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
- call = New<HCallFunction>(function, argument_count, flags);
+ call = New<HCallFunction>(function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined);
}
PushArgumentsFromEnvironment(argument_count);
@@ -9733,7 +9770,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- if (expr->IsMonomorphic()) {
+ if (expr->IsMonomorphic() &&
+ !IsClassConstructor(expr->target()->shared()->kind())) {
Add<HCheckValue>(function, expr->target());
// Patch the global object on the stack by the expected receiver.
@@ -9757,8 +9795,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
PushArgumentsFromEnvironment(argument_count);
- HCallFunction* call_function =
- New<HCallFunction>(function, argument_count);
+ HCallFunction* call_function = New<HCallFunction>(
+ function, argument_count, ConvertReceiverMode::kNullOrUndefined);
call = call_function;
if (expr->is_uninitialized() &&
expr->IsUsingCallFeedbackICSlot(isolate())) {
@@ -9766,7 +9804,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// through the type vector.
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
+ FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
call_function->SetVectorAndSlot(vector, slot);
}
}
@@ -9826,6 +9864,7 @@ void HOptimizedGraphBuilder::BuildInlinedCallArray(
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
+ !IsClassConstructor(constructor->shared()->kind()) &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
constructor->initial_map()->instance_size() <
HAllocate::kMaxInlineSize;
@@ -10009,7 +10048,7 @@ HValue* HGraphBuilder::BuildAllocateEmptyArrayBuffer(HValue* byte_length) {
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HValue* native_context = Add<HLoadNamedField>(
- global_object, nullptr, HObjectAccess::ForGlobalObjectNativeContext());
+ global_object, nullptr, HObjectAccess::ForJSGlobalObjectNativeContext());
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
@@ -10101,25 +10140,6 @@ void HOptimizedGraphBuilder::GenerateDataViewInitialize(
}
-static Handle<Map> TypedArrayMap(Isolate* isolate,
- ExternalArrayType array_type,
- ElementsKind target_kind) {
- Handle<Context> native_context = isolate->native_context();
- Handle<JSFunction> fun;
- switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- fun = Handle<JSFunction>(native_context->type##_array_fun()); \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- }
- Handle<Map> map(fun->initial_map());
- return Map::AsElementsKind(map, target_kind);
-}
-
-
HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
ExternalArrayType array_type,
bool is_zero_byte_offset,
@@ -10222,7 +10242,7 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
HValue* key = builder.BeginBody(
Add<HConstant>(static_cast<int32_t>(0)),
length, Token::LT);
- Add<HStoreKeyed>(backing_store, key, filler, fixed_elements_kind);
+ Add<HStoreKeyed>(backing_store, key, filler, elements, fixed_elements_kind);
builder.EndBody();
}
@@ -10333,9 +10353,6 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
if (buffer != NULL) {
elements = BuildAllocateExternalElements(
array_type, is_zero_byte_offset, buffer, byte_offset, length);
- Handle<Map> obj_map =
- TypedArrayMap(isolate(), array_type, fixed_elements_kind);
- AddStoreMapConstant(obj, obj_map);
} else {
DCHECK(is_zero_byte_offset);
elements = BuildAllocateFixedTypedArray(array_type, element_size,
@@ -10623,9 +10640,8 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
void HOptimizedGraphBuilder::BuildStoreForEffect(
- Expression* expr, Property* prop, FeedbackVectorICSlot slot,
- BailoutId ast_id, BailoutId return_id, HValue* object, HValue* key,
- HValue* value) {
+ Expression* expr, Property* prop, FeedbackVectorSlot slot, BailoutId ast_id,
+ BailoutId return_id, HValue* object, HValue* key, HValue* value) {
EffectContext for_effect(this);
Push(object);
if (key != NULL) Push(key);
@@ -11963,10 +11979,11 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
int elements_length = elements->length();
for (int i = 0; i < elements_length; i++) {
HValue* key_constant = Add<HConstant>(i);
- HInstruction* value_instruction = Add<HLoadKeyed>(
- boilerplate_elements, key_constant, nullptr, kind, ALLOW_RETURN_HOLE);
+ HInstruction* value_instruction =
+ Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
+ kind, ALLOW_RETURN_HOLE);
HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
- value_instruction, kind);
+ value_instruction, nullptr, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
}
@@ -11989,15 +12006,15 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
HInstruction* result =
BuildFastLiteral(value_object, site_context);
site_context->ExitScope(current_site, value_object);
- Add<HStoreKeyed>(object_elements, key_constant, result, kind);
+ Add<HStoreKeyed>(object_elements, key_constant, result, nullptr, kind);
} else {
ElementsKind copy_kind =
kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
HInstruction* value_instruction =
- Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr,
+ Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
copy_kind, ALLOW_RETURN_HOLE);
Add<HStoreKeyed>(object_elements, key_constant, value_instruction,
- copy_kind);
+ nullptr, copy_kind);
}
}
}
@@ -12272,6 +12289,38 @@ void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateToLength(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ Callable callable = CodeFactory::ToLength(isolate());
+ HValue* input = Pop();
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateToNumber(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ Callable callable = CodeFactory::ToNumber(isolate());
+ HValue* input = Pop();
+ if (input->type().IsTaggedNumber()) {
+ return ast_context()->ReturnValue(input);
+ } else {
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+ }
+}
+
+
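Both new intrinsics lower to CodeFactory stubs; GenerateToNumber additionally short-circuits inputs the type system already knows to be tagged numbers. For reference, the clamping that ES2015 ToLength performs on numeric inputs (ToInteger, then clamp to [0, 2^53 - 1]), written as self-contained C++:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    double ToLength(double x) {
      if (std::isnan(x) || x <= 0) return 0;           // NaN, negatives -> +0
      constexpr double kMaxSafe = 9007199254740991.0;  // 2^53 - 1
      return std::min(std::trunc(x), kMaxSafe);        // ToInteger, then clamp
    }

    int main() {
      std::printf("%.0f %.0f %.0f\n",
                  ToLength(-1), ToLength(4.9), ToLength(1e300));
      // prints: 0 4 9007199254740991
    }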
void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12568,18 +12617,6 @@ void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
}
-// Fast support for StringAdd.
-void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* result = NewUncasted<HStringAdd>(left, right);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
DCHECK_EQ(3, call->arguments()->length());
@@ -12609,6 +12646,26 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateRegExpFlags(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ HValue* regexp = Pop();
+ HInstruction* result =
+ New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpFlags());
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateRegExpSource(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ HValue* regexp = Pop();
+ HInstruction* result =
+ New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpSource());
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12679,52 +12736,6 @@ void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
}
-// Fast call for custom callbacks.
-void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
- // 1 ~ The function to call is not itself an argument to the call.
- int arg_count = call->arguments()->length() - 1;
- DCHECK(arg_count >= 1); // There's always at least a receiver.
-
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- // The function is the last argument
- HValue* function = Pop();
- // Push the arguments to the stack
- PushArgumentsFromEnvironment(arg_count);
-
- IfBuilder if_is_jsfunction(this);
- if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
-
- if_is_jsfunction.Then();
- {
- HInstruction* invoke_result =
- Add<HInvokeFunction>(function, arg_count);
- if (!ast_context()->IsEffect()) {
- Push(invoke_result);
- }
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
-
- if_is_jsfunction.Else();
- {
- HInstruction* call_result =
- Add<HCallFunction>(function, arg_count);
- if (!ast_context()->IsEffect()) {
- Push(call_result);
- }
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_is_jsfunction.End();
-
- if (ast_context()->IsEffect()) {
- // EffectContext::ReturnValue ignores the value, so we can just pass
- // 'undefined' (as we do not have the call result anymore).
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
- } else {
- return ast_context()->ReturnValue(Pop());
- }
-}
-
-
// Fast call to math functions.
void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
DCHECK_EQ(2, call->arguments()->length());
@@ -12773,17 +12784,6 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateLikely(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- Visit(call->arguments()->at(0));
-}
-
-
-void HOptimizedGraphBuilder::GenerateUnlikely(CallRuntime* call) {
- return GenerateLikely(call);
-}
-
-
void HOptimizedGraphBuilder::GenerateHasInPrototypeChain(CallRuntime* call) {
DCHECK_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12803,7 +12803,7 @@ void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
HValue* index = Pop();
HValue* object = Pop();
HInstruction* result = New<HLoadKeyed>(
- object, index, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+ object, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12817,7 +12817,7 @@ void HOptimizedGraphBuilder::GenerateFixedArraySet(CallRuntime* call) {
HValue* index = Pop();
HValue* object = Pop();
NoObservableSideEffectsScope no_effects(this);
- Add<HStoreKeyed>(object, index, value, FAST_HOLEY_ELEMENTS);
+ Add<HStoreKeyed>(object, index, value, nullptr, FAST_HOLEY_ELEMENTS);
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -13510,10 +13510,10 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type,
int assigned_reg = op->index();
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
- DoubleRegister::AllocationIndexToString(assigned_reg));
+ DoubleRegister::from_code(assigned_reg).ToString());
} else {
DCHECK(op->IsRegister());
- trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
+ trace_.Add(" \"%s\"", Register::from_code(assigned_reg).ToString());
}
} else if (range->IsSpilled()) {
LOperand* op = range->TopLevel()->GetSpillOperand();
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index c1215a33ba..62e55c3efe 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_H_
-#define V8_HYDROGEN_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_H_
+#define V8_CRANKSHAFT_HYDROGEN_H_
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/ast.h"
#include "src/bailout-reason.h"
#include "src/compiler.h"
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/scopes.h"
#include "src/zone.h"
@@ -788,15 +788,13 @@ class EffectContext final : public AstContext {
explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
- virtual ~EffectContext();
+ ~EffectContext() override;
void ReturnValue(HValue* value) override;
- virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
+ void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+ void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+ void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) override;
};
@@ -805,15 +803,13 @@ class ValueContext final : public AstContext {
ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
- virtual ~ValueContext();
+ ~ValueContext() override;
void ReturnValue(HValue* value) override;
- virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
+ void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+ void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+ void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) override;
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -835,12 +831,10 @@ class TestContext final : public AstContext {
}
void ReturnValue(HValue* value) override;
- virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
+ void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+ void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+ void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) override;
static TestContext* cast(AstContext* context) {
DCHECK(context->IsTest());
@@ -1409,11 +1403,8 @@ class HGraphBuilder {
KeyedAccessStoreMode store_mode);
HInstruction* AddElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
+ HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
+ HValue* backing_store_owner, ElementsKind elements_kind,
PropertyAccessType access_type,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
@@ -2210,7 +2201,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsJSProxy) \
F(IsConstructCall) \
F(Call) \
- F(CallFunction) \
F(ArgumentsLength) \
F(Arguments) \
F(ValueOf) \
@@ -2226,6 +2216,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(ToInteger) \
F(ToObject) \
F(ToString) \
+ F(ToLength) \
+ F(ToNumber) \
F(IsFunction) \
F(IsSpecObject) \
F(MathPow) \
@@ -2235,14 +2227,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(FastOneByteArrayJoin) \
F(DebugBreakInOptimizedCode) \
F(StringCharCodeAt) \
- F(StringAdd) \
F(SubString) \
F(RegExpExec) \
F(RegExpConstructResult) \
+ F(RegExpFlags) \
+ F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
- F(Likely) \
- F(Unlikely) \
F(HasInPrototypeChain) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
@@ -2390,7 +2381,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void SetUpScope(Scope* scope);
void VisitStatements(ZoneList<Statement*>* statements) override;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -2483,15 +2474,15 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
const char* failure_reason);
void HandleGlobalVariableAssignment(Variable* var, HValue* value,
- FeedbackVectorICSlot ic_slot,
+ FeedbackVectorSlot slot,
BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr,
- FeedbackVectorICSlot slot, BailoutId ast_id, BailoutId return_id,
- HValue* object, HValue* value, SmallMapList* types, Handle<String> name);
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+ BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
+ SmallMapList* types, Handle<Name> name);
HValue* BuildAllocateExternalElements(
ExternalArrayType array_type,
@@ -2740,8 +2731,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
                           BailoutId return_id, Expression* expr,
- FeedbackVectorICSlot slot, HValue* object,
- Handle<String> name, HValue* value,
+ FeedbackVectorSlot slot, HValue* object,
+ Handle<Name> name, HValue* value,
bool is_uninitialized = false);
void HandlePolymorphicCallNamed(Call* expr,
@@ -2777,7 +2768,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
- Expression* expr, FeedbackVectorICSlot slot,
+ Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
@@ -2796,18 +2787,18 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(
- Expression* expr, FeedbackVectorICSlot slot, HValue* object, HValue* key,
+ Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
HValue* val, SmallMapList* maps, PropertyAccessType access_type,
KeyedAccessStoreMode store_mode, bool* has_side_effects);
HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
- Expression* expr, FeedbackVectorICSlot slot,
+ Expression* expr, FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id,
PropertyAccessType access_type,
bool* has_side_effects);
HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
- FeedbackVectorICSlot slot, HValue* object,
+ FeedbackVectorSlot slot, HValue* object,
Handle<Name> name, HValue* value,
bool is_uninitialized = false);
@@ -2820,12 +2811,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key);
void BuildStoreForEffect(Expression* expression, Property* prop,
- FeedbackVectorICSlot slot, BailoutId ast_id,
+ FeedbackVectorSlot slot, BailoutId ast_id,
BailoutId return_id, HValue* object, HValue* key,
HValue* value);
void BuildStore(Expression* expression, Property* prop,
- FeedbackVectorICSlot slot, BailoutId ast_id,
+ FeedbackVectorSlot slot, BailoutId ast_id,
BailoutId return_id, bool is_uninitialized = false);
HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
@@ -3073,6 +3064,7 @@ class NoObservableSideEffectsScope final {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_H_
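The hydrogen.h hunks above, like the headers that follow, rename the include guards for the new src/crankshaft/ directory, drop the redundant "virtual" keyword where "override" already implies it, and split the combined "} }" namespace closer onto one line per namespace. A minimal sketch of the resulting declaration style (the class and members here are illustrative, not part of the patch):

    namespace v8 {
    namespace internal {

    class ExampleContext final : public AstContext {
     public:
      ~ExampleContext() override;  // was: virtual ~ExampleContext();
      void ReturnValue(HValue* value) override;
    };

    }  // namespace internal
    }  // namespace v8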
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 850c182144..7b05078a9b 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -4,14 +4,15 @@
#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
-#include "src/hydrogen-osr.h"
#include "src/ia32/frames-ia32.h"
-#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
@@ -101,7 +102,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
__ movsd(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ XMMRegister::from_code(save_iterator.Current()));
save_iterator.Advance();
count++;
}
@@ -116,8 +117,8 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
+ __ movsd(XMMRegister::from_code(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
@@ -137,26 +138,6 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
- }
-
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ Move(edx, Immediate(kNoAlignmentPadding));
@@ -192,7 +173,6 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ Prologue(info()->IsCodePreAgingActive());
}
- info()->AddNoFrameRange(0, masm_->pc_offset());
}
if (info()->IsOptimizing() &&
@@ -515,13 +495,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
+XMMRegister LCodeGen::ToDoubleRegister(int code) const {
+ return XMMRegister::from_code(code);
}
@@ -2701,11 +2681,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
- no_frame_start = masm_->pc_offset();
}
if (dynamic_frame_alignment_) {
Label no_padding;
@@ -2717,9 +2695,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
EmitReturn(instr, false);
- if (no_frame_start != -1) {
- info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2734,7 +2709,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -2748,7 +2723,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -2769,24 +2744,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3233,7 +3190,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ mov(receiver, Operand(receiver, global_offset));
- const int proxy_offset = GlobalObject::kGlobalProxyOffset;
+ const int proxy_offset = JSGlobalObject::kGlobalProxyOffset;
__ mov(receiver, FieldOperand(receiver, proxy_offset));
__ bind(&receiver_ok);
}
@@ -3797,7 +3754,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3811,15 +3768,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ mov(vector_register, vector);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Set(eax, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4026,30 +3980,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4465,7 +4395,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5193,11 +5124,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
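Throughout the codegen changes above, the allocation-index indirection is gone: the value the register allocator hands around is now the architectural register code itself. The same hunks rename FeedbackVectorICSlot to FeedbackVectorSlot and turn the oversized-constant fallback in DoAllocate into a CHECK, since sizes above Page::kMaxRegularHeapObjectSize are evidently no longer expected there. A condensed before/after sketch, using the accessors the patch switches to:

    // Before: an allocator-local index had to be translated first.
    //   Register reg = Register::FromAllocationIndex(index);
    // After: the stored value is the machine register code.
    Register reg = Register::from_code(code);
    XMMRegister xmm = XMMRegister::from_code(code);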
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index a26903a9ac..03f6a89b35 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "src/ia32/lithium-ia32.h"
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
#include "src/base/logging.h"
+#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
+#include "src/crankshaft/ia32/lithium-ia32.h"
+#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
-#include "src/ia32/lithium-gap-resolver-ia32.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
@@ -398,6 +397,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_IA32_LITHIUM_CODEGEN_IA32_H_
+#endif // V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
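The ia32 hunks above also reorder includes: each .cc file now includes its own header first, and the remaining includes stay alphabetical under their new crankshaft/ paths. A sketch of the shape this produces (list abbreviated):

    #include "src/crankshaft/ia32/lithium-codegen-ia32.h"

    #include "src/base/bits.h"
    #include "src/crankshaft/hydrogen-osr.h"
    #include "src/deoptimizer.h"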
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
index 0926a0f21a..b90f6bb21e 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ia32/lithium-gap-resolver-ia32.h"
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
@@ -165,10 +166,14 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
+ int skip_index = reg.is(no_reg) ? -1 : reg.code();
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
+ code != skip_index) {
+ return Register::from_code(code);
}
}
return no_reg;
@@ -178,10 +183,12 @@ Register LGapResolver::GetFreeRegisterNot(Register reg) {
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] != 0) return false;
+ if (destination_uses_[code] != 0) return false;
}
return true;
}
@@ -204,7 +211,7 @@ void LGapResolver::Verify() {
void LGapResolver::Finish() {
if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
moves_.Rewind(0);
@@ -213,7 +220,7 @@ void LGapResolver::Finish() {
void LGapResolver::EnsureRestored(LOperand* operand) {
if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
}
@@ -222,7 +229,7 @@ void LGapResolver::EnsureRestored(LOperand* operand) {
Register LGapResolver::EnsureTempRegister() {
// 1. We may have already spilled to create a temp register.
if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
+ return Register::from_code(spilled_register_);
}
// 2. We may have a free register that we can use without spilling.
@@ -231,19 +238,22 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
+ Register scratch = Register::from_code(code);
__ push(scratch);
- spilled_register_ = i;
+ spilled_register_ = code;
return scratch;
}
}
// 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- Register scratch = Register::FromAllocationIndex(0);
+ spilled_register_ = config->GetAllocatableGeneralCode(0);
+ Register scratch = Register::from_code(spilled_register_);
__ push(scratch);
- spilled_register_ = 0;
return scratch;
}
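Where the gap resolver used to loop over Register::NumAllocatableRegisters(), it now asks the Crankshaft RegisterConfiguration for the allocatable set and indexes its bookkeeping arrays by register code. The recurring loop shape, condensed from the hunks above:

    const RegisterConfiguration* config =
        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
    for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
      int code = config->GetAllocatableGeneralCode(i);
      // source_uses_/destination_uses_ are indexed by register code,
      // not by allocation index.
      if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
        return Register::from_code(code);
      }
    }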
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
index d36e78b5f6..687087feb3 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -72,14 +72,15 @@ class LGapResolver final BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kMaxNumAllocatableRegisters];
- int destination_uses_[Register::kMaxNumAllocatableRegisters];
+ int source_uses_[Register::kNumRegisters];
+ int destination_uses_[DoubleRegister::kMaxNumRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#endif // V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index 884067b776..b4186ba573 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ia32/lithium-ia32.h"
+#include "src/crankshaft/ia32/lithium-ia32.h"
#include <sstream>
#if V8_TARGET_ARCH_IA32
-#include "src/hydrogen-osr.h"
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/lithium-inl.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+#include "src/crankshaft/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -372,11 +372,6 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -395,12 +390,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -482,14 +471,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2137,15 +2125,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2224,7 +2203,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2232,7 +2211,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2297,7 +2278,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = NULL;
val = UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
} else {
DCHECK(instr->value()->representation().IsSmiOrTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2312,7 +2293,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
- return new(zone()) LStoreKeyed(obj, key, val);
+ return new (zone()) LStoreKeyed(obj, key, val, nullptr);
}
}
@@ -2325,13 +2306,14 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
DCHECK(instr->elements()->representation().IsExternal());
LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
LOperand* val = GetStoreKeyedValueOperand(instr);
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2478,19 +2460,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index 9e4b885c48..08c051ad52 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_LITHIUM_IA32_H_
-#define V8_IA32_LITHIUM_IA32_H_
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -103,7 +103,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -145,7 +144,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1608,14 +1606,16 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1687,22 +1687,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2195,34 +2179,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
+ LOperand* backing_store_owner) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2231,6 +2195,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2885,6 +2850,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_IA32_LITHIUM_IA32_H_
+#endif // V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
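LLoadKeyed and LStoreKeyed each gain a backing_store_owner input (arity 2 to 3 and 3 to 4 above), threading the object that owns an external backing store through keyed accesses; the non-typed-array paths simply pass nullptr. A minimal builder-side sketch mirroring DoLoadKeyed:

    LOperand* key = UseRegisterOrConstantAtStart(instr->key());
    LOperand* backing_store = UseRegister(instr->elements());
    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
    LLoadKeyed* result =
        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner);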
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/crankshaft/lithium-allocator-inl.h
index 98923ae3aa..22611b5efb 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/crankshaft/lithium-allocator-inl.h
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
-#define V8_LITHIUM_ALLOCATOR_INL_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
+#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/lithium-allocator.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -54,6 +54,7 @@ void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_ALLOCATOR_INL_H_
+#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
index 36a12e75b3..5d05292642 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/crankshaft/lithium-allocator.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/lithium-allocator.h"
-#include "src/hydrogen.h"
-#include "src/lithium-inl.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/lithium-allocator-inl.h"
+#include "src/register-configuration.h"
#include "src/string-stream.h"
namespace v8 {
@@ -585,7 +586,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kMaxNumAllocatableRegisters;
+ return -index - 1 - Register::kNumRegisters;
}
@@ -617,7 +618,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- DCHECK(index < Register::kMaxNumAllocatableRegisters);
+ DCHECK(index < Register::kNumRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
@@ -631,7 +632,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < DoubleRegister::NumAllocatableRegisters());
+ DCHECK(index < DoubleRegister::kMaxNumRegisters);
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
@@ -939,25 +940,27 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
if (instr->ClobbersRegisters()) {
- for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
- if (output == NULL || !output->IsRegister() ||
- output->index() != i) {
- LiveRange* range = FixedLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(),
- zone());
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if (Register::from_code(i).IsAllocatable()) {
+ if (output == NULL || !output->IsRegister() ||
+ output->index() != i) {
+ LiveRange* range = FixedLiveRangeFor(i);
+ range->AddUseInterval(curr_position,
+ curr_position.InstructionEnd(), zone());
+ }
}
}
}
if (instr->ClobbersDoubleRegisters(isolate())) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- if (output == NULL || !output->IsDoubleRegister() ||
- output->index() != i) {
- LiveRange* range = FixedDoubleLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(),
- zone());
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ if (DoubleRegister::from_code(i).IsAllocatable()) {
+ if (output == NULL || !output->IsDoubleRegister() ||
+ output->index() != i) {
+ LiveRange* range = FixedDoubleLiveRangeFor(i);
+ range->AddUseInterval(curr_position,
+ curr_position.InstructionEnd(), zone());
+ }
}
}
}
@@ -1069,11 +1072,9 @@ bool LAllocator::Allocate(LChunk* chunk) {
DCHECK(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
assigned_registers_ =
- new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
- chunk->zone());
- assigned_double_registers_ =
- new(chunk->zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
- chunk->zone());
+ new (chunk->zone()) BitVector(Register::kNumRegisters, chunk->zone());
+ assigned_double_registers_ = new (chunk->zone())
+ BitVector(DoubleRegister::kMaxNumRegisters, chunk->zone());
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1460,7 +1461,12 @@ void LAllocator::PopulatePointerMaps() {
void LAllocator::AllocateGeneralRegisters() {
LAllocatorPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::NumAllocatableRegisters();
+ num_registers_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->num_allocatable_general_registers();
+ allocatable_register_codes_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_general_codes();
mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
@@ -1468,7 +1474,12 @@ void LAllocator::AllocateGeneralRegisters() {
void LAllocator::AllocateDoubleRegisters() {
LAllocatorPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::NumAllocatableRegisters();
+ num_registers_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->num_allocatable_double_registers();
+ allocatable_register_codes_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1492,7 +1503,7 @@ void LAllocator::AllocateRegisters() {
DCHECK(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);
@@ -1586,9 +1597,9 @@ void LAllocator::AllocateRegisters() {
const char* LAllocator::RegisterName(int allocation_index) {
if (mode_ == GENERAL_REGISTERS) {
- return Register::AllocationIndexToString(allocation_index);
+ return Register::from_code(allocation_index).ToString();
} else {
- return DoubleRegister::AllocationIndexToString(allocation_index);
+ return DoubleRegister::from_code(allocation_index).ToString();
}
}
@@ -1750,16 +1761,12 @@ void LAllocator::InactiveToActive(LiveRange* range) {
}
-// TryAllocateFreeReg and AllocateBlockedReg assume this
-// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
- Register::kMaxNumAllocatableRegisters);
-
-
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ DCHECK(DoubleRegister::kMaxNumRegisters >= Register::kNumRegisters);
+
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumRegisters];
- for (int i = 0; i < num_registers_; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1800,10 +1807,11 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
}
// Find the register which stays free for the longest time.
- int reg = 0;
+ int reg = allocatable_register_codes_[0];
for (int i = 1; i < RegisterCount(); ++i) {
- if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
- reg = i;
+ int code = allocatable_register_codes_[i];
+ if (free_until_pos[code].Value() > free_until_pos[reg].Value()) {
+ reg = code;
}
}
@@ -1845,10 +1853,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
- LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition use_pos[DoubleRegister::kMaxNumRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumRegisters];
- for (int i = 0; i < num_registers_; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1883,10 +1891,11 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- int reg = 0;
+ int reg = allocatable_register_codes_[0];
for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[reg].Value()) {
- reg = i;
+ int code = allocatable_register_codes_[i];
+ if (use_pos[code].Value() > use_pos[reg].Value()) {
+ reg = code;
}
}
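Because fixed live ranges are now keyed by full register code, the clobber handling walks every architectural register and filters on allocatability instead of looping over a dense allocatable count. Condensed from the hunk above (the output-register check is omitted for brevity):

    for (int i = 0; i < Register::kNumRegisters; ++i) {
      if (!Register::from_code(i).IsAllocatable()) continue;  // skip codes the allocator never hands out
      LiveRange* range = FixedLiveRangeFor(i);
      range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
                            zone());
    }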
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/crankshaft/lithium-allocator.h
index 7c94772450..46289e0fbb 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/crankshaft/lithium-allocator.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_ALLOCATOR_H_
-#define V8_LITHIUM_ALLOCATOR_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
+#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#include "src/allocation.h"
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
#include "src/zone.h"
namespace v8 {
@@ -520,9 +520,8 @@ class LAllocator BASE_EMBEDDED {
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
- fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, Register::kNumRegisters> fixed_live_ranges_;
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
@@ -536,6 +535,7 @@ class LAllocator BASE_EMBEDDED {
RegisterKind mode_;
int num_registers_;
+ const int* allocatable_register_codes_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
@@ -568,6 +568,7 @@ class LAllocatorPhase : public CompilationPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_ALLOCATOR_H_
+#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index 267df58ccd..41f78cd183 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -2,34 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lithium-codegen.h"
+#include "src/crankshaft/lithium-codegen.h"
#include <sstream>
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
-#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
-#include "src/x64/lithium-codegen-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
-#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
-#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
-#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
-#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
-#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
-#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/crankshaft/lithium-codegen.h
index ce04da9006..97a0722736 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/crankshaft/lithium-codegen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_CODEGEN_H_
-#define V8_LITHIUM_CODEGEN_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
+#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
#include "src/bailout-reason.h"
#include "src/compiler.h"
@@ -91,6 +91,7 @@ class LCodeGenBase BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_CODEGEN_H_
+#endif // V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
diff --git a/deps/v8/src/lithium-inl.h b/deps/v8/src/crankshaft/lithium-inl.h
index 1a10773390..9044b4ca7a 100644
--- a/deps/v8/src/lithium-inl.h
+++ b/deps/v8/src/crankshaft/lithium-inl.h
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_INL_H_
-#define V8_LITHIUM_INL_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_INL_H_
+#define V8_CRANKSHAFT_LITHIUM_INL_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -108,7 +108,7 @@ LOperand* UseIterator::Current() {
void UseIterator::Advance() {
input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_INL_H_
+#endif // V8_CRANKSHAFT_LITHIUM_INL_H_
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index 7d37532ace..b4771c0557 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -2,34 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
#include "src/scopes.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
-#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
-#include "src/x64/lithium-codegen-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
-#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
-#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
-#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
-#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
-#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
-#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -56,24 +56,22 @@ void LOperand::PrintTo(StringStream* stream) {
break;
case LUnallocated::FIXED_REGISTER: {
int reg_index = unalloc->fixed_register_index();
- if (reg_index < 0 ||
- reg_index >= Register::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d)", reg_index);
} else {
const char* register_name =
- Register::AllocationIndexToString(reg_index);
+ Register::from_code(reg_index).ToString();
stream->Add("(=%s)", register_name);
}
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
int reg_index = unalloc->fixed_register_index();
- if (reg_index < 0 ||
- reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d)", reg_index);
} else {
const char* double_register_name =
- DoubleRegister::AllocationIndexToString(reg_index);
+ DoubleRegister::from_code(reg_index).ToString();
stream->Add("(=%s)", double_register_name);
}
break;
@@ -106,21 +104,19 @@ void LOperand::PrintTo(StringStream* stream) {
break;
case REGISTER: {
int reg_index = index();
- if (reg_index < 0 || reg_index >= Register::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d|R)", reg_index);
} else {
- stream->Add("[%s|R]", Register::AllocationIndexToString(reg_index));
+ stream->Add("[%s|R]", Register::from_code(reg_index).ToString());
}
break;
}
case DOUBLE_REGISTER: {
int reg_index = index();
- if (reg_index < 0 ||
- reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d|R)", reg_index);
} else {
- stream->Add("[%s|R]",
- DoubleRegister::AllocationIndexToString(reg_index));
+ stream->Add("[%s|R]", DoubleRegister::from_code(reg_index).ToString());
}
break;
}
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/crankshaft/lithium.h
index 046de19fd0..126517e2eb 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_H_
-#define V8_LITHIUM_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_H_
+#define V8_CRANKSHAFT_LITHIUM_H_
#include <set>
#include "src/allocation.h"
#include "src/bailout-reason.h"
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
#include "src/safepoint-table.h"
#include "src/zone-allocator.h"
@@ -835,6 +835,7 @@ class UseIterator BASE_EMBEDDED {
class LInstruction;
class LCodeGen;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_H_
+#endif // V8_CRANKSHAFT_LITHIUM_H_
diff --git a/deps/v8/src/crankshaft/mips/OWNERS b/deps/v8/src/crankshaft/mips/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/crankshaft/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index bf158b4c43..a82b262dbb 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -25,14 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/mips/lithium-codegen-mips.h"
-#include "src/mips/lithium-gap-resolver-mips.h"
#include "src/profiler/cpu-profiler.h"
@@ -96,7 +97,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -112,7 +113,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -137,24 +138,6 @@ bool LCodeGen::GeneratePrologue() {
// cp: Callee's context.
// fp: Caller's frame pointer.
// lr: Caller's pc.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ lw(a2, MemOperand(sp, receiver_offset));
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ lw(a2, GlobalObjectOperand());
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sw(a2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -165,7 +148,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsCodePreAgingActive());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -403,12 +385,12 @@ bool LCodeGen::GenerateSafepointTable() {
Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+ return Register::from_code(index);
}
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+ return DoubleRegister::from_code(index);
}
@@ -2686,10 +2668,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
- no_frame_start = masm_->pc_offset();
__ Pop(ra, fp);
}
if (instr->has_constant_parameter_count()) {
@@ -2708,10 +2688,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(ra);
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2726,7 +2702,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2740,7 +2716,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2761,24 +2737,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3285,8 +3243,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ lw(result,
ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ lw(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3834,7 +3791,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3848,15 +3805,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ li(vector_register, vector);
__ li(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ li(a0, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4053,30 +4007,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand(0);
@@ -4560,7 +4490,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5325,11 +5256,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
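
The lithium-codegen-mips.cc hunks above share one theme: the separate register-allocation index space is gone, so `Register::FromAllocationIndex(index)` becomes `Register::from_code(index)`, and later in this diff `Register::ToAllocationIndex(reg)` becomes plain `reg.code()`. A minimal sketch of the shape this assumes (not V8's actual register class), in which the allocator simply works with hardware register codes:

struct Register {
  int reg_code;  // hardware encoding, e.g. 2 for the mips register a2

  static Register from_code(int code) {
    Register r = {code};
    return r;
  }
  int code() const { return reg_code; }
  bool is(Register other) const { return reg_code == other.reg_code; }
};

With a single numbering scheme there is nothing to translate between the allocator and the assembler, which is why both conversion helpers disappear in the same commit.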
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index 858c7f12bc..8fc78a96d4 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+#include "src/crankshaft/mips/lithium-mips.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
-#include "src/mips/lithium-gap-resolver-mips.h"
-#include "src/mips/lithium-mips.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
@@ -426,6 +426,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc
index cdaf2463a0..e25a32dffd 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips/lithium-codegen-mips.h"
-#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.h b/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h
index 7374da7727..6c5fd037a3 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -53,6 +53,7 @@ class LGapResolver final BASE_EMBEDDED {
LOperand* saved_destination_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index 42ecab4b8e..d65d5582c0 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips/lithium-mips.h"
+#include "src/crankshaft/mips/lithium-mips.h"
#include <sstream>
#if V8_TARGET_ARCH_MIPS
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/mips/lithium-codegen-mips.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
@@ -330,11 +330,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -353,12 +348,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -451,14 +440,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2088,15 +2076,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2173,7 +2152,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2181,7 +2160,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2246,7 +2227,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2258,7 +2239,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2380,19 +2362,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
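
The keyed load/store hunks above thread a new backing_store_owner operand through accesses that target an external (typed-array) backing store, using UseAny so the allocator only has to keep the value alive somewhere, not in a particular register. A self-contained toy, not V8 code, showing why the extra input exists:

// If the GC only scans live operands, the typed array that owns an external
// backing store could be collected while a raw pointer into that store is
// still in flight. Recording the owner as an extra, unconstrained operand
// keeps it alive for the duration of the instruction.
struct Operand { const void* value; };

struct LoadKeyed {
  Operand backing_store;        // raw elements pointer
  Operand key;                  // element index
  Operand backing_store_owner;  // keeps the owning object live; nullptr for
                                // loads from ordinary FixedArrays
};

LoadKeyed MakeLoadKeyed(const void* store, const void* key,
                        const void* owner /* nullptr if not needed */) {
  return LoadKeyed{Operand{store}, Operand{key}, Operand{owner}};
}

This mirrors the two cases in DoLoadKeyed above: regular elements pass nullptr, while external backing stores pass UseAny(instr->backing_store_owner()).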
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index ed3332ca65..e064edd6b1 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_MIPS_H_
-#define V8_MIPS_LITHIUM_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -142,7 +141,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1570,15 +1568,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1635,22 +1635,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2152,34 +2136,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2188,6 +2152,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2843,6 +2808,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
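
The header hunks above bump the LTemplateInstruction arity because its template parameters encode operand counts as <results, inputs, temps>: adding backing_store_owner moves LLoadKeyed from <1, 2, 0> to <1, 3, 0> and LStoreKeyed from <0, 3, 0> to <0, 4, 0>. A minimal sketch of that shape, with plain arrays standing in for whatever container the real class uses:

// Sketch only: R = result count, I = input count, T = temp count.
template <int R, int I, int T>
struct LTemplateInstruction {
  void* results_[R == 0 ? 1 : R];
  void* inputs_[I == 0 ? 1 : I];
  void* temps_[T == 0 ? 1 : T];
};

// One more input slot, so I goes from 2 to 3.
struct LoadKeyed : LTemplateInstruction<1, 3, 0> {
  LoadKeyed(void* elements, void* key, void* backing_store_owner) {
    inputs_[0] = elements;
    inputs_[1] = key;
    inputs_[2] = backing_store_owner;
  }
};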
diff --git a/deps/v8/src/crankshaft/mips64/OWNERS b/deps/v8/src/crankshaft/mips64/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/crankshaft/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index a26d099a62..a615030fae 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
+
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/mips64/lithium-codegen-mips64.h"
-#include "src/mips64/lithium-gap-resolver-mips64.h"
#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -71,7 +72,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -87,7 +88,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -112,24 +113,6 @@ bool LCodeGen::GeneratePrologue() {
// cp: Callee's context.
// fp: Caller's frame pointer.
// lr: Caller's pc.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ ld(a2, MemOperand(sp, receiver_offset));
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ ld(a2, GlobalObjectOperand());
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sd(a2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -140,7 +123,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsCodePreAgingActive());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -386,12 +368,12 @@ bool LCodeGen::GenerateSafepointTable() {
Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+ return Register::from_code(index);
}
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+ return DoubleRegister::from_code(index);
}
@@ -2794,10 +2776,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
- no_frame_start = masm_->pc_offset();
__ Pop(ra, fp);
}
if (instr->has_constant_parameter_count()) {
@@ -2816,10 +2796,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(ra);
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2834,7 +2810,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2848,7 +2824,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2869,24 +2845,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3454,8 +3412,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ ld(result,
ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ld(result,
- FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ ld(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -4022,7 +3979,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -4036,15 +3993,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ li(vector_register, vector);
__ li(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ li(a0, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4250,30 +4204,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand((int64_t)0);
@@ -4777,7 +4707,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5508,11 +5439,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
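
As on mips, the call path above trades CallFunctionFlags for a ConvertReceiverMode: the IC path forwards the mode to CallICInOptimizedCode, and the non-IC path loads the arity into a0 and calls the generic Call builtin instead of instantiating a CallFunctionStub. A hedged sketch of what the mode expresses; the enumerator names follow the V8 4.8 definition, but the helper around them is illustrative only:

enum class ConvertReceiverMode {
  kNullOrUndefined,     // receiver known null/undefined: substitute the
                        // global proxy (for sloppy-mode callees)
  kNotNullOrUndefined,  // receiver known not null/undefined (primitives may
                        // still need ToObject)
  kAny                  // nothing known; the Call builtin checks at run time
};

// Illustrative helper, not a V8 function: whether the callee-side code must
// replace the receiver, given the mode and the dynamic receiver state.
bool ReceiverNeedsWrapping(ConvertReceiverMode mode, bool is_null_or_undef) {
  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined:
      return true;
    case ConvertReceiverMode::kNotNullOrUndefined:
      return false;
    case ConvertReceiverMode::kAny:
      return is_null_or_undef;
  }
  return false;
}

This is also why the GeneratePrologue hunks delete the inline receiver fix-up: the substitution now happens once, inside the Call builtin, rather than in every sloppy-mode function prologue.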
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index b08de167be..3d9433be48 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+#include "src/crankshaft/mips64/lithium-mips64.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
-#include "src/mips64/lithium-gap-resolver-mips64.h"
-#include "src/mips64/lithium-mips64.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
@@ -429,6 +429,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
index 9e3114bc34..0374cbc7bb 100644
--- a/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips64/lithium-codegen-mips64.h"
-#include "src/mips64/lithium-gap-resolver-mips64.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips64/lithium-gap-resolver-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
index 7374da7727..85d8e2920c 100644
--- a/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -53,6 +53,7 @@ class LGapResolver final BASE_EMBEDDED {
LOperand* saved_destination_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index 4f2f161524..f0fba39036 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips64/lithium-mips64.h"
+#include "src/crankshaft/mips64/lithium-mips64.h"
#include <sstream>
#if V8_TARGET_ARCH_MIPS64
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
namespace v8 {
namespace internal {
@@ -330,11 +330,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -353,12 +348,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -451,14 +440,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2091,15 +2079,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2177,7 +2156,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
instr->representation().IsInteger32());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2185,7 +2164,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2251,7 +2232,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2263,7 +2244,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2385,19 +2367,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index 01463c9d63..fb0e3cba72 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_MIPS_H_
-#define V8_MIPS_LITHIUM_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -102,7 +102,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -144,7 +143,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1632,15 +1630,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1697,22 +1697,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2198,34 +2182,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2234,6 +2198,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2889,6 +2854,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
diff --git a/deps/v8/src/crankshaft/ppc/OWNERS b/deps/v8/src/crankshaft/ppc/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/crankshaft/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index ad6d8db13d..8f8a0e50f5 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/ppc/lithium-codegen-ppc.h"
-#include "src/ppc/lithium-gap-resolver-ppc.h"
#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -73,7 +74,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ stfd(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -89,7 +90,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ lfd(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -116,24 +117,6 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
// ip: Our own function entry (required by the prologue)
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ LoadP(r5, MemOperand(sp, receiver_offset));
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bne(&ok);
-
- __ LoadP(r5, GlobalObjectOperand());
- __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
-
- __ StoreP(r5, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
int prologue_offset = masm_->pc_offset();
@@ -152,7 +135,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -395,13 +377,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
+ return DoubleRegister::from_code(code);
}
@@ -2845,12 +2827,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
int32_t sp_delta = (parameter_count + 1) * kPointerSize;
if (NeedsEagerFrame()) {
- no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
} else if (sp_delta != 0) {
__ addi(sp, sp, Operand(sp_delta));
}
@@ -2859,17 +2840,13 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
if (NeedsEagerFrame()) {
- no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
__ SmiToPtrArrayOffset(r0, reg);
__ add(sp, sp, r0);
}
__ blr();
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2884,7 +2861,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}
@@ -2898,7 +2875,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}
@@ -2919,24 +2896,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3483,28 +3442,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
if (!instr->hydrogen()->known_function()) {
// Do not transform the receiver to object for strict mode
- // functions.
+ // functions or builtins.
__ LoadP(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ lwz(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&result_in_receiver, cr0);
-
- // Do not transform the receiver to object for builtins.
- __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
+ __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
__ bne(&result_in_receiver, cr0);
}
@@ -3526,7 +3470,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ bind(&global_object);
__ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ LoadP(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
} else {
@@ -4089,7 +4033,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -4103,15 +4047,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Move(vector_register, vector);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ mov(r3, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4321,29 +4262,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode()).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4852,7 +4770,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(r3, result);
}
@@ -5604,11 +5523,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ b(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
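
The DoWrapReceiver hunk above folds two TestBit/branch pairs into a single andi against the OR of both masks, so one test and one branch now cover both the strict-mode and the native (builtin) case. The equivalent logic in plain C++, with placeholder bit positions rather than V8's real SharedFunctionInfo layout:

#include <cstdint>

constexpr int kStrictModeBit = 2;  // placeholder bit index
constexpr int kNativeBit = 3;      // placeholder bit index

// Before: two single-bit tests, two branches.
bool SkipWrapTwoTests(uint32_t hints) {
  if (hints & (1u << kStrictModeBit)) return true;
  if (hints & (1u << kNativeBit)) return true;
  return false;
}

// After: one AND against the combined mask, one branch. This is what
// `andi r0, scratch, (1 << kStrictModeBit) | (1 << kNativeBit)` computes.
bool SkipWrapOneTest(uint32_t hints) {
  return (hints & ((1u << kStrictModeBit) | (1u << kNativeBit))) != 0;
}

The fold also drops the V8_TARGET_ARCH_PPC64 #if blocks, presumably because the new kStrictModeBit/kNativeBit constants already encode the final bit positions, with no per-bit smi-tag adjustment left to make.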
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index 117dc574d5..69653921f7 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_LITHIUM_CODEGEN_PPC_H_
-#define V8_PPC_LITHIUM_CODEGEN_PPC_H_
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
-#include "src/ppc/lithium-ppc.h"
-
-#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/ppc/lithium-ppc.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
@@ -137,7 +136,7 @@ class LCodeGen : public LCodeGenBase {
Scope* scope() const { return scope_; }
- Register scratch0() { return r11; }
+ Register scratch0() { return kLithiumScratch; }
DoubleRegister double_scratch0() { return kScratchDoubleReg; }
LInstruction* GetNextInstruction();
@@ -358,7 +357,7 @@ class LDeferredCode : public ZoneObject {
Label* external_exit_;
int instruction_index_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PPC_LITHIUM_CODEGEN_PPC_H_
+#endif // V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
index 16fb665dda..4e249808f7 100644
--- a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/lithium-codegen-ppc.h"
-#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
index 7741080e55..6eeea5eee5 100644
--- a/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-#define V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -52,7 +52,7 @@ class LGapResolver final BASE_EMBEDDED {
bool in_cycle_;
LOperand* saved_destination_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#endif // V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 767c771fb3..67a860ae55 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/lithium-ppc.h"
+#include "src/crankshaft/ppc/lithium-ppc.h"
#include <sstream>
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/ppc/lithium-codegen-ppc.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
namespace v8 {
namespace internal {
@@ -336,11 +336,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -359,12 +354,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -457,14 +446,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2102,15 +2090,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2186,14 +2165,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
} else {
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK((instr->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new (zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2256,7 +2237,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new (zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK((instr->value()->representation().IsInteger32() &&
@@ -2267,7 +2248,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new (zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2387,19 +2369,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r4);
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index e862a11f63..e4d267ec85 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_LITHIUM_PPC_H_
-#define V8_PPC_LITHIUM_PPC_H_
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -143,7 +142,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1549,15 +1547,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
@@ -1612,22 +1612,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -2104,34 +2088,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2140,6 +2104,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
@@ -2760,7 +2725,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PPC_LITHIUM_PPC_H_
+#endif // V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
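Note on the recurring LLoadKeyed/LStoreKeyed change above: every keyed access to a fixed typed array now carries an extra backing_store_owner operand. It is allocated with UseAny and never read by the generated code; its apparent purpose is to keep the array's owner live (and with it the external backing store reachable) for the duration of the access. A minimal sketch of the pattern, with hypothetical names rather than V8's real classes:

    // A 3-input instruction in the LTemplateInstruction<1, 3, 0> shape.
    // inputs_[2] is never consumed by codegen; declaring it as an input
    // is what forces the register allocator to keep the value live.
    struct LOperand;

    template <int kResults, int kInputs, int kTemps>
    struct LTemplateInstructionSketch {
      LOperand* inputs_[kInputs];
    };

    struct LLoadKeyedSketch : LTemplateInstructionSketch<1, 3, 0> {
      LLoadKeyedSketch(LOperand* elements, LOperand* key, LOperand* owner) {
        inputs_[0] = elements;
        inputs_[1] = key;
        inputs_[2] = owner;  // backing-store owner: liveness only
      }
      LOperand* backing_store_owner() { return inputs_[2]; }
    };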
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/crankshaft/typing.cc
index bd5114e89a..49bc2c7ded 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/typing.h"
+#include "src/crankshaft/typing.h"
#include "src/frames.h"
#include "src/frames-inl.h"
@@ -17,7 +17,9 @@ namespace internal {
AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
- : closure_(closure),
+ : isolate_(isolate),
+ zone_(zone),
+ closure_(closure),
scope_(scope),
osr_ast_id_(osr_ast_id),
root_(root),
@@ -25,7 +27,7 @@ AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
handle(closure->shared()->feedback_vector()),
handle(closure->context()->native_context())),
store_(zone) {
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
@@ -51,7 +53,7 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
if (stmt->OsrEntryId() != osr_ast_id_) return;
DisallowHeapAllocation no_gc;
- JavaScriptFrameIterator it(isolate());
+ JavaScriptFrameIterator it(isolate_);
JavaScriptFrame* frame = it.frame();
// Assert that the frame on the stack belongs to the function we want to OSR.
@@ -348,6 +350,13 @@ void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
+void AstTyper::VisitDoExpression(DoExpression* expr) {
+ RECURSE(VisitBlock(expr->block()));
+ RECURSE(VisitVariableProxy(expr->result()));
+ NarrowType(expr, expr->result()->bounds());
+}
+
+
void AstTyper::VisitConditional(Conditional* expr) {
// Collect type feedback.
expr->condition()->RecordToBooleanTypeFeedback(oracle());
@@ -402,7 +411,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->emit_store()) {
// Record type feed back for the property.
TypeFeedbackId id = prop->key()->AsLiteral()->LiteralFeedbackId();
- FeedbackVectorICSlot slot = prop->GetSlot();
+ FeedbackVectorSlot slot = prop->GetSlot();
SmallMapList maps;
if (FLAG_vector_stores) {
oracle()->CollectReceiverTypes(slot, &maps);
@@ -437,7 +446,7 @@ void AstTyper::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
if (prop != NULL) {
TypeFeedbackId id = expr->AssignmentFeedbackId();
- FeedbackVectorICSlot slot = expr->AssignmentSlot();
+ FeedbackVectorSlot slot = expr->AssignmentSlot();
expr->set_is_uninitialized(FLAG_vector_stores
? oracle()->StoreIsUninitialized(slot)
: oracle()->StoreIsUninitialized(id));
@@ -499,8 +508,7 @@ void AstTyper::VisitThrow(Throw* expr) {
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
- FeedbackVectorICSlot slot(FeedbackVectorICSlot::Invalid());
- slot = expr->PropertyFeedbackSlot();
+ FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
if (!expr->IsUninitialized()) {
@@ -530,8 +538,8 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
bool is_uninitialized = true;
- if (expr->IsUsingCallFeedbackICSlot(isolate())) {
- FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
+ if (expr->IsUsingCallFeedbackICSlot(isolate_)) {
+ FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
is_uninitialized = oracle()->CallIsUninitialized(slot);
if (!expr->expression()->IsProperty() &&
oracle()->CallIsMonomorphic(slot)) {
@@ -550,7 +558,7 @@ void AstTyper::VisitCall(Call* expr) {
}
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate_)) {
store_.Forget(); // Eval could do whatever to local variables.
}
@@ -622,7 +630,7 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
TypeFeedbackId store_id = expr->CountStoreFeedbackId();
- FeedbackVectorICSlot slot = expr->CountSlot();
+ FeedbackVectorSlot slot = expr->CountSlot();
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
if (FLAG_vector_stores) {
diff --git a/deps/v8/src/typing.h b/deps/v8/src/crankshaft/typing.h
index 8b3e97b67c..d088b84709 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/crankshaft/typing.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TYPING_H_
-#define V8_TYPING_H_
+#ifndef V8_CRANKSHAFT_TYPING_H_
+#define V8_CRANKSHAFT_TYPING_H_
#include "src/allocation.h"
#include "src/ast.h"
@@ -33,6 +33,8 @@ class AstTyper: public AstVisitor {
typedef v8::internal::Effects<int, kNoVar> Effects;
typedef v8::internal::NestedEffects<int, kNoVar> Store;
+ Isolate* isolate_;
+ Zone* zone_;
Handle<JSFunction> closure_;
Scope* scope_;
BailoutId osr_ast_id_;
@@ -40,6 +42,7 @@ class AstTyper: public AstVisitor {
TypeFeedbackOracle oracle_;
Store store_;
+ Zone* zone() const { return zone_; }
TypeFeedbackOracle* oracle() { return &oracle_; }
void NarrowType(Expression* e, Bounds b) {
@@ -69,13 +72,14 @@ class AstTyper: public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
void VisitStatements(ZoneList<Statement*>* statements) override;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(AstTyper);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_TYPING_H_
+#endif // V8_CRANKSHAFT_TYPING_H_
diff --git a/deps/v8/src/unique.h b/deps/v8/src/crankshaft/unique.h
index 8805218b1f..54abfa7710 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/crankshaft/unique.h
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNIQUE_H_
-#define V8_UNIQUE_H_
+#ifndef V8_CRANKSHAFT_UNIQUE_H_
+#define V8_CRANKSHAFT_UNIQUE_H_
#include <ostream> // NOLINT(readability/streams)
+#include "src/assert-scope.h"
#include "src/base/functional.h"
#include "src/handles.h"
#include "src/utils.h"
@@ -355,6 +356,7 @@ class UniqueSet final : public ZoneObject {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_UNIQUE_H_
+#endif // V8_CRANKSHAFT_UNIQUE_H_
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index dbdd146a1e..d6ad87be1c 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -4,14 +4,15 @@
#if V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -88,8 +89,8 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ __ Movsd(MemOperand(rsp, count * kDoubleSize),
+ XMMRegister::from_code(save_iterator.Current()));
save_iterator.Advance();
count++;
}
@@ -104,7 +105,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ __ Movsd(XMMRegister::from_code(save_iterator.Current()),
MemOperand(rsp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -124,24 +125,6 @@ bool LCodeGen::GeneratePrologue() {
__ int3();
}
#endif
-
- // Sloppy mode functions need to replace the receiver with the global proxy
- // when called as functions (without an explicit receiver object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- StackArgumentsAccessor args(rsp, scope()->num_parameters());
- __ movp(rcx, args.GetReceiverOperand());
-
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &ok, Label::kNear);
-
- __ movp(rcx, GlobalObjectOperand());
- __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
-
- __ movp(args.GetReceiverOperand(), rcx);
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -153,7 +136,6 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ Prologue(info()->IsCodePreAgingActive());
}
- info()->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -437,12 +419,12 @@ bool LCodeGen::GenerateSafepointTable() {
Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+ return Register::from_code(index);
}
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
+ return XMMRegister::from_code(index);
}
@@ -1964,35 +1946,40 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bind(&return_left);
} else {
DCHECK(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
+ Label not_nan, distinct, return_left, return_right;
Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
XMMRegister left_reg = ToDoubleRegister(left);
XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
+ __ Ucomisd(left_reg, right_reg);
+ __ j(parity_odd, &not_nan, Label::kNear); // Both are not NaN.
+
+ // One of the numbers is NaN. Find which one and return it.
+ __ Ucomisd(left_reg, left_reg);
+ __ j(parity_even, &return_left, Label::kNear); // left is NaN.
+ __ jmp(&return_right, Label::kNear); // right is NaN.
- __ bind(&check_zero);
+ __ bind(&not_nan);
+ __ j(not_equal, &distinct, Label::kNear); // left != right.
+
+ // left == right
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
+
+ // At this point, both left and right are either +0 or -0.
if (operation == HMathMinMax::kMathMin) {
- __ orps(left_reg, right_reg);
+ __ Orpd(left_reg, right_reg);
} else {
- // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- __ addsd(left_reg, right_reg);
+ __ Andpd(left_reg, right_reg);
}
__ jmp(&return_left, Label::kNear);
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear);
+ __ bind(&distinct);
+ __ j(condition, &return_left, Label::kNear);
+
__ bind(&return_right);
- __ movaps(left_reg, right_reg);
+ __ Movapd(left_reg, right_reg);
__ bind(&return_left);
}
@@ -2041,16 +2028,16 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
}
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result
- __ movaps(result, result);
+ __ Movapd(result, result);
break;
case Token::MOD: {
XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm_scratch, left);
+ __ Movapd(xmm_scratch, left);
DCHECK(right.is(xmm1));
__ CallCFunction(
ExternalReference::mod_two_doubles_operation(isolate()), 2);
- __ movaps(result, xmm_scratch);
+ __ Movapd(result, xmm_scratch);
break;
}
default:
@@ -2129,8 +2116,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
DCHECK(r.IsTagged());
@@ -2150,8 +2137,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
DCHECK(!info()->IsStub());
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
@@ -2239,8 +2226,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2320,7 +2307,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
int32_t value;
@@ -2388,11 +2375,11 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ ucomisd(input_reg, input_reg);
+ __ Ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_reg);
+ __ Movsd(MemOperand(rsp, 0), input_reg);
__ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
@@ -2408,10 +2395,10 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
if (rep.IsDouble()) {
XMMRegister value = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, value);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, value);
EmitFalseBranch(instr, not_equal);
- __ movmskpd(kScratchRegister, value);
+ __ Movmskpd(kScratchRegister, value);
__ testl(kScratchRegister, Immediate(1));
EmitBranch(instr, not_zero);
} else {
@@ -2712,11 +2699,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movp(rsp, rbp);
__ popq(rbp);
- no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
__ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
@@ -2732,9 +2717,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ addp(rsp, reg);
__ jmp(return_addr_reg);
}
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2749,7 +2731,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}
@@ -2763,7 +2745,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}
@@ -2784,23 +2766,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->result()).is(rax));
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2875,7 +2840,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (instr->hydrogen()->representation().IsDouble()) {
DCHECK(access.IsInobject());
XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, FieldOperand(object, offset));
+ __ Movsd(result, FieldOperand(object, offset));
return;
}
@@ -3005,10 +2970,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
+ __ Cvtss2sd(result, operand);
} else if (elements_kind == FLOAT64_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
+ __ Movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@@ -3079,7 +3043,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
- __ movsd(result, double_load_operand);
+ __ Movsd(result, double_load_operand);
}
@@ -3305,7 +3269,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ movp(receiver,
Operand(receiver,
Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
+ __ movp(receiver, FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
__ bind(&receiver_ok);
}
@@ -3582,9 +3546,9 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
if (r.IsDouble()) {
XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ andps(input_reg, scratch);
+ __ Xorpd(scratch, scratch);
+ __ Subsd(scratch, input_reg);
+ __ Andpd(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else if (r.IsSmi()) {
@@ -3610,19 +3574,19 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE4_1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Deoptimize if minus zero.
- __ movq(output_reg, input_reg);
+ __ Movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
}
- __ roundsd(xmm_scratch, input_reg, kRoundDown);
- __ cvttsd2si(output_reg, xmm_scratch);
+ __ Roundsd(xmm_scratch, input_reg, kRoundDown);
+ __ Cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ Ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &negative_sign, Label::kNear);
@@ -3630,8 +3594,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Check for negative zero.
Label positive_sign;
__ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ testq(output_reg, Immediate(1));
+ __ Movmskpd(output_reg, input_reg);
+ __ testl(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ Set(output_reg, 0);
__ jmp(&done);
@@ -3639,7 +3603,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
// Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
+ __ Cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3648,9 +3612,9 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Non-zero negative reaches here.
__ bind(&negative_sign);
// Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, input_reg);
+ __ Cvttsd2si(output_reg, input_reg);
__ Cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
+ __ Ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3671,13 +3635,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
Label done, round_to_zero, below_one_half;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ movq(kScratchRegister, one_half);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
+ __ Movq(xmm_scratch, kScratchRegister);
+ __ Ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, xmm_scratch);
+ __ Addsd(xmm_scratch, input_reg);
+ __ Cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3685,21 +3649,21 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&below_one_half);
__ movq(kScratchRegister, minus_one_half);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
+ __ Movq(xmm_scratch, kScratchRegister);
+ __ Ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movq(input_temp, input_reg); // Do not alter input_reg.
- __ subsd(input_temp, xmm_scratch);
- __ cvttsd2si(output_reg, input_temp);
+ __ Movapd(input_temp, input_reg); // Do not alter input_reg.
+ __ Subsd(input_temp, xmm_scratch);
+ __ Cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(xmm_scratch, input_temp);
+ __ Ucomisd(xmm_scratch, input_temp);
__ j(equal, &done, dist);
__ subl(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
@@ -3709,7 +3673,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
// we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movq(output_reg, input_reg);
+ __ Movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
}
@@ -3721,8 +3685,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
void LCodeGen::DoMathFround(LMathFround* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister output_reg = ToDoubleRegister(instr->result());
- __ cvtsd2ss(output_reg, input_reg);
- __ cvtss2sd(output_reg, output_reg);
+ __ Cvtsd2ss(output_reg, input_reg);
+ __ Cvtss2sd(output_reg, output_reg);
}
@@ -3730,10 +3694,10 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
XMMRegister output = ToDoubleRegister(instr->result());
if (instr->value()->IsDoubleRegister()) {
XMMRegister input = ToDoubleRegister(instr->value());
- __ sqrtsd(output, input);
+ __ Sqrtsd(output, input);
} else {
Operand input = ToOperand(instr->value());
- __ sqrtsd(output, input);
+ __ Sqrtsd(output, input);
}
}
@@ -3750,22 +3714,22 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
__ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
+ __ Movq(xmm_scratch, kScratchRegister);
+ __ Ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &sqrt, Label::kNear);
__ j(carry, &sqrt, Label::kNear);
// If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
+ __ Xorpd(input_reg, input_reg);
+ __ Subsd(input_reg, xmm_scratch);
__ jmp(&done, Label::kNear);
// Square root.
__ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Addsd(input_reg, xmm_scratch); // Convert -0 to +0.
+ __ Sqrtsd(input_reg, input_reg);
__ bind(&done);
}
@@ -3821,26 +3785,26 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(not_carry, &zero, Label::kNear);
- __ pcmpeqd(input_reg, input_reg);
+ __ Pcmpeqd(input_reg, input_reg);
__ jmp(&done, Label::kNear);
__ bind(&zero);
ExternalReference ninf =
ExternalReference::address_of_negative_infinity();
Operand ninf_operand = masm()->ExternalOperand(ninf);
- __ movsd(input_reg, ninf_operand);
+ __ Movsd(input_reg, ninf_operand);
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), input_reg);
+ __ Movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
- __ movsd(input_reg, Operand(rsp, 0));
+ __ Movsd(input_reg, Operand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
@@ -3879,7 +3843,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3893,15 +3857,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Move(vector_register, vector);
__ Move(slot_register, Smi::FromInt(index));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Set(rax, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4031,7 +3992,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!hinstr->has_transition());
DCHECK(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(object, offset), value);
+ __ Movsd(FieldOperand(object, offset), value);
return;
}
@@ -4079,7 +4040,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(operand, value);
+ __ Movsd(operand, value);
} else if (instr->value()->IsRegister()) {
Register value = ToRegister(instr->value());
@@ -4141,29 +4102,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4249,10 +4187,10 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(operand, value);
+ __ Cvtsd2ss(value, value);
+ __ Movss(operand, value);
} else if (elements_kind == FLOAT64_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
+ __ Movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
@@ -4299,8 +4237,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
XMMRegister xmm_scratch = double_scratch0();
// Turn potential sNaN value into qNaN.
- __ xorps(xmm_scratch, xmm_scratch);
- __ subsd(value, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Subsd(value, xmm_scratch);
}
Operand double_store_operand = BuildFastArrayOperand(
@@ -4310,7 +4248,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
- __ movsd(double_store_operand, value);
+ __ Movsd(double_store_operand, value);
}
@@ -4676,7 +4614,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
__ Push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4780,7 +4719,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// the value in there. If that fails, call the runtime system.
__ SmiToInteger32(reg, reg);
__ xorl(reg, Immediate(0x80000000));
- __ cvtlsi2sd(temp_xmm, reg);
+ __ Cvtlsi2sd(temp_xmm, reg);
} else {
DCHECK(signedness == UNSIGNED_INT32);
__ LoadUint32(temp_xmm, reg);
@@ -4817,7 +4756,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
+ __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}
@@ -4844,7 +4783,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
@@ -4920,7 +4859,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// On x64 it is safe to load at heap number offset before evaluating the map
// check, since all heap objects are at least two words long.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
@@ -4930,11 +4869,11 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, result_reg);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
- __ movmskpd(kScratchRegister, result_reg);
- __ testq(kScratchRegister, Immediate(1));
+ __ Movmskpd(kScratchRegister, result_reg);
+ __ testl(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
@@ -4946,7 +4885,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
- __ pcmpeqd(result_reg, result_reg);
+ __ Pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
@@ -4999,16 +4938,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
+ __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg);
- __ ucomisd(xmm0, scratch);
+ __ Ucomisd(xmm0, scratch);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
- __ movmskpd(input_reg, xmm0);
+ __ Movmskpd(input_reg, xmm0);
__ andl(input_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
@@ -5311,7 +5250,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
@@ -5328,10 +5267,10 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->value());
Register result_reg = ToRegister(instr->result());
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ movq(result_reg, value_reg);
+ __ Movq(result_reg, value_reg);
__ shrq(result_reg, Immediate(32));
} else {
- __ movd(result_reg, value_reg);
+ __ Movd(result_reg, value_reg);
}
}
@@ -5340,11 +5279,10 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
Register hi_reg = ToRegister(instr->hi());
Register lo_reg = ToRegister(instr->lo());
XMMRegister result_reg = ToDoubleRegister(instr->result());
- XMMRegister xmm_scratch = double_scratch0();
- __ movd(result_reg, hi_reg);
- __ psllq(result_reg, 32);
- __ movd(xmm_scratch, lo_reg);
- __ orps(result_reg, xmm_scratch);
+ __ movl(kScratchRegister, hi_reg);
+ __ shlq(kScratchRegister, Immediate(32));
+ __ orq(kScratchRegister, lo_reg);
+ __ Movq(result_reg, kScratchRegister);
}
@@ -5378,11 +5316,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
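Two of the x64 hunks above repay a closer look. The capitalized mnemonics (Movsd, Ucomisd, Xorpd, ...) that replace the lowercase ones throughout this file are macro-assembler wrappers, presumably so the AVX encodings can be selected when available. More interesting is the rewritten DoMathMinMax, which now orders its checks NaN first, then the equal case (where +0 and -0 compare equal, so the sign bits are combined with Orpd for min and Andpd for max), and only then the ordinary comparison. A plain C++ sketch of the same selection logic, not V8 code:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // min(a, b) with the IEEE-754 edge cases handled as in DoMathMinMax:
    // NaN propagates, and min(+0, -0) is -0 because the sign bits are
    // OR-ed (the max variant AND-s them instead).
    double MathMinSketch(double a, double b) {
      if (std::isnan(a)) return a;   // left is NaN
      if (std::isnan(b)) return b;   // right is NaN
      if (a == b) {                  // also true for +0 vs. -0
        uint64_t ua, ub;
        std::memcpy(&ua, &a, sizeof ua);
        std::memcpy(&ub, &b, sizeof ub);
        ua |= ub;                    // a negative sign bit wins
        std::memcpy(&a, &ua, sizeof a);
        return a;
      }
      return a < b ? a : b;          // the "distinct" branch
    }

Similarly, DoConstructDouble now assembles the two 32-bit halves in a general-purpose register and moves the result into the XMM register with a single Movq, instead of shifting and OR-ing inside the XMM unit. In effect (again a sketch, not V8 code):

    double ConstructDoubleSketch(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;  // shlq + orq
      double result;
      std::memcpy(&result, &bits, sizeof result);              // Movq
      return result;
    }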
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index e05b310dec..eafdc778ad 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
-#define V8_X64_LITHIUM_CODEGEN_X64_H_
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
-#include "src/x64/lithium-x64.h"
#include "src/base/logging.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
+#include "src/crankshaft/x64/lithium-x64.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
-#include "src/x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
@@ -387,6 +387,7 @@ class LDeferredCode: public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X64_LITHIUM_CODEGEN_X64_H_
+#endif // V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index 800fb3f61c..cc65f188ce 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_X64
-#include "src/x64/lithium-codegen-x64.h"
-#include "src/x64/lithium-gap-resolver-x64.h"
+#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
+
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -189,10 +190,10 @@ void LGapResolver::EmitMove(int index) {
uint64_t int_val = bit_cast<uint64_t, double>(v);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
if (int_val == 0) {
- __ xorps(dst, dst);
+ __ Xorpd(dst, dst);
} else {
__ Set(kScratchRegister, int_val);
- __ movq(dst, kScratchRegister);
+ __ Movq(dst, kScratchRegister);
}
} else {
DCHECK(destination->IsStackSlot());
@@ -211,19 +212,19 @@ void LGapResolver::EmitMove(int index) {
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
- __ movaps(cgen_->ToDoubleRegister(destination), src);
+ __ Movapd(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
- __ movsd(cgen_->ToOperand(destination), src);
+ __ Movsd(cgen_->ToOperand(destination), src);
}
} else if (source->IsDoubleStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
+ __ Movsd(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
- __ movsd(xmm0, src);
- __ movsd(cgen_->ToOperand(destination), xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(cgen_->ToOperand(destination), xmm0);
}
} else {
UNREACHABLE();
@@ -261,18 +262,18 @@ void LGapResolver::EmitSwap(int index) {
// Swap two stack slots or two double stack slots.
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
+ __ Movsd(xmm0, src);
__ movp(kScratchRegister, dst);
- __ movsd(dst, xmm0);
+ __ Movsd(dst, xmm0);
__ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, source_reg);
- __ movaps(source_reg, destination_reg);
- __ movaps(destination_reg, xmm0);
+ __ Movapd(xmm0, source_reg);
+ __ Movapd(source_reg, destination_reg);
+ __ Movapd(destination_reg, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
@@ -284,9 +285,9 @@ void LGapResolver::EmitSwap(int index) {
LOperand* other = source->IsDoubleRegister() ? destination : source;
DCHECK(other->IsDoubleStackSlot());
Operand other_operand = cgen_->ToOperand(other);
- __ movsd(xmm0, other_operand);
- __ movsd(other_operand, reg);
- __ movaps(reg, xmm0);
+ __ Movapd(xmm0, reg);
+ __ Movsd(reg, other_operand);
+ __ Movsd(other_operand, xmm0);
} else {
// No other combinations are possible.
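The register/stack-slot case of EmitSwap above keeps the same one-load, one-store, one-copy shape but now starts from the register copy, so xmm0 holds the register's value while the slot is read. A plain analogue of the three-move rotation (sketch, not V8 code):

    // Register<->slot swap through a single scratch "register".
    void SwapRegAndSlot(double& reg, double& slot, double& xmm0_scratch) {
      xmm0_scratch = reg;   // Movapd xmm0, reg
      reg = slot;           // Movsd  reg, other_operand
      slot = xmm0_scratch;  // Movsd  other_operand, xmm0
    }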
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h
index 7882da56e0..641f0ee69f 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -44,6 +44,7 @@ class LGapResolver final BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#endif // V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 9df3a7dabf..76df55dcef 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/x64/lithium-x64.h"
+#include "src/crankshaft/x64/lithium-x64.h"
#include <sstream>
#if V8_TARGET_ARCH_X64
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/x64/lithium-codegen-x64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -367,11 +367,6 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -390,12 +385,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -470,14 +459,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2100,15 +2088,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2224,7 +2203,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2232,7 +2211,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2304,7 +2285,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2328,7 +2309,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
: UseRegisterOrConstantAtStart(instr->key());
}
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2468,19 +2450,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index 6129516515..74709d3316 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_LITHIUM_X64_H_
-#define V8_X64_LITHIUM_X64_H_
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_X64_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -141,7 +140,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1596,11 +1594,12 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
@@ -1611,6 +1610,7 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
void PrintDataTo(StringStream* stream) override;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
ElementsKind elements_kind() const {
@@ -1660,22 +1660,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2167,34 +2151,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2203,6 +2167,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
@@ -2864,6 +2829,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X64_LITHIUM_X64_H_
+#endif // V8_CRANKSHAFT_X64_LITHIUM_X64_H_
diff --git a/deps/v8/src/crankshaft/x87/OWNERS b/deps/v8/src/crankshaft/x87/OWNERS
new file mode 100644
index 0000000000..dd9998b261
--- /dev/null
+++ b/deps/v8/src/crankshaft/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 921259e964..074628b5ef 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -4,17 +4,18 @@
#if V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
-#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/x87/frames-x87.h"
-#include "src/x87/lithium-codegen-x87.h"
namespace v8 {
namespace internal {
@@ -106,26 +107,6 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
- }
-
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ Move(edx, Immediate(kNoAlignmentPadding));
@@ -161,7 +142,6 @@ bool LCodeGen::GeneratePrologue() {
} else {
__ Prologue(info()->IsCodePreAgingActive());
}
- info()->AddNoFrameRange(0, masm_->pc_offset());
}
if (info()->IsOptimizing() &&
@@ -518,13 +498,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-X87Register LCodeGen::ToX87Register(int index) const {
- return X87Register::FromAllocationIndex(index);
+X87Register LCodeGen::ToX87Register(int code) const {
+ return X87Register::from_code(code);
}
@@ -700,7 +680,7 @@ void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
DCHECK(is_mutable_);
// Assert the reg is prepared to write, but not on the virtual stack yet
DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
- stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+ stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
stack_depth_++;
}
@@ -2982,11 +2962,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
- no_frame_start = masm_->pc_offset();
}
if (dynamic_frame_alignment_) {
Label no_padding;
@@ -2998,9 +2976,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
EmitReturn(instr, false);
- if (no_frame_start != -1) {
- info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -3015,7 +2990,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -3029,7 +3004,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -3050,24 +3025,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3504,7 +3461,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ mov(receiver, Operand(receiver, global_offset));
- const int proxy_offset = GlobalObject::kGlobalProxyOffset;
+ const int proxy_offset = JSGlobalObject::kGlobalProxyOffset;
__ mov(receiver, FieldOperand(receiver, proxy_offset));
__ bind(&receiver_ok);
}
@@ -4204,7 +4161,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -4218,15 +4175,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ mov(vector_register, vector);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Set(eax, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4430,30 +4384,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4925,7 +4855,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5805,11 +5736,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
diff --git a/deps/v8/src/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 2da1a31461..821eb822a5 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
-#define V8_X87_LITHIUM_CODEGEN_X87_H_
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
#include <map>
-#include "src/x87/lithium-x87.h"
#include "src/base/logging.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
+#include "src/crankshaft/x87/lithium-x87.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
-#include "src/x87/lithium-gap-resolver-x87.h"
namespace v8 {
namespace internal {
@@ -499,6 +499,7 @@ class LDeferredCode : public ZoneObject {
LCodeGen::X87Stack x87_stack_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X87_LITHIUM_CODEGEN_X87_H_
+#endif // V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.cc b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
index edafcb2b16..aa9183541f 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_X87
-#include "src/x87/lithium-codegen-x87.h"
-#include "src/x87/lithium-gap-resolver-x87.h"
+#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
+#include "src/register-configuration.h"
+
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
namespace v8 {
namespace internal {
@@ -165,10 +167,14 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
+ int skip_index = reg.is(no_reg) ? -1 : reg.code();
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
+ code != skip_index) {
+ return Register::from_code(code);
}
}
return no_reg;
@@ -178,10 +184,12 @@ Register LGapResolver::GetFreeRegisterNot(Register reg) {
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] != 0) return false;
+ if (destination_uses_[code] != 0) return false;
}
return true;
}
@@ -204,7 +212,7 @@ void LGapResolver::Verify() {
void LGapResolver::Finish() {
if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
moves_.Rewind(0);
@@ -213,7 +221,7 @@ void LGapResolver::Finish() {
void LGapResolver::EnsureRestored(LOperand* operand) {
if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
}
@@ -222,7 +230,7 @@ void LGapResolver::EnsureRestored(LOperand* operand) {
Register LGapResolver::EnsureTempRegister() {
// 1. We may have already spilled to create a temp register.
if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
+ return Register::from_code(spilled_register_);
}
// 2. We may have a free register that we can use without spilling.
@@ -231,19 +239,22 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
+ Register scratch = Register::from_code(code);
__ push(scratch);
- spilled_register_ = i;
+ spilled_register_ = code;
return scratch;
}
}
// 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- Register scratch = Register::FromAllocationIndex(0);
+ spilled_register_ = config->GetAllocatableGeneralCode(0);
+ Register scratch = Register::from_code(spilled_register_);
__ push(scratch);
- spilled_register_ = 0;
return scratch;
}
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.h b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h
index cdd26b8776..6b6e2e64b6 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
-#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -72,14 +72,15 @@ class LGapResolver final BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kMaxNumAllocatableRegisters];
- int destination_uses_[Register::kMaxNumAllocatableRegisters];
+ int source_uses_[Register::kNumRegisters];
+ int destination_uses_[DoubleRegister::kMaxNumRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#endif // V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index cb429b2f21..72b0797ea9 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/x87/lithium-x87.h"
+#include "src/crankshaft/x87/lithium-x87.h"
#include <sstream>
#if V8_TARGET_ARCH_X87
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/x87/lithium-codegen-x87.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
namespace v8 {
namespace internal {
@@ -383,11 +383,6 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -406,12 +401,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -493,14 +482,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- X87Register::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -2135,15 +2123,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2222,7 +2201,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2230,7 +2209,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2301,7 +2282,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
? NULL
: UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
} else {
DCHECK(instr->value()->representation().IsSmiOrTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2316,7 +2297,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
- return new(zone()) LStoreKeyed(obj, key, val);
+ return new (zone()) LStoreKeyed(obj, key, val, nullptr);
}
}
@@ -2335,7 +2316,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2482,19 +2464,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index cc1a43fbaf..f0f694ef3d 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_LITHIUM_X87_H_
-#define V8_X87_LITHIUM_X87_H_
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_X87_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -104,7 +104,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -146,7 +145,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1616,14 +1614,16 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1695,22 +1695,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2199,34 +2183,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
+ LOperand* backing_store_owner) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2235,6 +2199,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2897,6 +2862,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X87_LITHIUM_X87_H_
+#endif // V8_CRANKSHAFT_X87_LITHIUM_X87_H_
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 104bc940a6..9f806724d5 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -109,7 +109,7 @@
'variables': {
'js_files': [
'd8.js',
- 'macros.py',
+ 'js/macros.py',
],
},
'conditions': [
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index 813d3126ed..813e5b83c0 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -276,6 +276,7 @@ class DateCache {
base::TimezoneCache* tz_cache_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index e70c34a831..c1afb7d1b6 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -329,6 +329,7 @@ DateParser::DateToken DateParser::ParseES5DateTime(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DATEPARSER_INL_H_
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 616318db29..484e1d3eca 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -364,6 +364,7 @@ class DateParser : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DATEPARSER_H_
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index c4c288148c..ad54247417 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -206,7 +206,7 @@ int DebugFrameHelper::FindIndexedNonNativeFrame(JavaScriptFrameIterator* it,
it->frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
// Omit functions from native and extension scripts.
- if (!frames[i].function()->IsSubjectToDebugging()) continue;
+ if (!frames[i].function()->shared()->IsSubjectToDebugging()) continue;
if (++count == index) return i;
}
}
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index e8ef240393..99d96404d1 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -75,8 +75,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->scope_type() == FUNCTION_SCOPE ||
- scope_info->scope_type() == ARROW_SCOPE) {
+ if (scope_info->scope_type() == FUNCTION_SCOPE) {
nested_scope_chain_.Add(scope_info);
}
} else {
@@ -86,8 +85,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Check whether we are in global, eval or function code.
Zone zone;
- if (scope_info->scope_type() != FUNCTION_SCOPE &&
- scope_info->scope_type() != ARROW_SCOPE) {
+ if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
ParseInfo info(&zone, script);
if (scope_info->scope_type() == SCRIPT_SCOPE) {
@@ -119,7 +117,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
context_(function->context()),
seen_script_scope_(false),
failed_(false) {
- if (!function->IsSubjectToDebugging()) context_ = Handle<Context>();
+ if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
}
@@ -132,6 +130,12 @@ MUST_USE_RESULT MaybeHandle<JSObject> ScopeIterator::MaterializeScopeDetails() {
Handle<JSObject> scope_object;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, scope_object, ScopeObject(), JSObject);
details->set(kScopeDetailsObjectIndex, *scope_object);
+ if (HasContext() && CurrentContext()->closure() != NULL) {
+ Handle<String> closure_name = JSFunction::GetDebugName(
+ Handle<JSFunction>(CurrentContext()->closure()));
+ if (!closure_name.is_null() && (closure_name->length() != 0))
+ details->set(kScopeDetailsNameIndex, *closure_name);
+ }
return isolate_->factory()->NewJSArrayWithElements(details);
}
@@ -177,7 +181,6 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
switch (scope_info->scope_type()) {
case FUNCTION_SCOPE:
- case ARROW_SCOPE:
DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
return ScopeTypeLocal;
case MODULE_SCOPE:
@@ -200,7 +203,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
}
}
if (context_->IsNativeContext()) {
- DCHECK(context_->global_object()->IsGlobalObject());
+ DCHECK(context_->global_object()->IsJSGlobalObject());
// If we are at the native context and have not yet seen script scope,
// fake it.
return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
@@ -401,7 +404,7 @@ void ScopeIterator::RetrieveScopeChain(Scope* scope,
MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
- Handle<GlobalObject> global(CurrentContext()->global_object());
+ Handle<JSGlobalObject> global(CurrentContext()->global_object());
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 20cd0336dc..6e5c459037 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -30,7 +30,8 @@ class ScopeIterator {
static const int kScopeDetailsTypeIndex = 0;
static const int kScopeDetailsObjectIndex = 1;
- static const int kScopeDetailsSize = 2;
+ static const int kScopeDetailsNameIndex = 2;
+ static const int kScopeDetailsSize = 3;
ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
bool ignore_nested_scopes = false);
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 4f23555d28..e41c508f44 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -38,10 +38,12 @@ Debug::Debug(Isolate* isolate)
is_suppressed_(false),
live_edit_enabled_(true), // TODO(yangguo): set to false by default.
break_disabled_(false),
+ break_points_active_(true),
in_debug_event_listener_(false),
break_on_exception_(false),
break_on_uncaught_exception_(false),
debug_info_list_(NULL),
+ feature_tracker_(isolate),
isolate_(isolate) {
ThreadInit();
}
@@ -315,6 +317,15 @@ Handle<Object> BreakLocation::BreakPointObjects() const {
}
+void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
+ uint32_t mask = 1 << feature;
+ // Only count one sample per feature and isolate.
+ if (bitfield_ & mask) return;
+ isolate_->counters()->debug_feature_usage()->AddSample(feature);
+ bitfield_ |= mask;
+}
+
+
// Threading support.
void Debug::ThreadInit() {
thread_local_.break_count_ = 0;
@@ -395,6 +406,9 @@ bool Debug::Load() {
debug_context_ = Handle<Context>::cast(
isolate_->global_handles()->Create(*context));
+
+ feature_tracker()->Track(DebugFeatureTracker::kActive);
+
return true;
}
@@ -457,7 +471,7 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
// If there is one or more real break points check whether any of these are
// triggered.
Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
- if (break_location.HasBreakPoint()) {
+ if (break_points_active_ && break_location.HasBreakPoint()) {
Handle<Object> break_point_objects = break_location.BreakPointObjects();
break_points_hit = CheckBreakPoints(break_point_objects);
}
@@ -574,7 +588,7 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
Handle<JSFunction> fun = Handle<JSFunction>::cast(
Object::GetProperty(isolate_, holder, name, STRICT).ToHandleChecked());
Handle<Object> undefined = isolate_->factory()->undefined_value();
- return Execution::TryCall(fun, undefined, argc, args);
+ return Execution::TryCall(isolate_, fun, undefined, argc, args);
}
@@ -624,6 +638,8 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
*source_position = location.statement_position();
location.SetBreakPoint(break_point_object);
+ feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+
// At least one active break point now.
return debug_info->GetBreakPointCount() > 0;
}
@@ -665,6 +681,8 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
debug_info, ALL_BREAK_LOCATIONS, position, alignment);
location.SetBreakPoint(break_point_object);
+ feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+
position = (alignment == STATEMENT_ALIGNED) ? location.statement_position()
: location.position();
@@ -746,9 +764,8 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
- Handle<FixedArray> new_bindings(function->function_bindings());
- Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
- isolate_);
+ Handle<BindingsArray> new_bindings(function->function_bindings());
+ Handle<Object> bindee(new_bindings->bound_function(), isolate_);
if (!bindee.is_null() && bindee->IsJSFunction()) {
Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
@@ -874,6 +891,8 @@ void Debug::PrepareStep(StepAction step_action,
JavaScriptFrameIterator frames_it(isolate_, id);
JavaScriptFrame* frame = frames_it.frame();
+ feature_tracker()->Track(DebugFeatureTracker::kStepping);
+
// First of all ensure there is one-shot break points in the top handler
// if any.
FloodHandlerWithOneShot();
@@ -923,7 +942,7 @@ void Debug::PrepareStep(StepAction step_action,
}
// Skip native and extension functions on the stack.
while (!frames_it.done() &&
- !frames_it.frame()->function()->IsSubjectToDebugging()) {
+ !frames_it.frame()->function()->shared()->IsSubjectToDebugging()) {
frames_it.Advance();
}
// Step out: If there is a JavaScript caller frame, we need to
@@ -1305,8 +1324,16 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
List<Handle<JSFunction> > functions;
List<Handle<JSGeneratorObject> > suspended_generators;
- if (!shared->optimized_code_map()->IsSmi()) {
- shared->ClearOptimizedCodeMap();
+ // Flush all optimized code maps. Note that the below heap iteration does not
+ // cover this, because the given function might have been inlined into code
+ // for which no JSFunction exists.
+ {
+ SharedFunctionInfo::Iterator iterator(isolate_);
+ while (SharedFunctionInfo* shared = iterator.Next()) {
+ if (!shared->optimized_code_map()->IsSmi()) {
+ shared->ClearOptimizedCodeMap();
+ }
+ }
}
// Make sure we abort incremental marking.
@@ -1503,7 +1530,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
// Make sure IC state is clean. This is so that we correctly flood
// accessor pairs when stepping in.
shared->code()->ClearInlineCaches();
- shared->feedback_vector()->ClearICSlots(*shared);
+ shared->ClearTypeFeedbackInfo();
// Create the debug info object.
DCHECK(shared->HasDebugCode());
@@ -1596,7 +1623,7 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
}
-bool Debug::IsDebugGlobal(GlobalObject* global) {
+bool Debug::IsDebugGlobal(JSGlobalObject* global) {
return is_loaded() && global == debug_context()->global_object();
}
@@ -1943,7 +1970,7 @@ void Debug::CallEventCallback(v8::DebugEvent event,
event_data,
event_listener_data_ };
Handle<JSReceiver> global(isolate_->global_proxy());
- Execution::TryCall(Handle<JSFunction>::cast(event_listener_),
+ Execution::TryCall(isolate_, Handle<JSFunction>::cast(event_listener_),
global, arraysize(argv), argv);
}
in_debug_event_listener_ = previous;
@@ -2089,7 +2116,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<String> answer;
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result =
- Execution::TryCall(process_debug_request, cmd_processor, 1,
+ Execution::TryCall(isolate_, process_debug_request, cmd_processor, 1,
request_args, &maybe_exception);
if (maybe_result.ToHandle(&answer_value)) {
@@ -2208,7 +2235,7 @@ void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
}
-MaybeHandle<Object> Debug::Call(Handle<JSFunction> fun, Handle<Object> data) {
+MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return isolate_->factory()->undefined_value();
@@ -2244,8 +2271,9 @@ void Debug::HandleDebugBreak() {
Object* fun = it.frame()->function();
if (fun && fun->IsJSFunction()) {
// Don't stop in builtin functions.
- if (!JSFunction::cast(fun)->IsSubjectToDebugging()) return;
- GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
+ if (!JSFunction::cast(fun)->shared()->IsSubjectToDebugging()) return;
+ JSGlobalObject* global =
+ JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (IsDebugGlobal(global)) return;
}
@@ -2419,7 +2447,7 @@ v8::Local<v8::String> MessageImpl::GetJSON() const {
}
MaybeHandle<Object> maybe_json =
- Execution::TryCall(Handle<JSFunction>::cast(fun), event_data_, 0, NULL);
+ Execution::TryCall(isolate, fun, event_data_, 0, NULL);
Handle<Object> json;
if (!maybe_json.ToHandle(&json) || !json->IsString()) {
return v8::Local<v8::String>();
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 640355a7e6..c24789d376 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -343,6 +343,28 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
};
+class DebugFeatureTracker {
+ public:
+ enum Feature {
+ kActive = 1,
+ kBreakPoint = 2,
+ kStepping = 3,
+ kHeapSnapshot = 4,
+ kAllocationTracking = 5,
+ kProfiler = 6,
+ kLiveEdit = 7,
+ };
+
+ explicit DebugFeatureTracker(Isolate* isolate)
+ : isolate_(isolate), bitfield_(0) {}
+ void Track(Feature feature);
+
+ private:
+ Isolate* isolate_;
+ uint32_t bitfield_;
+};
+
+
// This class contains the debugger support. The main purpose is to handle
// setting break points in the code.
//
@@ -368,7 +390,7 @@ class Debug {
void SetMessageHandler(v8::Debug::MessageHandler handler);
void EnqueueCommandMessage(Vector<const uint16_t> command,
v8::Debug::ClientData* client_data = NULL);
- MUST_USE_RESULT MaybeHandle<Object> Call(Handle<JSFunction> fun,
+ MUST_USE_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
Handle<Object> data);
Handle<Context> GetDebugContext();
void HandleDebugBreak();
@@ -441,7 +463,7 @@ class Debug {
BreakPositionAlignment position_aligment);
// Check whether a global object is the debug global object.
- bool IsDebugGlobal(GlobalObject* global);
+ bool IsDebugGlobal(JSGlobalObject* global);
// Check whether this frame is just about to return.
bool IsBreakAtReturn(JavaScriptFrame* frame);
@@ -482,7 +504,7 @@ class Debug {
inline bool in_debug_scope() const {
return !!base::NoBarrier_Load(&thread_local_.current_debug_scope_);
}
- void set_disable_break(bool v) { break_disabled_ = v; }
+ void set_break_points_active(bool v) { break_points_active_ = v; }
StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
int break_id() { return thread_local_.break_id_; }
@@ -507,6 +529,8 @@ class Debug {
StepAction last_step_action() { return thread_local_.last_step_action_; }
+ DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
+
private:
explicit Debug(Isolate* isolate);
@@ -592,8 +616,8 @@ class Debug {
bool is_active_;
bool is_suppressed_;
bool live_edit_enabled_;
- bool has_break_points_;
bool break_disabled_;
+ bool break_points_active_;
bool in_debug_event_listener_;
bool break_on_exception_;
bool break_on_uncaught_exception_;
@@ -605,6 +629,9 @@ class Debug {
// before returning to the DebugBreakCallHelper.
Address after_break_target_;
+ // Used to collect histogram data on debugger feature usage.
+ DebugFeatureTracker feature_tracker_;
+
// Per-thread data.
class ThreadLocal {
public:
@@ -763,6 +790,7 @@ class DebugCodegen : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DEBUG_DEBUG_H_
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 2e51d43088..50bd0a9c06 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -15,22 +15,20 @@ var IsNaN = global.isNaN;
var JSONParse = global.JSON.parse;
var JSONStringify = global.JSON.stringify;
var LookupMirror = global.LookupMirror;
+var MakeError;
+var MakeTypeError;
var MakeMirror = global.MakeMirror;
var MakeMirrorSerializer = global.MakeMirrorSerializer;
var MathMin = global.Math.min;
var Mirror = global.Mirror;
var MirrorType;
var ParseInt = global.parseInt;
-var ToBoolean;
-var ToNumber;
-var ToString;
var ValueMirror = global.ValueMirror;
utils.Import(function(from) {
+ MakeError = from.MakeError;
+ MakeTypeError = from.MakeTypeError;
MirrorType = from.MirrorType;
- ToBoolean = from.ToBoolean;
- ToNumber = from.ToNumber;
- ToString = from.ToString;
});
//----------------------------------------------------------------------------
@@ -106,7 +104,7 @@ var debugger_flags = {
getValue: function() { return this.value; },
setValue: function(value) {
this.value = !!value;
- %SetDisableBreak(!this.value);
+ %SetBreakPointsActive(this.value);
}
},
breakOnCaughtException: {
@@ -234,7 +232,7 @@ BreakPoint.prototype.isTriggered = function(exec_state) {
try {
var mirror = exec_state.frame(0).evaluate(this.condition());
// If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) || !ToBoolean(mirror.value_)) {
+ if (!(mirror instanceof ValueMirror) || !mirror.value_) {
return false;
}
} catch (e) {
@@ -950,8 +948,8 @@ function ExecutionState(break_id) {
ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
opt_callframe) {
var action = Debug.StepAction.StepIn;
- if (!IS_UNDEFINED(opt_action)) action = ToNumber(opt_action);
- var count = opt_count ? ToNumber(opt_count) : 1;
+ if (!IS_UNDEFINED(opt_action)) action = TO_NUMBER(opt_action);
+ var count = opt_count ? TO_NUMBER(opt_count) : 1;
var callFrameId = 0;
if (!IS_UNDEFINED(opt_callframe)) {
callFrameId = opt_callframe.details_.frameId();
@@ -963,7 +961,7 @@ ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
opt_additional_context) {
return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
- ToBoolean(disable_break),
+ TO_BOOLEAN(disable_break),
opt_additional_context));
};
@@ -985,7 +983,7 @@ ExecutionState.prototype.frame = function(opt_index) {
};
ExecutionState.prototype.setSelectedFrame = function(index) {
- var i = ToNumber(index);
+ var i = TO_NUMBER(index);
if (i < 0 || i >= this.frameCount()) {
throw MakeTypeError(kDebuggerFrame);
}
@@ -1421,7 +1419,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
var key = request.command.toLowerCase();
var handler = DebugCommandProcessor.prototype.dispatch_[key];
if (IS_FUNCTION(handler)) {
- %_CallFunction(this, request, response, handler);
+ %_Call(handler, this, request, response);
} else {
throw MakeError(kDebugger,
'Unknown command "' + request.command + '" in request');
@@ -1432,7 +1430,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
response = this.createResponse();
}
response.success = false;
- response.message = ToString(e);
+ response.message = TO_STRING(e);
}
// Return the response as a JSON encoded string.
@@ -1449,7 +1447,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
'"request_seq":' + request.seq + ',' +
'"type":"response",' +
'"success":false,' +
- '"message":"Internal error: ' + ToString(e) + '"}';
+ '"message":"Internal error: ' + TO_STRING(e) + '"}';
}
} catch (e) {
// Failed in one of the catch blocks above - most generic error.
@@ -1470,7 +1468,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
// Get the stepcount argument if any.
if (stepcount) {
- count = ToNumber(stepcount);
+ count = TO_NUMBER(stepcount);
if (count < 0) {
throw MakeError(kDebugger,
'Invalid stepcount argument "' + stepcount + '".');
@@ -1545,7 +1543,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Find the function through a global evaluate.
f = this.exec_state_.evaluateGlobal(target).value();
} catch (e) {
- response.failed('Error: "' + ToString(e) +
+ response.failed('Error: "' + TO_STRING(e) +
'" evaluating "' + target + '"');
return;
}
@@ -1634,7 +1632,7 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
}
// Pull out arguments.
- var break_point = ToNumber(request.arguments.breakpoint);
+ var break_point = TO_NUMBER(request.arguments.breakpoint);
var enabled = request.arguments.enabled;
var condition = request.arguments.condition;
var ignoreCount = request.arguments.ignoreCount;
@@ -1710,7 +1708,7 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
}
// Pull out arguments.
- var break_point = ToNumber(request.arguments.breakpoint);
+ var break_point = TO_NUMBER(request.arguments.breakpoint);
// Check for legal arguments.
if (!break_point) {
@@ -1968,7 +1966,7 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// With no scope argument just return top scope.
var scope_index = 0;
if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
- scope_index = ToNumber(request.arguments.number);
+ scope_index = TO_NUMBER(request.arguments.number);
if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
return response.failed('Invalid scope number');
}
@@ -1992,11 +1990,11 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
return value_mirror.value();
} else if ("stringDescription" in value_description) {
if (value_description.type == MirrorType.BOOLEAN_TYPE) {
- return ToBoolean(value_description.stringDescription);
+ return TO_BOOLEAN(value_description.stringDescription);
} else if (value_description.type == MirrorType.NUMBER_TYPE) {
- return ToNumber(value_description.stringDescription);
+ return TO_NUMBER(value_description.stringDescription);
} if (value_description.type == MirrorType.STRING_TYPE) {
- return ToString(value_description.stringDescription);
+ return TO_STRING(value_description.stringDescription);
} else {
throw MakeError(kDebugger, "Unknown type");
}
@@ -2032,7 +2030,7 @@ DebugCommandProcessor.prototype.setVariableValueRequest_ =
if (IS_UNDEFINED(scope_description.number)) {
response.failed('Missing scope number');
}
- var scope_index = ToNumber(scope_description.number);
+ var scope_index = TO_NUMBER(scope_description.number);
var scope = scope_holder.scope(scope_index);
@@ -2064,7 +2062,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// The expression argument could be an integer so we convert it to a
// string.
try {
- expression = ToString(expression);
+ expression = TO_STRING(expression);
} catch(e) {
return response.failed('Failed to convert expression argument to string');
}
@@ -2094,7 +2092,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (global) {
// Evaluate in the native context.
response.body = this.exec_state_.evaluateGlobal(
- expression, ToBoolean(disable_break), additional_context_object);
+ expression, TO_BOOLEAN(disable_break), additional_context_object);
return;
}
@@ -2110,18 +2108,18 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
- var frame_number = ToNumber(frame);
+ var frame_number = TO_NUMBER(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
// Evaluate in the specified frame.
response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, ToBoolean(disable_break), additional_context_object);
+ expression, TO_BOOLEAN(disable_break), additional_context_object);
return;
} else {
// Evaluate in the selected frame.
response.body = this.exec_state_.frame().evaluate(
- expression, ToBoolean(disable_break), additional_context_object);
+ expression, TO_BOOLEAN(disable_break), additional_context_object);
return;
}
};
@@ -2142,7 +2140,7 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
// Set 'includeSource' option for script lookup.
if (!IS_UNDEFINED(request.arguments.includeSource)) {
- var includeSource = ToBoolean(request.arguments.includeSource);
+ var includeSource = TO_BOOLEAN(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
@@ -2210,7 +2208,7 @@ DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
to_line = request.arguments.toLine;
if (!IS_UNDEFINED(request.arguments.frame)) {
- var frame_number = ToNumber(request.arguments.frame);
+ var frame_number = TO_NUMBER(request.arguments.frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
@@ -2246,7 +2244,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (request.arguments) {
// Pull out arguments.
if (!IS_UNDEFINED(request.arguments.types)) {
- types = ToNumber(request.arguments.types);
+ types = TO_NUMBER(request.arguments.types);
if (IsNaN(types) || types < 0) {
return response.failed('Invalid types "' +
request.arguments.types + '"');
@@ -2254,7 +2252,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
}
if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = ToBoolean(request.arguments.includeSource);
+ includeSource = TO_BOOLEAN(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
@@ -2269,7 +2267,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
var filterStr = null;
var filterNum = null;
if (!IS_UNDEFINED(request.arguments.filter)) {
- var num = ToNumber(request.arguments.filter);
+ var num = TO_NUMBER(request.arguments.filter);
if (!IsNaN(num)) {
filterNum = num;
}
@@ -2405,7 +2403,7 @@ DebugCommandProcessor.prototype.restartFrameRequest_ = function(
var frame_mirror;
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
- var frame_number = ToNumber(frame);
+ var frame_number = TO_NUMBER(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 8a936ac177..7e991b62bc 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -910,7 +910,7 @@ class ReplacingVisitor : public ObjectVisitor {
: original_(original), substitution_(substitution) {
}
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if (*p == original_) {
*p = substitution_;
@@ -918,14 +918,14 @@ class ReplacingVisitor : public ObjectVisitor {
}
}
- virtual void VisitCodeEntry(Address entry) {
+ void VisitCodeEntry(Address entry) override {
if (Code::GetObjectFromEntryAddress(entry) == original_) {
Address substitution_entry = substitution_->instruction_start();
Memory::Address_at(entry) = substitution_entry;
}
}
- virtual void VisitCodeTarget(RelocInfo* rinfo) {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
Address substitution_entry = substitution_->instruction_start();
@@ -933,9 +933,7 @@ class ReplacingVisitor : public ObjectVisitor {
}
}
- virtual void VisitDebugTarget(RelocInfo* rinfo) {
- VisitCodeTarget(rinfo);
- }
+ void VisitDebugTarget(RelocInfo* rinfo) override { VisitCodeTarget(rinfo); }
private:
Code* original_;
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 251368f0cb..29fe60579f 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -364,6 +364,7 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
friend class JSArrayBasedStruct<SharedInfoWrapper>;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif /* V8_DEBUG_LIVEEDIT_H_ */
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 11f9e485c1..5ff3e34955 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -8,22 +8,30 @@
// ----------------------------------------------------------------------------
// Imports
+var ErrorToString;
var FunctionSourceString;
var GlobalArray = global.Array;
var IsNaN = global.isNaN;
var JSONStringify = global.JSON.stringify;
+var MakeError;
+var MapEntries;
+var MapIteratorNext;
var MathMin = global.Math.min;
var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var SetIteratorNext;
+var SetValues;
var SymbolToString;
-var ToBoolean;
-var ToString;
utils.Import(function(from) {
+ ErrorToString = from.ErrorToString;
FunctionSourceString = from.FunctionSourceString;
+ MakeError = from.MakeError;
+ MapEntries = from.MapEntries;
+ MapIteratorNext = from.MapIteratorNext;
+ SetIteratorNext = from.SetIteratorNext;
+ SetValues = from.SetValues;
SymbolToString = from.SymbolToString;
- ToBoolean = from.ToBoolean;
- ToString = from.ToString;
});
// ----------------------------------------------------------------------------
@@ -536,7 +544,7 @@ Mirror.prototype.toText = function() {
* @extends Mirror
*/
function ValueMirror(type, value, transient) {
- %_CallFunction(this, type, Mirror);
+ %_Call(Mirror, this, type);
this.value_ = value;
if (!transient) {
this.allocateHandle_();
@@ -582,7 +590,7 @@ ValueMirror.prototype.value = function() {
* @extends ValueMirror
*/
function UndefinedMirror() {
- %_CallFunction(this, MirrorType.UNDEFINED_TYPE, UNDEFINED, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.UNDEFINED_TYPE, UNDEFINED);
}
inherits(UndefinedMirror, ValueMirror);
@@ -598,7 +606,7 @@ UndefinedMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function NullMirror() {
- %_CallFunction(this, MirrorType.NULL_TYPE, null, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.NULL_TYPE, null);
}
inherits(NullMirror, ValueMirror);
@@ -615,7 +623,7 @@ NullMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function BooleanMirror(value) {
- %_CallFunction(this, MirrorType.BOOLEAN_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.BOOLEAN_TYPE, value);
}
inherits(BooleanMirror, ValueMirror);
@@ -632,7 +640,7 @@ BooleanMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function NumberMirror(value) {
- %_CallFunction(this, MirrorType.NUMBER_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.NUMBER_TYPE, value);
}
inherits(NumberMirror, ValueMirror);
@@ -649,7 +657,7 @@ NumberMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function StringMirror(value) {
- %_CallFunction(this, MirrorType.STRING_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.STRING_TYPE, value);
}
inherits(StringMirror, ValueMirror);
@@ -678,7 +686,7 @@ StringMirror.prototype.toText = function() {
* @extends Mirror
*/
function SymbolMirror(value) {
- %_CallFunction(this, MirrorType.SYMBOL_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.SYMBOL_TYPE, value);
}
inherits(SymbolMirror, ValueMirror);
@@ -689,7 +697,7 @@ SymbolMirror.prototype.description = function() {
SymbolMirror.prototype.toText = function() {
- return %_CallFunction(this.value_, SymbolToString);
+ return %_Call(SymbolToString, this.value_);
}
@@ -703,7 +711,7 @@ SymbolMirror.prototype.toText = function() {
*/
function ObjectMirror(value, type, transient) {
type = type || MirrorType.OBJECT_TYPE;
- %_CallFunction(this, type, value, transient, ValueMirror);
+ %_Call(ValueMirror, this, type, value, transient);
}
inherits(ObjectMirror, ValueMirror);
@@ -953,7 +961,7 @@ ObjectMirror.GetInternalProperties = function(value) {
* @extends ObjectMirror
*/
function FunctionMirror(value) {
- %_CallFunction(this, value, MirrorType.FUNCTION_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.FUNCTION_TYPE);
this.resolved_ = true;
}
inherits(FunctionMirror, ObjectMirror);
@@ -1107,7 +1115,7 @@ FunctionMirror.prototype.toText = function() {
function UnresolvedFunctionMirror(value) {
// Construct this using the ValueMirror as an unresolved function is not a
// real object but just a string.
- %_CallFunction(this, MirrorType.FUNCTION_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.FUNCTION_TYPE, value);
this.propertyCount_ = 0;
this.elementCount_ = 0;
this.resolved_ = false;
@@ -1157,7 +1165,7 @@ UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
* @extends ObjectMirror
*/
function ArrayMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
+ %_Call(ObjectMirror, this, value);
}
inherits(ArrayMirror, ObjectMirror);
@@ -1174,7 +1182,7 @@ ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
if (from_index > to_index) return new GlobalArray();
var values = new GlobalArray(to_index - from_index + 1);
for (var i = from_index; i <= to_index; i++) {
- var details = %DebugGetPropertyDetails(this.value_, ToString(i));
+ var details = %DebugGetPropertyDetails(this.value_, TO_STRING(i));
var value;
if (details) {
value = new PropertyMirror(this, i, details);
@@ -1194,7 +1202,7 @@ ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
* @extends ObjectMirror
*/
function DateMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
+ %_Call(ObjectMirror, this, value);
}
inherits(DateMirror, ObjectMirror);
@@ -1212,7 +1220,7 @@ DateMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function RegExpMirror(value) {
- %_CallFunction(this, value, MirrorType.REGEXP_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.REGEXP_TYPE);
}
inherits(RegExpMirror, ObjectMirror);
@@ -1284,7 +1292,7 @@ RegExpMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function ErrorMirror(value) {
- %_CallFunction(this, value, MirrorType.ERROR_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.ERROR_TYPE);
}
inherits(ErrorMirror, ObjectMirror);
@@ -1302,7 +1310,7 @@ ErrorMirror.prototype.toText = function() {
// Use the same text representation as in messages.js.
var text;
try {
- text = %_CallFunction(this.value_, builtins.$errorToString);
+ text = %_Call(ErrorToString, this.value_);
} catch (e) {
text = '#<Error>';
}
@@ -1317,7 +1325,7 @@ ErrorMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function PromiseMirror(value) {
- %_CallFunction(this, value, MirrorType.PROMISE_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.PROMISE_TYPE);
}
inherits(PromiseMirror, ObjectMirror);
@@ -1346,7 +1354,7 @@ PromiseMirror.prototype.promiseValue = function() {
function MapMirror(value) {
- %_CallFunction(this, value, MirrorType.MAP_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.MAP_TYPE);
}
inherits(MapMirror, ObjectMirror);
@@ -1372,7 +1380,7 @@ MapMirror.prototype.entries = function(opt_limit) {
return result;
}
- var iter = %_CallFunction(this.value_, builtins.$mapEntries);
+ var iter = %_Call(MapEntries, this.value_);
var next;
while ((!opt_limit || result.length < opt_limit) &&
!(next = iter.next()).done) {
@@ -1386,7 +1394,7 @@ MapMirror.prototype.entries = function(opt_limit) {
function SetMirror(value) {
- %_CallFunction(this, value, MirrorType.SET_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.SET_TYPE);
}
inherits(SetMirror, ObjectMirror);
@@ -1395,7 +1403,7 @@ function IteratorGetValues_(iter, next_function, opt_limit) {
var result = [];
var next;
while ((!opt_limit || result.length < opt_limit) &&
- !(next = %_CallFunction(iter, next_function)).done) {
+ !(next = %_Call(next_function, iter)).done) {
result.push(next.value);
}
return result;
@@ -1414,13 +1422,13 @@ SetMirror.prototype.values = function(opt_limit) {
return %GetWeakSetValues(this.value_, opt_limit || 0);
}
- var iter = %_CallFunction(this.value_, builtins.$setValues);
- return IteratorGetValues_(iter, builtins.$setIteratorNext, opt_limit);
+ var iter = %_Call(SetValues, this.value_);
+ return IteratorGetValues_(iter, SetIteratorNext, opt_limit);
};
function IteratorMirror(value) {
- %_CallFunction(this, value, MirrorType.ITERATOR_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.ITERATOR_TYPE);
}
inherits(IteratorMirror, ObjectMirror);
@@ -1435,11 +1443,11 @@ inherits(IteratorMirror, ObjectMirror);
IteratorMirror.prototype.preview = function(opt_limit) {
if (IS_MAP_ITERATOR(this.value_)) {
return IteratorGetValues_(%MapIteratorClone(this.value_),
- builtins.$mapIteratorNext,
+ MapIteratorNext,
opt_limit);
} else if (IS_SET_ITERATOR(this.value_)) {
return IteratorGetValues_(%SetIteratorClone(this.value_),
- builtins.$setIteratorNext,
+ SetIteratorNext,
opt_limit);
}
};
@@ -1452,7 +1460,7 @@ IteratorMirror.prototype.preview = function(opt_limit) {
* @extends Mirror
*/
function GeneratorMirror(value) {
- %_CallFunction(this, value, MirrorType.GENERATOR_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.GENERATOR_TYPE);
}
inherits(GeneratorMirror, ObjectMirror);
@@ -1519,7 +1527,7 @@ GeneratorMirror.prototype.receiver = function() {
* @extends Mirror
*/
function PropertyMirror(mirror, name, details) {
- %_CallFunction(this, MirrorType.PROPERTY_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.PROPERTY_TYPE);
this.mirror_ = mirror;
this.name_ = name;
this.value_ = details[0];
@@ -1662,7 +1670,7 @@ PropertyMirror.prototype.isNative = function() {
* @extends Mirror
*/
function InternalPropertyMirror(name, value) {
- %_CallFunction(this, MirrorType.INTERNAL_PROPERTY_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.INTERNAL_PROPERTY_TYPE);
this.name_ = name;
this.value_ = value;
}
@@ -1875,7 +1883,7 @@ FrameDetails.prototype.stepInPositionsImpl = function() {
* @extends Mirror
*/
function FrameMirror(break_id, index) {
- %_CallFunction(this, MirrorType.FRAME_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.FRAME_TYPE);
this.break_id_ = break_id;
this.index_ = index;
this.details_ = new FrameDetails(break_id, index);
@@ -2074,7 +2082,7 @@ FrameMirror.prototype.evaluate = function(source, disable_break,
this.details_.frameId(),
this.details_.inlinedFrameIndex(),
source,
- ToBoolean(disable_break),
+ TO_BOOLEAN(disable_break),
opt_context_object));
};
@@ -2233,8 +2241,10 @@ FrameMirror.prototype.toText = function(opt_locals) {
};
+// These indices correspond to the definitions in debug-scopes.h.
var kScopeDetailsTypeIndex = 0;
var kScopeDetailsObjectIndex = 1;
+var kScopeDetailsNameIndex = 2;
function ScopeDetails(frame, fun, index, opt_details) {
if (frame) {
@@ -2271,6 +2281,14 @@ ScopeDetails.prototype.object = function() {
};
+ScopeDetails.prototype.name = function() {
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ }
+ return this.details_[kScopeDetailsNameIndex];
+};
+
+
ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
var raw_res;
if (!IS_UNDEFINED(this.break_id_)) {
@@ -2296,7 +2314,7 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* @extends Mirror
*/
function ScopeMirror(frame, fun, index, opt_details) {
- %_CallFunction(this, MirrorType.SCOPE_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.SCOPE_TYPE);
if (frame) {
this.frame_index_ = frame.index_;
} else {
@@ -2351,7 +2369,7 @@ ScopeMirror.prototype.setVariableValue = function(name, new_value) {
* @extends Mirror
*/
function ScriptMirror(script) {
- %_CallFunction(this, MirrorType.SCRIPT_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.SCRIPT_TYPE);
this.script_ = script;
this.context_ = new ContextMirror(script.context_data);
this.allocateHandle_();
@@ -2472,7 +2490,7 @@ ScriptMirror.prototype.toText = function() {
* @extends Mirror
*/
function ContextMirror(data) {
- %_CallFunction(this, MirrorType.CONTEXT_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.CONTEXT_TYPE);
this.data_ = data;
this.allocateHandle_();
}
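
Every mirrors.js hunk above is the same mechanical rewrite: the old %_CallFunction intrinsic took the receiver first and the callee last, while the new %_Call takes the callee first, matching Function.prototype.call. A plain-JavaScript approximation of the new ordering (illustrative only; %_Call is a V8-natives intrinsic, and the constructor shapes below merely echo the mirror constructors, they are not runnable natives code):

    // Old natives form:  %_CallFunction(receiver, arg1, arg2, fn)
    // New natives form:  %_Call(fn, receiver, arg1, arg2)
    // Equivalent standard JavaScript:
    function ValueMirror(type, value) {
      this.type_ = type;
      this.value_ = value;
    }
    function ObjectMirror(value, type) {
      // Stands in for %_Call(ValueMirror, this, type, value):
      ValueMirror.call(this, type || 'object', value);
    }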
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index fdca98e90a..dd012db6ab 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -736,7 +736,7 @@ void Deoptimizer::DoComputeOutputFrames() {
TranslationIterator state_iterator(translations, translation_index);
translated_state_.Init(
- input_->GetFramePointerAddress(), function_, &state_iterator,
+ input_->GetFramePointerAddress(), &state_iterator,
input_data->LiteralArray(), input_->GetRegisterValues(),
trace_scope_ == nullptr ? nullptr : trace_scope_->file());
@@ -760,23 +760,23 @@ void Deoptimizer::DoComputeOutputFrames() {
int frame_index = static_cast<int>(i);
switch (translated_state_.frames()[i].kind()) {
case TranslatedFrame::kFunction:
- DoComputeJSFrame(nullptr, frame_index);
+ DoComputeJSFrame(frame_index);
jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
- DoComputeArgumentsAdaptorFrame(nullptr, frame_index);
+ DoComputeArgumentsAdaptorFrame(frame_index);
break;
case TranslatedFrame::kConstructStub:
- DoComputeConstructStubFrame(nullptr, frame_index);
+ DoComputeConstructStubFrame(frame_index);
break;
case TranslatedFrame::kGetter:
- DoComputeAccessorStubFrame(nullptr, frame_index, false);
+ DoComputeAccessorStubFrame(frame_index, false);
break;
case TranslatedFrame::kSetter:
- DoComputeAccessorStubFrame(nullptr, frame_index, true);
+ DoComputeAccessorStubFrame(frame_index, true);
break;
case TranslatedFrame::kCompiledStub:
- DoComputeCompiledStubFrame(nullptr, frame_index);
+ DoComputeCompiledStubFrame(frame_index);
break;
case TranslatedFrame::kInvalid:
FATAL("invalid frame");
@@ -806,8 +806,7 @@ void Deoptimizer::DoComputeOutputFrames() {
}
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeJSFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1023,8 +1022,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1130,8 +1128,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1266,8 +1263,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
+void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
bool is_setter_stub_frame) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
@@ -1392,8 +1388,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
//
// FROM TO
// | .... | | .... |
@@ -2072,7 +2067,7 @@ void Translation::StoreBoolRegister(Register reg) {
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER, zone());
- buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
+ buffer_->Add(reg.code(), zone());
}
@@ -2491,16 +2486,6 @@ Object* TranslatedValue::GetRawValue() const {
break;
}
- case kDouble: {
- int int_value = FastD2IChecked(double_value());
- bool is_smi = !IsMinusZero(double_value()) &&
- double_value() == int_value && Smi::IsValid(int_value);
- if (is_smi) {
- return Smi::FromInt(static_cast<int32_t>(int_value));
- }
- break;
- }
-
case kBoolBit: {
if (uint32_value() == 0) {
return isolate()->heap()->false_value();
@@ -2701,7 +2686,7 @@ void TranslatedFrame::Handlify() {
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
TranslationIterator* iterator, FixedArray* literal_array, Address fp,
- JSFunction* frame_function, FILE* trace_file) {
+ FILE* trace_file) {
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
@@ -2716,7 +2701,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
PrintF(trace_file, " reading input frame %s", name.get());
int arg_count = shared_info->internal_formal_parameter_count() + 1;
PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
- arg_count, node_id.ToInt(), height);
+ node_id.ToInt(), arg_count, height);
}
return TranslatedFrame::JSFrame(node_id, shared_info, height);
}
@@ -2925,7 +2910,7 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
double value = registers->GetDoubleRegister(input_reg);
if (trace_file != nullptr) {
PrintF(trace_file, "%e ; %s (bool)", value,
- DoubleRegister::AllocationIndexToString(input_reg));
+ DoubleRegister::from_code(input_reg).ToString());
}
return TranslatedValue::NewDouble(this, value);
}
@@ -3024,8 +3009,8 @@ TranslatedState::TranslatedState(JavaScriptFrame* frame)
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
- Init(frame->fp(), frame->function(), &it, data->LiteralArray(),
- nullptr /* registers */, nullptr /* trace file */);
+ Init(frame->fp(), &it, data->LiteralArray(), nullptr /* registers */,
+ nullptr /* trace file */);
}
@@ -3036,7 +3021,6 @@ TranslatedState::TranslatedState()
void TranslatedState::Init(Address input_frame_pointer,
- JSFunction* input_frame_function,
TranslationIterator* iterator,
FixedArray* literal_array, RegisterValues* registers,
FILE* trace_file) {
@@ -3058,9 +3042,8 @@ void TranslatedState::Init(Address input_frame_pointer,
// Read the frames
for (int i = 0; i < count; i++) {
// Read the frame descriptor.
- frames_.push_back(
- CreateNextTranslatedFrame(iterator, literal_array, input_frame_pointer,
- input_frame_function, trace_file));
+ frames_.push_back(CreateNextTranslatedFrame(
+ iterator, literal_array, input_frame_pointer, trace_file));
TranslatedFrame& frame = frames_.back();
// Read the values.
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index b116ccd54d..8d06956818 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -252,9 +252,9 @@ class TranslatedState {
Isolate* isolate() { return isolate_; }
- void Init(Address input_frame_pointer, JSFunction* input_frame_function,
- TranslationIterator* iterator, FixedArray* literal_array,
- RegisterValues* registers, FILE* trace_file);
+ void Init(Address input_frame_pointer, TranslationIterator* iterator,
+ FixedArray* literal_array, RegisterValues* registers,
+ FILE* trace_file);
private:
friend TranslatedValue;
@@ -262,7 +262,6 @@ class TranslatedState {
TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator,
FixedArray* literal_array,
Address fp,
- JSFunction* frame_function,
FILE* trace_file);
TranslatedValue CreateNextTranslatedValue(int frame_index, int value_index,
TranslationIterator* iterator,
@@ -586,16 +585,11 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
- void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index);
- void DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index);
- void DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame);
- void DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index);
+ void DoComputeJSFrame(int frame_index);
+ void DoComputeArgumentsAdaptorFrame(int frame_index);
+ void DoComputeConstructStubFrame(int frame_index);
+ void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
+ void DoComputeCompiledStubFrame(int frame_index);
void WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index b16f090e9d..1158e01495 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -32,7 +32,9 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
+ const char* name =
+ code_ == NULL ? NULL : code_->GetIsolate()->builtins()->Lookup(pc);
+
if (name != NULL) {
SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index 32e48c4e92..ac53f775b1 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -20,6 +20,7 @@ class Disassembler : public AllStatic {
Code* code = NULL);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DISASSEMBLER_H_
diff --git a/deps/v8/src/diy-fp.h b/deps/v8/src/diy-fp.h
index e0daf27a1e..1325c94519 100644
--- a/deps/v8/src/diy-fp.h
+++ b/deps/v8/src/diy-fp.h
@@ -93,6 +93,7 @@ class DiyFp {
int e_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DIY_FP_H_
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
index cb12628675..f21bd748f9 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/double.h
@@ -204,6 +204,7 @@ class Double {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DOUBLE_H_
diff --git a/deps/v8/src/dtoa.h b/deps/v8/src/dtoa.h
index ca6277ee56..9f190ab472 100644
--- a/deps/v8/src/dtoa.h
+++ b/deps/v8/src/dtoa.h
@@ -59,6 +59,7 @@ const int kBase10MaximalLength = 17;
void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
Vector<char> buffer, int* sign, int* length, int* point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DTOA_H_
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
index 8d539f64bd..020471830c 100644
--- a/deps/v8/src/effects.h
+++ b/deps/v8/src/effects.h
@@ -329,6 +329,7 @@ class NestedEffects: public
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EFFECTS_H_
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 1397bd7c19..5f6cd62c46 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -226,6 +226,7 @@ inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ELEMENTS_KIND_H_
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index e716aac255..7eafe9bfaf 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -506,9 +506,8 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateImpl(holder);
}
- virtual bool IsPacked(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store, uint32_t start,
- uint32_t end) final {
+ bool IsPacked(Handle<JSObject> holder, Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t end) final {
return ElementsAccessorSubclass::IsPackedImpl(holder, backing_store, start,
end);
}
@@ -518,7 +517,8 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t end) {
if (IsFastPackedElementsKind(kind())) return true;
for (uint32_t i = start; i < end; i++) {
- if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store)) {
+ if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store,
+ NONE)) {
return false;
}
}
@@ -543,20 +543,22 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) final {
+ bool HasElement(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store,
+ PropertyAttributes filter) final {
return ElementsAccessorSubclass::HasElementImpl(holder, index,
- backing_store);
+ backing_store, filter);
}
static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) {
+ Handle<FixedArrayBase> backing_store,
+ PropertyAttributes filter) {
return ElementsAccessorSubclass::GetEntryForIndexImpl(
- *holder, *backing_store, index) != kMaxUInt32;
+ *holder, *backing_store, index, filter) != kMaxUInt32;
}
- virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
- uint32_t entry) final {
+ Handle<Object> Get(Handle<FixedArrayBase> backing_store,
+ uint32_t entry) final {
return ElementsAccessorSubclass::GetImpl(backing_store, entry);
}
@@ -566,8 +568,7 @@ class ElementsAccessorBase : public ElementsAccessor {
return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
}
- virtual void Set(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) final {
+ void Set(FixedArrayBase* backing_store, uint32_t entry, Object* value) final {
ElementsAccessorSubclass::SetImpl(backing_store, entry, value);
}
@@ -582,10 +583,9 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void Reconfigure(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
- Handle<Object> value,
- PropertyAttributes attributes) final {
+ void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
+ uint32_t entry, Handle<Object> value,
+ PropertyAttributes attributes) final {
ElementsAccessorSubclass::ReconfigureImpl(object, store, entry, value,
attributes);
}
@@ -597,9 +597,8 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void Add(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) final {
+ void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, uint32_t new_capacity) final {
ElementsAccessorSubclass::AddImpl(object, index, value, attributes,
new_capacity);
}
@@ -610,9 +609,8 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual uint32_t Push(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, Arguments* args,
- uint32_t push_size) final {
+ uint32_t Push(Handle<JSArray> receiver, Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t push_size) final {
return ElementsAccessorSubclass::PushImpl(receiver, backing_store, args,
push_size);
}
@@ -624,9 +622,9 @@ class ElementsAccessorBase : public ElementsAccessor {
return 0;
}
- virtual uint32_t Unshift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- Arguments* args, uint32_t unshift_size) final {
+ uint32_t Unshift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, Arguments* args,
+ uint32_t unshift_size) final {
return ElementsAccessorSubclass::UnshiftImpl(receiver, backing_store, args,
unshift_size);
}
@@ -638,9 +636,9 @@ class ElementsAccessorBase : public ElementsAccessor {
return 0;
}
- virtual Handle<JSArray> Slice(Handle<JSObject> receiver,
- Handle<FixedArrayBase> backing_store,
- uint32_t start, uint32_t end) final {
+ Handle<JSArray> Slice(Handle<JSObject> receiver,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t end) final {
return ElementsAccessorSubclass::SliceImpl(receiver, backing_store, start,
end);
}
@@ -652,10 +650,10 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<JSArray>();
}
- virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- uint32_t start, uint32_t delete_count,
- Arguments* args, uint32_t add_count) final {
+ Handle<JSArray> Splice(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t delete_count, Arguments* args,
+ uint32_t add_count) final {
return ElementsAccessorSubclass::SpliceImpl(receiver, backing_store, start,
delete_count, args, add_count);
}
@@ -668,8 +666,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<JSArray>();
}
- virtual Handle<Object> Pop(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) final {
+ Handle<Object> Pop(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) final {
return ElementsAccessorSubclass::PopImpl(receiver, backing_store);
}
@@ -679,8 +677,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<Object>();
}
- virtual Handle<Object> Shift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) final {
+ Handle<Object> Shift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) final {
return ElementsAccessorSubclass::ShiftImpl(receiver, backing_store);
}
@@ -690,7 +688,7 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<Object>();
}
- virtual void SetLength(Handle<JSArray> array, uint32_t length) final {
+ void SetLength(Handle<JSArray> array, uint32_t length) final {
ElementsAccessorSubclass::SetLengthImpl(array->GetIsolate(), array, length,
handle(array->elements()));
}
@@ -812,12 +810,12 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- virtual void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) final {
+ void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) final {
ElementsAccessorSubclass::GrowCapacityAndConvertImpl(object, capacity);
}
- virtual void Delete(Handle<JSObject> obj, uint32_t entry) final {
+ void Delete(Handle<JSObject> obj, uint32_t entry) final {
ElementsAccessorSubclass::DeleteImpl(obj, entry);
}
@@ -828,9 +826,9 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
- ElementsKind from_kind, Handle<FixedArrayBase> to,
- uint32_t to_start, int copy_size) final {
+ void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
+ ElementsKind from_kind, Handle<FixedArrayBase> to,
+ uint32_t to_start, int copy_size) final {
DCHECK(!from.is_null());
// NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
// violate the handlified function signature convention:
@@ -843,9 +841,9 @@ class ElementsAccessorBase : public ElementsAccessor {
kPackedSizeNotKnown, copy_size);
}
- virtual void CopyElements(JSObject* from_holder, uint32_t from_start,
- ElementsKind from_kind, Handle<FixedArrayBase> to,
- uint32_t to_start, int copy_size) final {
+ void CopyElements(JSObject* from_holder, uint32_t from_start,
+ ElementsKind from_kind, Handle<FixedArrayBase> to,
+ uint32_t to_start, int copy_size) final {
int packed_size = kPackedSizeNotKnown;
bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
@@ -870,25 +868,50 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, *to, from_kind, to_start, packed_size, copy_size);
}
- virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
- KeyAccumulator* accumulator,
- FixedArray::KeyFilter filter) final {
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyAttributes filter,
+ uint32_t offset) {
+ uint32_t length = 0;
+ if (object->IsJSArray()) {
+ length = Smi::cast(JSArray::cast(*object)->length())->value();
+ } else {
+ length =
+ ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
+ }
+ if (range < length) length = range;
+ for (uint32_t i = offset; i < length; i++) {
+ if (!ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
+ filter))
+ continue;
+ keys->AddKey(i);
+ }
+ }
+
+ void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyAttributes filter, uint32_t offset) final {
+ ElementsAccessorSubclass::CollectElementIndicesImpl(
+ object, backing_store, keys, range, filter, offset);
+ };
+
+ void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) final {
Handle<FixedArrayBase> from(receiver->elements());
uint32_t add_length =
ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
if (add_length == 0) return;
- accumulator->PrepareForComparisons(add_length);
- int prev_key_count = accumulator->GetLength();
+
for (uint32_t i = 0; i < add_length; i++) {
if (!ElementsAccessorSubclass::HasEntryImpl(*from, i)) continue;
Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, i);
DCHECK(!value->IsTheHole());
DCHECK(!value->IsAccessorPair());
DCHECK(!value->IsExecutableAccessorInfo());
- if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
- continue;
- }
- accumulator->AddKey(value, prev_key_count);
+ accumulator->AddKey(value, convert);
}
}
@@ -912,7 +935,8 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
- uint32_t index) {
+ uint32_t index,
+ PropertyAttributes filter) {
if (IsHoleyElementsKind(kind())) {
return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
backing_store) &&
@@ -926,11 +950,10 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- virtual uint32_t GetEntryForIndex(JSObject* holder,
- FixedArrayBase* backing_store,
- uint32_t index) final {
+ uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
+ uint32_t index) final {
return ElementsAccessorSubclass::GetEntryForIndexImpl(holder, backing_store,
- index);
+ index, NONE);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -938,8 +961,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
}
- virtual PropertyDetails GetDetails(FixedArrayBase* backing_store,
- uint32_t entry) final {
+ PropertyDetails GetDetails(FixedArrayBase* backing_store,
+ uint32_t entry) final {
return ElementsAccessorSubclass::GetDetailsImpl(backing_store, entry);
}
@@ -1092,19 +1115,50 @@ class DictionaryElementsAccessor
}
static uint32_t GetEntryForIndexImpl(JSObject* holder, FixedArrayBase* store,
- uint32_t index) {
+ uint32_t index,
+ PropertyAttributes filter) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
- int entry = dict->FindEntry(index);
- return entry == SeededNumberDictionary::kNotFound
- ? kMaxUInt32
- : static_cast<uint32_t>(entry);
+ SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
+ int entry = dictionary->FindEntry(index);
+ if (entry == SeededNumberDictionary::kNotFound) return kMaxUInt32;
+ if (filter != NONE) {
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) return kMaxUInt32;
+ }
+ return static_cast<uint32_t>(entry);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
}
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyAttributes filter,
+ uint32_t offset) {
+ Handle<SeededNumberDictionary> dictionary =
+ Handle<SeededNumberDictionary>::cast(backing_store);
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (!dictionary->IsKey(k)) continue;
+ if (k->FilterKey(filter)) continue;
+ if (dictionary->IsDeleted(i)) continue;
+ DCHECK(k->IsNumber());
+ DCHECK_LE(k->Number(), kMaxUInt32);
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < offset) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) continue;
+ keys->AddKey(index);
+ }
+
+ keys->SortCurrentElementsList();
+ }
};
@@ -1793,7 +1847,8 @@ class TypedElementsAccessor
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
- uint32_t index) {
+ uint32_t index,
+ PropertyAttributes filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store)
? index
: kMaxUInt32;
@@ -1928,14 +1983,15 @@ class SloppyArgumentsElementsAccessor
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* parameters,
- uint32_t index) {
+ uint32_t index,
+ PropertyAttributes filter) {
FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(parameter_map, index);
if (!probe->IsTheHole()) return index;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- uint32_t entry =
- ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments, index);
+ uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
+ index, filter);
if (entry == kMaxUInt32) return entry;
return (parameter_map->length() - 2) + entry;
}
@@ -2205,7 +2261,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
// Optimize the case where there is one argument and the argument is a small
// smi.
- if (length > 0 && length < JSObject::kInitialMaxFastElementArray) {
+ if (length > 0 && length < JSArray::kInitialMaxFastElementArray) {
ElementsKind elements_kind = array->GetElementsKind();
JSArray::Initialize(array, length, length);
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index fcc90024ba..01fc18448f 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -8,6 +8,7 @@
#include "src/elements-kind.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
+#include "src/key-accumulator.h"
#include "src/objects.h"
namespace v8 {
@@ -22,6 +23,14 @@ class ElementsAccessor {
const char* name() const { return name_; }
+ // Returns a shared ElementsAccessor for the specified ElementsKind.
+ static ElementsAccessor* ForKind(ElementsKind elements_kind) {
+ DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
+ return elements_accessors_[elements_kind];
+ }
+
+ static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
+
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(Handle<JSObject> obj) = 0;
@@ -30,12 +39,19 @@ class ElementsAccessor {
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
// the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
- // holder->elements() is used as the backing store.
+ // holder->elements() is used as the backing store. If a |filter| is
+ // specified, the PropertyAttributes of the element at the given index
+ // are compared to the given |filter|. If they match/overlap, the given
+ // index is ignored. Note that only Dictionary elements have custom
+ // PropertyAttributes associated, hence the |filter| argument is ignored for
+ // all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) = 0;
+ Handle<FixedArrayBase> backing_store,
+ PropertyAttributes filter = NONE) = 0;
- inline bool HasElement(Handle<JSObject> holder, uint32_t index) {
- return HasElement(holder, index, handle(holder->elements()));
+ inline bool HasElement(Handle<JSObject> holder, uint32_t index,
+ PropertyAttributes filter = NONE) {
+ return HasElement(holder, index, handle(holder->elements()), filter);
}
// Returns true if the backing store is compact in the given range
@@ -97,20 +113,31 @@ class ElementsAccessor {
*from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole);
}
- virtual void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) = 0;
+ // Copy all indices that have elements from |object| into the given
+ // KeyAccumulator. For Dictionary-based element-kinds we filter out elements
+ // whose PropertyAttributes match |filter|.
+ virtual void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys,
+ uint32_t range = kMaxUInt32,
+ PropertyAttributes filter = NONE,
+ uint32_t offset = 0) = 0;
+
+ inline void CollectElementIndices(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ uint32_t range = kMaxUInt32,
+ PropertyAttributes filter = NONE,
+ uint32_t offset = 0) {
+ CollectElementIndices(object, handle(object->elements()), keys, range,
+ filter, offset);
+ }
virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
- FixedArray::KeyFilter filter) = 0;
-
- // Returns a shared ElementsAccessor for the specified ElementsKind.
- static ElementsAccessor* ForKind(ElementsKind elements_kind) {
- DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
- return elements_accessors_[elements_kind];
- }
+ AddKeyConversion convert) = 0;
- static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
+ virtual void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) = 0;
static void InitializeOncePerProcess();
static void TearDown();
@@ -158,8 +185,6 @@ class ElementsAccessor {
static ElementsAccessor* ForArray(FixedArrayBase* array);
- virtual uint32_t GetCapacity(JSObject* holder,
- FixedArrayBase* backing_store) = 0;
// Element handlers distinguish between entries and indices when they
// manipulate elements. Entries refer to elements in terms of their location
@@ -176,6 +201,8 @@ class ElementsAccessor {
uint32_t entry) = 0;
private:
+ virtual uint32_t GetCapacity(JSObject* holder,
+ FixedArrayBase* backing_store) = 0;
static ElementsAccessor** elements_accessors_;
const char* name_;
@@ -189,6 +216,7 @@ MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Handle<JSArray> array,
Arguments* args);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ELEMENTS_H_
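
The new filter checks in elements.cc and elements.h above all reduce to one bitmask test: an entry is dropped when it carries any attribute bit present in |filter|. A standalone C++ sketch of that test, with the attribute bit values assumed from V8's src/property-details.h of this era:

    // Illustrative only; mirrors the `(attr & filter) != 0` checks above.
    enum PropertyAttributes {
      NONE = 0,         // no restrictions
      READ_ONLY = 1,    // v8::ReadOnly
      DONT_ENUM = 2,    // v8::DontEnum
      DONT_DELETE = 4   // v8::DontDelete
    };

    // True when the entry has at least one attribute the caller filters out.
    inline bool IsFilteredOut(PropertyAttributes attr,
                              PropertyAttributes filter) {
      return (attr & filter) != 0;
    }

With filter == NONE the test never fires, which is why the fast element kinds can pass the new parameter through unchanged.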
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 526390bd18..ecf2d22f69 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -58,7 +58,7 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
Handle<Object> receiver, int argc,
Handle<Object> args[],
Handle<Object> new_target) {
- DCHECK(!receiver->IsGlobalObject());
+ DCHECK(!receiver->IsJSGlobalObject());
// Entering JavaScript.
VMState<JS> state(isolate);
@@ -131,9 +131,9 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
// directly to a global object.
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalObject()) {
receiver =
- handle(Handle<GlobalObject>::cast(receiver)->global_proxy(), isolate);
+ handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
}
// api callbacks can be called directly.
@@ -152,7 +152,7 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
isolate, receiver, Execution::ToObject(isolate, receiver), Object);
}
}
- DCHECK(function->context()->global_object()->IsGlobalObject());
+ DCHECK(function->context()->global_object()->IsJSGlobalObject());
auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
bool has_exception = value.is_null();
DCHECK(has_exception == isolate->has_pending_exception());
@@ -185,12 +185,12 @@ MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
}
-MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
+MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> args[],
MaybeHandle<Object>* exception_out) {
bool is_termination = false;
- Isolate* isolate = func->GetIsolate();
MaybeHandle<Object> maybe_result;
if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
// Enter a try-block while executing the JavaScript code. To avoid
@@ -202,7 +202,7 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result = Call(isolate, func, receiver, argc, args);
+ maybe_result = Call(isolate, callable, receiver, argc, args);
if (maybe_result.is_null()) {
DCHECK(catcher.HasCaught());
@@ -478,7 +478,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Isolate* isolate = fun->GetIsolate();
Handle<Object> args[] = { recv, fun, pos, is_global };
MaybeHandle<Object> maybe_result =
- TryCall(isolate->get_stack_trace_line_fun(),
+ TryCall(isolate, isolate->get_stack_trace_line_fun(),
isolate->factory()->undefined_value(), arraysize(args), args);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 84f106a496..f1b1057a61 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -47,7 +47,7 @@ class Execution final : public AllStatic {
// that occurred (if caught exception is true).
// In the exception case, exception_out holds the caught exceptions, unless
// it is a termination exception.
- static MaybeHandle<Object> TryCall(Handle<JSFunction> func,
+ static MaybeHandle<Object> TryCall(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[],
MaybeHandle<Object>* exception_out = NULL);
@@ -247,6 +247,7 @@ class StackGuard final {
DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXECUTION_H_
diff --git a/deps/v8/src/expression-classifier.h b/deps/v8/src/expression-classifier.h
index fb45f41fa1..7392a7add8 100644
--- a/deps/v8/src/expression-classifier.h
+++ b/deps/v8/src/expression-classifier.h
@@ -35,11 +35,12 @@ class ExpressionClassifier {
StrictModeFormalParametersProduction = 1 << 5,
StrongModeFormalParametersProduction = 1 << 6,
ArrowFormalParametersProduction = 1 << 7,
+ LetPatternProduction = 1 << 8,
ExpressionProductions =
(ExpressionProduction | FormalParameterInitializerProduction),
- PatternProductions =
- (BindingPatternProduction | AssignmentPatternProduction),
+ PatternProductions = (BindingPatternProduction |
+ AssignmentPatternProduction | LetPatternProduction),
FormalParametersProductions = (DistinctFormalParametersProduction |
StrictModeFormalParametersProduction |
StrongModeFormalParametersProduction),
@@ -100,6 +101,8 @@ class ExpressionClassifier {
return is_valid(StrongModeFormalParametersProduction);
}
+ bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
+
const Error& expression_error() const { return expression_error_; }
const Error& formal_parameter_initializer_error() const {
@@ -128,6 +131,8 @@ class ExpressionClassifier {
return strong_mode_formal_parameter_error_;
}
+ const Error& let_pattern_error() const { return let_pattern_error_; }
+
bool is_simple_parameter_list() const {
return !(function_properties_ & NonSimpleParameter);
}
@@ -217,6 +222,16 @@ class ExpressionClassifier {
strong_mode_formal_parameter_error_.arg = arg;
}
+ void RecordLetPatternError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_let_pattern()) return;
+ invalid_productions_ |= LetPatternProduction;
+ let_pattern_error_.location = loc;
+ let_pattern_error_.message = message;
+ let_pattern_error_.arg = arg;
+ }
+
void Accumulate(const ExpressionClassifier& inner,
unsigned productions = StandardProductions) {
// Propagate errors from inner, but don't overwrite already recorded
@@ -249,6 +264,8 @@ class ExpressionClassifier {
if (errors & StrongModeFormalParametersProduction)
strong_mode_formal_parameter_error_ =
inner.strong_mode_formal_parameter_error_;
+ if (errors & LetPatternProduction)
+ let_pattern_error_ = inner.let_pattern_error_;
}
// As an exception to the above, the result continues to be a valid arrow
@@ -277,9 +294,11 @@ class ExpressionClassifier {
Error duplicate_formal_parameter_error_;
Error strict_mode_formal_parameter_error_;
Error strong_mode_formal_parameter_error_;
+ Error let_pattern_error_;
DuplicateFinder* duplicate_finder_;
};
-}
-} // v8::internal
+
+} // namespace internal
+} // namespace v8
#endif // V8_EXPRESSION_CLASSIFIER_H
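
The LetPatternProduction additions follow the classifier's uniform pattern: each production owns one bit in invalid_productions_, a production is valid while its bit is clear, and only the first error recorded per production is kept. A minimal self-contained sketch of that pattern (names simplified; not the real class):

    #include <cstdio>

    struct MiniClassifier {
      enum Production : unsigned { kLetPattern = 1 << 8 };

      unsigned invalid_productions_ = 0;
      const char* let_pattern_error_ = nullptr;

      bool is_valid(unsigned production) const {
        return (invalid_productions_ & production) == 0;
      }

      void RecordLetPatternError(const char* message) {
        if (!is_valid(kLetPattern)) return;  // first error wins
        invalid_productions_ |= kLetPattern;
        let_pattern_error_ = message;
      }
    };

    int main() {
      MiniClassifier c;
      c.RecordLetPatternError("let is disallowed here");
      c.RecordLetPatternError("ignored: production already invalid");
      std::printf("%s\n", c.let_pattern_error_);
    }

Accumulate() then copies an inner classifier's error only when the corresponding bit is set, which is exactly what the new `errors & LetPatternProduction` branch does.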
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index 009e818497..c8907b42ed 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -22,6 +22,7 @@ class ExternalizeStringExtension : public v8::Extension {
static const char* const kSource;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
diff --git a/deps/v8/src/extensions/free-buffer-extension.h b/deps/v8/src/extensions/free-buffer-extension.h
index bb1418c4a3..6bc5e57cbc 100644
--- a/deps/v8/src/extensions/free-buffer-extension.h
+++ b/deps/v8/src/extensions/free-buffer-extension.h
@@ -19,6 +19,7 @@ class FreeBufferExtension : public v8::Extension {
static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 91433911c2..9be0d4b701 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -30,6 +30,7 @@ class GCExtension : public v8::Extension {
char buffer_[50];
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_GC_EXTENSION_H_
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 5dac4097b6..714f86aeba 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -21,6 +21,7 @@ class StatisticsExtension : public v8::Extension {
static const char* const kSource;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_
diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
index 4b10bdc886..7c7ecf882c 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.h
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -26,6 +26,7 @@ class TriggerFailureExtension : public v8::Extension {
static const char* const kSource;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 8923d071b1..595259b3aa 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -244,6 +244,12 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
}
+Handle<Name> Factory::InternalizeName(Handle<Name> name) {
+ if (name->IsUniqueName()) return name;
+ return InternalizeString(Handle<String>::cast(name));
+}
+
+
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
@@ -708,14 +714,9 @@ Handle<Symbol> Factory::NewSymbol() {
}
-Handle<Symbol> Factory::NewPrivateSymbol(Handle<Object> name) {
+Handle<Symbol> Factory::NewPrivateSymbol() {
Handle<Symbol> symbol = NewSymbol();
symbol->set_is_private(true);
- if (name->IsString()) {
- symbol->set_name(*name);
- } else {
- DCHECK(name->IsUndefined());
- }
return symbol;
}
@@ -726,6 +727,7 @@ Handle<Context> Factory::NewNativeContext() {
array->set_map_no_write_barrier(*native_context_map());
Handle<Context> context = Handle<Context>::cast(array);
context->set_js_array_maps(*undefined_value());
+ context->set_errors_thrown(Smi::FromInt(0));
DCHECK(context->IsNativeContext());
return context;
}
@@ -1122,8 +1124,8 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// running the factory method, use the exception as the result.
Handle<Object> result;
MaybeHandle<Object> exception;
- if (!Execution::TryCall(fun, undefined_value(), arraysize(argv), argv,
- &exception)
+ if (!Execution::TryCall(isolate(), fun, undefined_value(), arraysize(argv),
+ argv, &exception)
.ToHandle(&result)) {
Handle<Object> exception_obj;
if (exception.ToHandle(&exception_obj)) {
@@ -1144,8 +1146,8 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// running the factory method, use the exception as the result.
Handle<Object> result;
MaybeHandle<Object> exception;
- if (!Execution::TryCall(constructor, undefined_value(), arraysize(argv), argv,
- &exception)
+ if (!Execution::TryCall(isolate(), constructor, undefined_value(),
+ arraysize(argv), argv, &exception)
.ToHandle(&result)) {
Handle<Object> exception_obj;
if (exception.ToHandle(&exception_obj)) return exception_obj;
@@ -1317,8 +1319,19 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
PretenureFlag pretenure) {
int map_index =
Context::FunctionMapIndex(info->language_mode(), info->kind());
- Handle<Map> map(Map::cast(context->native_context()->get(map_index)));
- Handle<JSFunction> result = NewFunction(map, info, context, pretenure);
+ Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
+
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
+ pretenure);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Context> context, PretenureFlag pretenure) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ Handle<JSFunction> result =
+ NewFunction(initial_map, info, context, pretenure);
if (info->ic_age() != isolate()->heap()->global_ic_age()) {
info->ResetForNewContext(isolate()->heap()->global_ic_age());
@@ -1346,12 +1359,11 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
LiteralsArray::New(isolate(), handle(info->feedback_vector()),
number_of_literals, pretenure);
result->set_literals(*literals);
+
// Cache context-specific literals.
- if (FLAG_cache_optimized_code) {
- Handle<Context> native_context(context->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(
- info, native_context, undefined_value(), literals, BailoutId::None());
- }
+ Handle<Context> native_context(context->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(
+ info, native_context, undefined_value(), literals, BailoutId::None());
}
return result;
@@ -1492,7 +1504,8 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
}
-Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
+Handle<JSGlobalObject> Factory::NewJSGlobalObject(
+ Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
Handle<Map> map(constructor->initial_map());
DCHECK(map->is_dictionary_map());
@@ -1510,7 +1523,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
// Initial size of the backing store to avoid resize of the storage during
// bootstrapping. The size differs between the JS global object and the
// builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+ int initial_size = 64;
// Allocate a dictionary object for backing storage.
int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
@@ -1534,7 +1547,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
}
// Allocate the global object and initialize it with the backing store.
- Handle<GlobalObject> global = New<GlobalObject>(map, OLD_SPACE);
+ Handle<JSGlobalObject> global = New<JSGlobalObject>(map, OLD_SPACE);
isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
// Create a new map for the global object.
@@ -1546,7 +1559,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
global->set_properties(*dictionary);
// Make sure result is a global object with properties in dictionary.
- DCHECK(global->IsGlobalObject() && !global->HasFastProperties());
+ DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
return global;
}
@@ -2077,17 +2090,6 @@ void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
}
-template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
- const FeedbackVectorSpec* spec);
-template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
- const StaticFeedbackVectorSpec* spec);
-
-template <typename Spec>
-Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(const Spec* spec) {
- return TypeFeedbackVector::Allocate<Spec>(isolate(), spec);
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name, int number_of_literals, FunctionKind kind,
Handle<Code> code, Handle<ScopeInfo> scope_info,
@@ -2149,8 +2151,10 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
StaticFeedbackVectorSpec empty_spec;
+ Handle<TypeFeedbackMetadata> feedback_metadata =
+ TypeFeedbackMetadata::New(isolate(), &empty_spec);
Handle<TypeFeedbackVector> feedback_vector =
- NewTypeFeedbackVector(&empty_spec);
+ TypeFeedbackVector::New(isolate(), feedback_metadata);
share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
#if TRACE_MAPS
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
@@ -2171,6 +2175,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_compiler_hints(0);
share->set_opt_count_and_bailout_reason(0);
+ // Link into the list.
+ Handle<Object> new_noscript_list =
+ WeakFixedArray::Add(noscript_shared_function_infos(), share);
+ isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+
return share;
}
@@ -2270,7 +2279,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
- !callee->has_simple_parameters();
+ !callee->shared()->has_simple_parameters();
Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
: isolate()->sloppy_arguments_map();
AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(),
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index b7602e023b..80916b8a20 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -7,12 +7,11 @@
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/type-feedback-vector.h"
namespace v8 {
namespace internal {
-class StaticFeedbackVectorSpec;
-
// Interface for handle based allocation.
class Factory final {
public:
@@ -81,6 +80,8 @@ class Factory final {
template<class StringTableKey>
Handle<String> InternalizeStringWithKey(StringTableKey* key);
+ Handle<Name> InternalizeName(Handle<Name> name);
+
// String creation functions. Most of the string creation functions take
// a Heap::PretenureFlag argument to optionally request that they be
@@ -218,7 +219,7 @@ class Factory final {
// Create a symbol.
Handle<Symbol> NewSymbol();
- Handle<Symbol> NewPrivateSymbol(Handle<Object> name);
+ Handle<Symbol> NewPrivateSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -375,7 +376,7 @@ class Factory final {
Handle<AllocationSite> site);
// Global objects are pretenured and initialized based on a constructor.
- Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
+ Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
@@ -506,8 +507,11 @@ class Factory final {
bool is_strict = false);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
+ Handle<Context> context, PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> function_info, Handle<Context> context,
PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
@@ -616,6 +620,7 @@ class Factory final {
&isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
// Allocates a new SharedFunctionInfo object.
@@ -626,10 +631,6 @@ class Factory final {
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
MaybeHandle<Code> code);
- // Allocate a new type feedback vector
- template <typename Spec>
- Handle<TypeFeedbackVector> NewTypeFeedbackVector(const Spec* spec);
-
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
Handle<Object> argument,
@@ -720,6 +721,7 @@ class Factory final {
void ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type, int size);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FACTORY_H_
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h
index 38e8a82499..d9e3ba8932 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/fast-dtoa.h
@@ -57,6 +57,7 @@ bool FastDtoa(double d,
int* length,
int* decimal_point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FAST_DTOA_H_
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 042e4fbdd2..2e6693ce38 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -89,8 +89,7 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
- int field_index =
- map->instance_descriptors()->GetFieldIndex(descriptor_index);
+ int field_index = details.field_index();
return ForPropertyIndex(map, field_index,
details.representation().IsDouble());
}
@@ -119,6 +118,7 @@ inline int FieldIndex::GetKeyedLookupCacheIndex() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 67515be047..2862d36bdb 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -19,6 +19,8 @@ class Map;
// index it was originally generated from.
class FieldIndex final {
public:
+ FieldIndex() : bit_field_(0) {}
+
static FieldIndex ForPropertyIndex(Map* map,
int index,
bool is_double = false);
@@ -111,6 +113,7 @@ class FieldIndex final {
int bit_field_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/src/fixed-dtoa.h b/deps/v8/src/fixed-dtoa.h
index 0a6cb50818..f8a29e0639 100644
--- a/deps/v8/src/fixed-dtoa.h
+++ b/deps/v8/src/fixed-dtoa.h
@@ -29,6 +29,7 @@ namespace internal {
bool FastFixedDtoa(double v, int fractional_count,
Vector<char> buffer, int* length, int* decimal_point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FIXED_DTOA_H_
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index fc3fe0b863..f65fe08ee5 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -179,44 +179,49 @@ DEFINE_BOOL(strong_mode, false, "experimental strong language mode")
DEFINE_IMPLICATION(use_strong, strong_mode)
DEFINE_BOOL(strong_this, true, "don't allow 'this' to escape from constructors")
-DEFINE_BOOL(es_staging, false, "enable all completed harmony features")
+DEFINE_BOOL(es_staging, false,
+ "enable test-worthy harmony features (for internal use only)")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
-DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony fetaures")
-DEFINE_IMPLICATION(harmony, es_staging)
+DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
DEFINE_BOOL(legacy_const, true, "legacy semantics for const in sloppy mode")
+// ES2015 const semantics are staged
+DEFINE_NEG_IMPLICATION(harmony, legacy_const)
+
+// Activate on ClusterFuzz.
+DEFINE_IMPLICATION(es_staging, harmony_destructuring)
+DEFINE_IMPLICATION(es_staging, move_object_start)
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
V(harmony_modules, "harmony modules") \
- V(harmony_regexps, "harmony regular expression extensions") \
V(harmony_proxies, "harmony proxies") \
V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_tolength, "harmony ToLength") \
V(harmony_reflect, "harmony Reflect API") \
V(harmony_sloppy_function, "harmony sloppy function block scoping") \
- V(harmony_destructuring, "harmony destructuring") \
- V(harmony_default_parameters, "harmony default parameters") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_simd, "harmony simd")
+ V(harmony_simd, "harmony simd") \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_regexp_subclass, "harmony regexp subclassing")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_tostring, "harmony toString") \
- V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
+#define HARMONY_STAGED(V) \
+ V(harmony_default_parameters, "harmony default parameters") \
+ V(harmony_destructuring, "harmony destructuring") \
+ V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_tostring, "harmony toString") \
V(harmony_sloppy_let, "harmony let in sloppy mode")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING(V) \
- V(harmony_arrow_functions, "harmony arrow functions") \
V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_new_target, "harmony new.target") \
V(harmony_object_observe, "harmony Object.observe") \
V(harmony_rest_parameters, "harmony rest parameters") \
- V(harmony_spread_calls, "harmony spread-calls") \
- V(harmony_spread_arrays, "harmony spread in array literals")
+ V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
+ V(harmony_tolength, "harmony ToLength") \
+ V(harmony_completion, "harmony completion value semantics")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -231,7 +236,7 @@ HARMONY_INPROGRESS(FLAG_INPROGRESS_FEATURES)
#define FLAG_STAGED_FEATURES(id, description) \
DEFINE_BOOL(id, false, "enable " #description) \
- DEFINE_IMPLICATION(es_staging, id)
+ DEFINE_IMPLICATION(harmony, id)
HARMONY_STAGED(FLAG_STAGED_FEATURES)
#undef FLAG_STAGED_FEATURES
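
The `HARMONY_INPROGRESS`/`HARMONY_STAGED`/`HARMONY_SHIPPING` lists being reshuffled above are X-macros: each takes a caller-supplied macro `V` and applies it to every `(flag, description)` pair, so a single list can generate the flag definitions, the `--harmony` implications seen here, and more. A minimal compilable sketch of the pattern:

    #include <cstdio>

    #define FEATURES(V)                                      \
      V(harmony_destructuring, "harmony destructuring")      \
      V(harmony_tostring, "harmony toString")

    #define PRINT_FEATURE(id, description) \
      std::printf("--%s: %s\n", #id, description);

    int main() {
      FEATURES(PRINT_FEATURE)  // expands to one printf per listed feature
      return 0;
    }
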
@@ -286,6 +291,11 @@ DEFINE_BOOL(string_slices, true, "use string slices")
DEFINE_BOOL(ignition, false, "use ignition interpreter")
DEFINE_IMPLICATION(ignition, vector_stores)
DEFINE_STRING(ignition_filter, "~~", "filter for ignition interpreter")
+DEFINE_BOOL(ignition_fake_try_catch, false,
+ "enable fake try-catch-finally blocks in ignition for testing")
+DEFINE_BOOL(ignition_fallback_on_eval_and_catch, false,
+ "fallback to full-codegen for functions which contain eval, catch"
+ "and es6 blocks")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
DEFINE_BOOL(trace_ignition_codegen, false,
@@ -371,7 +381,6 @@ DEFINE_INT(stress_runs, 0, "number of stress runs")
DEFINE_BOOL(lookup_sample_by_shared, true,
"when picking a function to optimize, watch for shared function "
"info, not JSFunction itself")
-DEFINE_BOOL(cache_optimized_code, true, "cache optimized code for closures")
DEFINE_BOOL(flush_optimized_code_cache, true,
"flushes the cache of optimized code for closures on every GC")
DEFINE_BOOL(inline_construct, true, "inline constructor calls")
@@ -401,13 +410,13 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
// Flags for TurboFan.
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
+DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
+DEFINE_IMPLICATION(turbo, turbo_inlining)
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
-
-DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
@@ -426,13 +435,13 @@ DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
-DEFINE_BOOL(turbo_type_feedback, false, "use type feedback in TurboFan")
-DEFINE_BOOL(turbo_allocate, false, "enable inline allocations in TurboFan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
+DEFINE_BOOL(native_context_specialization, true,
+ "enable native context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
@@ -453,6 +462,7 @@ DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
#if defined(V8_WASM)
// Flags for native WebAssembly.
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
+DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
@@ -515,6 +525,12 @@ DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS/PPC only)")
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
+DEFINE_IMPLICATION(enable_armv8, enable_vfp3)
+DEFINE_IMPLICATION(enable_armv8, enable_neon)
+DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
+DEFINE_IMPLICATION(enable_armv8, enable_sudiv)
+DEFINE_IMPLICATION(enable_armv8, enable_mls)
+
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
@@ -554,7 +570,6 @@ DEFINE_BOOL(trace_stub_failures, false,
"trace deoptimization of generated code stubs")
DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
-DEFINE_BOOL(serialize_inner, true, "enable caching of inner functions")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
// compiler.cc
@@ -652,13 +667,13 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(overapproximate_weak_closure, true,
- "overapproximate weak closer to reduce atomic pause time")
-DEFINE_INT(min_progress_during_object_groups_marking, 128,
- "keep overapproximating the weak closure as long as we discover at "
+DEFINE_BOOL(finalize_marking_incrementally, true,
+ "finalize marking in incremental steps")
+DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
+ "keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
-DEFINE_INT(max_object_groups_marking_rounds, 3,
- "at most try this many times to over approximate the weak closure")
+DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
+ "at most try this many times to finalize incremental marking")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, false, "use parallel compaction")
DEFINE_BOOL(trace_incremental_marking, false,
@@ -677,6 +692,9 @@ DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
+DEFINE_BOOL(memory_reducer, true, "use memory reducer")
+DEFINE_BOOL(scavenge_reclaim_unmodified_objects, false,
+ "remove unmodified and unreferenced objects")
// counters.cc
DEFINE_INT(histogram_interval, 600000,
@@ -694,8 +712,7 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL(vector_stores, false, "use vectors for store ics")
-DEFINE_BOOL(global_var_shortcuts, true, "use ic-less global loads and stores")
+DEFINE_BOOL(vector_stores, true, "use vectors for store ics")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -1078,6 +1095,8 @@ DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
+DEFINE_BOOL(global_var_shortcuts, false, "use ic-less global loads and stores")
+
// Cleanup...
#undef FLAG_FULL
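
Several hunks in this file adjust `DEFINE_IMPLICATION` and `DEFINE_NEG_IMPLICATION` lines; these express that setting one flag forces (or clears) another during flag processing. A small model of the effect, using the `es_staging -> harmony -> !legacy_const` chain from the hunks above:

    #include <cassert>

    struct Flags {
      bool es_staging = false;
      bool harmony = false;
      bool legacy_const = true;
    };

    // Applied in definition order, as the real flag parser would.
    void ComputeImplications(Flags* f) {
      if (f->es_staging) f->harmony = true;     // DEFINE_IMPLICATION(es_staging, harmony)
      if (f->harmony) f->legacy_const = false;  // DEFINE_NEG_IMPLICATION(harmony, legacy_const)
    }

    int main() {
      Flags f;
      f.es_staging = true;
      ComputeImplications(&f);
      assert(f.harmony && !f.legacy_const);
      return 0;
    }
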
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index 545c172d36..7bf515100c 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -63,6 +63,7 @@ class FlagList {
static uint32_t Hash();
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FLAGS_H_
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index cccd4d191a..4013601dac 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -234,6 +234,10 @@ inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
}
+inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
+ : JavaScriptFrame(iterator) {}
+
+
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
}
@@ -286,6 +290,7 @@ inline StackFrame* SafeStackFrameIterator::frame() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FRAMES_INL_H_
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 7e55833b45..0aeda5a5b3 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -11,6 +11,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
#include "src/scopeinfo.h"
#include "src/string-stream.h"
@@ -333,11 +334,8 @@ void SafeStackFrameIterator::Advance() {
// ExternalCallbackScope, just skip them as we cannot collect any useful
// information about them.
if (external_callback_scope_->scope_address() < frame_->fp()) {
- Address* callback_address =
- external_callback_scope_->callback_address();
- if (*callback_address != NULL) {
- frame_->state_.pc_address = callback_address;
- }
+ frame_->state_.pc_address =
+ external_callback_scope_->callback_entrypoint_address();
external_callback_scope_ = external_callback_scope_->previous();
DCHECK(external_callback_scope_ == NULL ||
external_callback_scope_->scope_address() > frame_->fp());
@@ -438,6 +436,19 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return JAVA_SCRIPT;
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
+ case Code::BUILTIN:
+ if (!marker->IsSmi()) {
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ // An adapter frame has a special SMI constant for the context and
+ // is not distinguished through the marker.
+ return ARGUMENTS_ADAPTOR;
+ } else {
+ // The interpreter entry trampoline has a non-SMI marker.
+ DCHECK(code_obj->is_interpreter_entry_trampoline());
+ return INTERPRETED;
+ }
+ }
+ break; // Marker encodes the frame type.
case Code::HANDLER:
if (!marker->IsSmi()) {
// Only hydrogen code stub handlers can have a non-SMI marker.
@@ -450,12 +461,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
}
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- // An adapter frame has a special SMI constant for the context and
- // is not distinguished through the marker.
- return ARGUMENTS_ADAPTOR;
- }
-
// Didn't find a code object, or the code kind wasn't specific enough.
// The marker should encode the frame type.
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
@@ -658,7 +663,9 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
if (safepoint_entry.has_doubles()) {
// Number of doubles not known at snapshot time.
DCHECK(!isolate()->serializer_enabled());
- parameters_base += DoubleRegister::NumAllocatableRegisters() *
+ parameters_base +=
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->num_allocatable_double_registers() *
kDoubleSize / kPointerSize;
}
@@ -742,7 +749,7 @@ bool JavaScriptFrame::IsConstructor() const {
}
-bool JavaScriptFrame::HasInlinedFrames() {
+bool JavaScriptFrame::HasInlinedFrames() const {
List<JSFunction*> functions(1);
GetFunctions(&functions);
return functions.length() > 1;
@@ -750,6 +757,7 @@ bool JavaScriptFrame::HasInlinedFrames() {
Object* JavaScriptFrame::GetOriginalConstructor() const {
+ DCHECK(!HasInlinedFrames());
Address fp = caller_fp();
if (has_adapted_arguments()) {
// Skip the arguments adaptor frame and look at the real caller.
@@ -792,7 +800,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
}
-void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
+void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) const {
DCHECK(functions->length() == 0);
functions->Add(function());
}
@@ -1034,7 +1042,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
- int* deopt_index) {
+ int* deopt_index) const {
DCHECK(is_optimized());
JSFunction* opt_function = function();
@@ -1058,7 +1066,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
}
-void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
+void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
DCHECK(functions->length() == 0);
DCHECK(is_optimized());
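
The `ComputeType` hunk above teaches the stack walker about `Code::BUILTIN`: a non-SMI marker under builtin code is either an arguments adaptor frame, detected via its sentinel context, or the interpreter entry trampoline, which maps to the new INTERPRETED frame type. A standalone model of that dispatch; the boolean predicates are stand-ins for the real frame-pointer and code-object checks:

    enum FrameType { JAVA_SCRIPT, OPTIMIZED, INTERPRETED, ARGUMENTS_ADAPTOR, FROM_MARKER };
    enum CodeKind { FUNCTION, OPTIMIZED_FUNCTION, BUILTIN };

    FrameType Classify(CodeKind kind, bool marker_is_smi,
                       bool is_adaptor_fp, bool is_interpreter_trampoline) {
      switch (kind) {
        case FUNCTION:
          return JAVA_SCRIPT;
        case OPTIMIZED_FUNCTION:
          return OPTIMIZED;
        case BUILTIN:
          if (!marker_is_smi) {
            // Adaptor frames carry a special SMI context, not a marker.
            if (is_adaptor_fp) return ARGUMENTS_ADAPTOR;
            // Otherwise only the interpreter trampoline is expected here.
            return is_interpreter_trampoline ? INTERPRETED : FROM_MARKER;
          }
          break;  // marker encodes the frame type
      }
      return FROM_MARKER;  // the real code decodes the Smi marker here
    }
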
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index d6bfd7aab8..f00cbdb887 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -99,16 +99,17 @@ class StackHandler BASE_EMBEDDED {
#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(STUB, StubFrame) \
+ V(ENTRY, EntryFrame) \
+ V(ENTRY_CONSTRUCT, EntryConstructFrame) \
+ V(EXIT, ExitFrame) \
+ V(JAVA_SCRIPT, JavaScriptFrame) \
+ V(OPTIMIZED, OptimizedFrame) \
+ V(INTERPRETED, InterpretedFrame) \
+ V(STUB, StubFrame) \
V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+ V(INTERNAL, InternalFrame) \
+ V(CONSTRUCT, ConstructFrame) \
+ V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
class StandardFrameConstants : public AllStatic {
@@ -181,6 +182,7 @@ class InterpreterFrameConstants : public AllStatic {
static const int kLastParamFromRegisterPointer =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
static const int kFunctionFromRegisterPointer = kPointerSize;
+ static const int kContextFromRegisterPointer = 2 * kPointerSize;
};
@@ -246,7 +248,8 @@ class StackFrame BASE_EMBEDDED {
bool is_java_script() const {
Type type = this->type();
- return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
+ return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
+ (type == INTERPRETED);
}
// Accessors.
@@ -373,18 +376,18 @@ class StackFrame BASE_EMBEDDED {
// Entry frames are used to enter JavaScript execution from C.
class EntryFrame: public StackFrame {
public:
- virtual Type type() const { return ENTRY; }
+ Type type() const override { return ENTRY; }
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
static EntryFrame* cast(StackFrame* frame) {
DCHECK(frame->is_entry());
return static_cast<EntryFrame*>(frame);
}
- virtual void SetCallerFp(Address caller_fp);
+ void SetCallerFp(Address caller_fp) override;
protected:
inline explicit EntryFrame(StackFrameIteratorBase* iterator);
@@ -392,11 +395,11 @@ class EntryFrame: public StackFrame {
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
// link to the top exit frame.
- virtual Address GetCallerStackPointer() const { return 0; }
+ Address GetCallerStackPointer() const override { return 0; }
private:
- virtual void ComputeCallerState(State* state) const;
- virtual Type GetCallerState(State* state) const;
+ void ComputeCallerState(State* state) const override;
+ Type GetCallerState(State* state) const override;
friend class StackFrameIteratorBase;
};
@@ -404,9 +407,9 @@ class EntryFrame: public StackFrame {
class EntryConstructFrame: public EntryFrame {
public:
- virtual Type type() const { return ENTRY_CONSTRUCT; }
+ Type type() const override { return ENTRY_CONSTRUCT; }
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
static EntryConstructFrame* cast(StackFrame* frame) {
DCHECK(frame->is_entry_construct());
@@ -424,16 +427,16 @@ class EntryConstructFrame: public EntryFrame {
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
- virtual Type type() const { return EXIT; }
+ Type type() const override { return EXIT; }
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
Object*& code_slot() const;
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
- virtual void SetCallerFp(Address caller_fp);
+ void SetCallerFp(Address caller_fp) override;
static ExitFrame* cast(StackFrame* frame) {
DCHECK(frame->is_exit());
@@ -450,10 +453,10 @@ class ExitFrame: public StackFrame {
protected:
inline explicit ExitFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
- virtual void ComputeCallerState(State* state) const;
+ void ComputeCallerState(State* state) const override;
friend class StackFrameIteratorBase;
};
@@ -462,7 +465,7 @@ class ExitFrame: public StackFrame {
class StandardFrame: public StackFrame {
public:
// Testers.
- virtual bool is_standard() const { return true; }
+ bool is_standard() const override { return true; }
// Accessors.
inline Object* context() const;
@@ -473,7 +476,7 @@ class StandardFrame: public StackFrame {
int ComputeExpressionsCount() const;
static Object* GetExpression(Address fp, int index);
- virtual void SetCallerFp(Address caller_fp);
+ void SetCallerFp(Address caller_fp) override;
static StandardFrame* cast(StackFrame* frame) {
DCHECK(frame->is_standard());
@@ -483,7 +486,7 @@ class StandardFrame: public StackFrame {
protected:
inline explicit StandardFrame(StackFrameIteratorBase* iterator);
- virtual void ComputeCallerState(State* state) const;
+ void ComputeCallerState(State* state) const override;
// Accessors.
inline Address caller_fp() const;
@@ -547,7 +550,7 @@ class FrameSummary BASE_EMBEDDED {
class JavaScriptFrame: public StandardFrame {
public:
- virtual Type type() const { return JAVA_SCRIPT; }
+ Type type() const override { return JAVA_SCRIPT; }
// Accessors.
inline JSFunction* function() const;
@@ -578,7 +581,7 @@ class JavaScriptFrame: public StandardFrame {
// Determines whether this frame includes inlined activations. To get details
// about the inlined frames use {GetFunctions} and {Summarize}.
- bool HasInlinedFrames();
+ bool HasInlinedFrames() const;
// Returns the original constructor function that was used in the constructor
// call to this frame. Note that this is only valid on constructor frames.
@@ -591,18 +594,17 @@ class JavaScriptFrame: public StandardFrame {
int GetArgumentsLength() const;
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
// Return a list with JSFunctions of this frame.
- virtual void GetFunctions(List<JSFunction*>* functions);
+ virtual void GetFunctions(List<JSFunction*>* functions) const;
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);
@@ -632,7 +634,7 @@ class JavaScriptFrame: public StandardFrame {
protected:
inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
virtual int GetNumberOfIncomingArguments() const;
@@ -649,18 +651,18 @@ class JavaScriptFrame: public StandardFrame {
class StubFrame : public StandardFrame {
public:
- virtual Type type() const { return STUB; }
+ Type type() const override { return STUB; }
// GC support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
protected:
inline explicit StubFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
virtual int GetNumberOfIncomingArguments() const;
@@ -670,24 +672,24 @@ class StubFrame : public StandardFrame {
class OptimizedFrame : public JavaScriptFrame {
public:
- virtual Type type() const { return OPTIMIZED; }
+ Type type() const override { return OPTIMIZED; }
// GC support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
- virtual void GetFunctions(List<JSFunction*>* functions);
+ void GetFunctions(List<JSFunction*>* functions) const override;
- virtual void Summarize(List<FrameSummary>* frames);
+ void Summarize(List<FrameSummary>* frames) override;
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns the expected number of stack slots at the handler site.
- virtual int LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction);
+ int LookupExceptionHandlerInTable(
+ int* stack_slots, HandlerTable::CatchPrediction* prediction) override;
- DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
+ DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
static int StackSlotOffsetRelativeToFp(int slot_index);
@@ -701,15 +703,26 @@ class OptimizedFrame : public JavaScriptFrame {
};
+class InterpretedFrame : public JavaScriptFrame {
+ Type type() const override { return INTERPRETED; }
+
+ protected:
+ inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+
// Arguments adaptor frames are automatically inserted below
// JavaScript frames when the actual number of parameters does not
// match the formal number of parameters.
class ArgumentsAdaptorFrame: public JavaScriptFrame {
public:
- virtual Type type() const { return ARGUMENTS_ADAPTOR; }
+ Type type() const override { return ARGUMENTS_ADAPTOR; }
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
DCHECK(frame->is_arguments_adaptor());
@@ -717,16 +730,15 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
}
// Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
- virtual int GetNumberOfIncomingArguments() const;
+ int GetNumberOfIncomingArguments() const override;
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
friend class StackFrameIteratorBase;
@@ -735,13 +747,13 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
class InternalFrame: public StandardFrame {
public:
- virtual Type type() const { return INTERNAL; }
+ Type type() const override { return INTERNAL; }
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
static InternalFrame* cast(StackFrame* frame) {
DCHECK(frame->is_internal());
@@ -751,7 +763,7 @@ class InternalFrame: public StandardFrame {
protected:
inline explicit InternalFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
friend class StackFrameIteratorBase;
@@ -768,13 +780,13 @@ class StubFailureTrampolineFrame: public StandardFrame {
static const int kCallerStackParameterCountFrameOffset =
StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
- virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; }
+ Type type() const override { return STUB_FAILURE_TRAMPOLINE; }
// Get the code associated with this frame.
// This method could be called during marking phase of GC.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Architecture-specific register description.
static Register fp_register();
@@ -785,7 +797,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
inline explicit StubFailureTrampolineFrame(
StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
friend class StackFrameIteratorBase;
@@ -796,7 +808,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
// function invocations through 'new'.
class ConstructFrame: public InternalFrame {
public:
- virtual Type type() const { return CONSTRUCT; }
+ Type type() const override { return CONSTRUCT; }
static ConstructFrame* cast(StackFrame* frame) {
DCHECK(frame->is_construct());
@@ -949,6 +961,7 @@ class StackFrameLocator BASE_EMBEDDED {
// zone memory.
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FRAMES_H_
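
Most of the frames.h churn is a mechanical C++11 migration: `virtual Foo() const` overriders become `Foo() const override`, so a signature drift now fails to compile instead of silently declaring a new virtual. A minimal sketch:

    class Frame {
     public:
      virtual ~Frame() = default;
      virtual int type() const { return 0; }
    };

    class EntryFrameSketch : public Frame {
     public:
      int type() const override { return 1; }  // compiler-verified override
      // int type() override { return 1; }     // would not compile: missing const
    };
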
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index d3d53334d4..5ca7a52af4 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -7,7 +7,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -117,22 +116,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(ne, &ok);
-
- __ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-
- __ str(r2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
+ __ AssertNotSmi(r2);
+ __ CompareObjectType(r2, r2, no_reg, FIRST_SPEC_OBJECT_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -142,7 +131,6 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -193,6 +181,7 @@ void FullCodeGenerator::Generate() {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -234,8 +223,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register_r1| is correct.
@@ -474,11 +463,10 @@ void FullCodeGenerator::EmitReturnSequence() {
SetReturnPosition(literal());
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -850,10 +838,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
}
__ Push(r2, r0);
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
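
The declaration hunks above replace the two-runtime-function split (`kDeclareReadOnlyLookupSlot` vs `kDeclareLookupSlot`) with a single runtime call that receives the property attributes as an explicit third argument. A small model of the new calling convention; the attribute encoding below is illustrative, not necessarily V8's:

    #include <cstdio>

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1, DONT_DELETE = 4 };

    // One entry point; the caller now pushes the attributes itself.
    void DeclareLookupSlot(const char* name, int attributes) {
      std::printf("declare %s attrs=%d\n", name, attributes);
    }

    int main() {
      DeclareLookupSlot("f", NONE);                     // var / function
      DeclareLookupSlot("c", READ_ONLY | DONT_DELETE);  // const-like binding
      return 0;
    }
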
@@ -909,7 +895,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(r2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1142,7 +1129,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1195,6 +1181,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1245,7 +1233,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
@@ -1257,8 +1245,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r0);
__ mov(StoreDescriptor::NameRegister(),
@@ -1393,26 +1382,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
- } else {
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1755,8 +1729,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
-
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1806,7 +1778,15 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(Smi::FromInt(array_index)));
+ __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, kPointerSize));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_fast_elements) {
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
@@ -2134,8 +2114,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ mov(r0, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2276,7 +2258,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSGlobalObject::kNativeContextOffset));
__ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(r2);
__ LoadRoot(r3,
@@ -2508,7 +2490,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2601,7 +2583,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
@@ -2609,25 +2591,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r0));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(r0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2846,11 +2809,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2859,6 +2820,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ push(ip);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2870,9 +2832,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ ldr(ip, MemOperand(sp, 0));
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2912,7 +2875,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2936,7 +2899,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2974,11 +2937,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2986,8 +2949,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -3068,91 +3032,40 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call
- // RuntimeHidden_asResolvePossiblyDirectEval to resolve the function we need
- // to call. Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call
+ // RuntimeHidden_asResolvePossiblyDirectEval to resolve the function we need
+ // to call. Then we call the resolved function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(r1);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Touch up the stack with the resolved function.
+ __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ // Record source position for debugger.
+ SetCallPosition(expr, arg_count);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ mov(r0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r0);
}
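
The hunk above folds the eval arm of the removed monolithic `VisitCall` dispatcher into a dedicated `EmitPossiblyEvalCall`, and swaps `CallFunctionStub` for the universal `Call` builtin. The deleted code shows how the dispatcher fanned out by call type; a skeletal reconstruction with method bodies elided (`EmitLookupSlotCall` and the like are assumed names for the other arms):

    enum class CallType { POSSIBLY_EVAL, GLOBAL, LOOKUP_SLOT, PROPERTY, SUPER, OTHER };

    struct Generator {
      void EmitPossiblyEvalCall() {}      // the new helper above
      void EmitCallWithLoadIC() {}
      void EmitLookupSlotCall() {}        // assumed name for the lookup arm
      void EmitPropertyCall() {}          // named vs keyed handled inside
      void EmitSuperConstructorCall() {}
      void EmitOtherCall() {}

      void VisitCall(CallType type) {
        switch (type) {
          case CallType::POSSIBLY_EVAL: EmitPossiblyEvalCall(); break;
          case CallType::GLOBAL:        EmitCallWithLoadIC(); break;
          case CallType::LOOKUP_SLOT:   EmitLookupSlotCall(); break;
          case CallType::PROPERTY:      EmitPropertyCall(); break;
          case CallType::SUPER:         EmitSuperConstructorCall(); break;
          case CallType::OTHER:         EmitOtherCall(); break;
        }
      }
    };
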
@@ -3782,31 +3695,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
- // Load the argument into r0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3827,19 +3715,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3950,19 +3825,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(r1);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3970,6 +3832,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to r1.
int const argc = args->length() - 2;
__ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3983,38 +3846,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &runtime);
-
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(r0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4068,20 +3899,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(r1);
- __ pop(r2);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4229,6 +4046,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ b(vs, &bailout);
__ SmiUntag(string_length);
+ // Bailout for large object allocations.
+ __ cmp(string_length, Operand(Page::kMaxRegularHeapObjectSize));
+ __ b(gt, &bailout);
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ add(element,
@@ -4370,7 +4191,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime, TAG_OBJECT);
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSGlobalObject::kNativeContextOffset));
__ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(r3);
__ pop(r2);
@@ -4397,7 +4218,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ push(r0);
__ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSGlobalObject::kNativeContextOffset));
__ ldr(r0, ContextOperand(r0, expr->context_index()));
}
@@ -4407,9 +4228,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ mov(r0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -5013,8 +4835,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmp(r0, Operand(0));
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5123,7 +4945,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
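
Across this file the pattern is consistent: `CallFunctionStub` and `CallICState::CallType` give way to the `Call` builtin parameterized by a `ConvertReceiverMode`, which tells the callee how the pushed receiver should be treated. This is also why the prologue no longer patches an undefined receiver to the global proxy; the builtin now performs that conversion, and full-codegen merely asserts a JSReceiver in debug builds. A compact model of how the mode is chosen in the hunks above (enumerator names mirror the diff; the selection logic is paraphrased from it):

    enum class ConvertReceiverMode {
      kNullOrUndefined,     // receiver known undefined; callee may substitute global proxy
      kNotNullOrUndefined,  // receiver is a real object; no conversion needed
      kAny                  // unknown; callee must check
    };

    ConvertReceiverMode ModeFor(bool callee_is_variable_proxy) {
      // Plain calls like f() push undefined as the receiver, so sloppy-mode
      // targets may need the global proxy substituted; property calls like
      // o.f() already pass the receiver object.
      return callee_is_variable_proxy ? ConvertReceiverMode::kNullOrUndefined
                                      : ConvertReceiverMode::kNotNullOrUndefined;
    }
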
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index b53e8ee6cd..bb6a4309e4 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -7,7 +7,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -116,23 +115,14 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
+ __ AssertNotSmi(x10);
+ __ CompareObjectType(x10, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
-
// Open a frame scope to indicate that there is a frame on the stack.
// The MANUAL indicates that the scope shouldn't actually generate code
// to set up the frame because we do it manually below.
@@ -144,7 +134,6 @@ void FullCodeGenerator::Generate() {
// Add(fp, jssp, 2 * kPointerSize);
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -195,6 +184,7 @@ void FullCodeGenerator::Generate() {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -236,8 +226,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register_x1| is correct.
@@ -469,7 +459,6 @@ void FullCodeGenerator::EmitReturnSequence() {
// Nothing ensures 16 bytes alignment here.
DCHECK(!current_sp.Is(csp));
__ Mov(current_sp, fp);
- int no_frame_start = masm_->pc_offset();
__ Ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
// Drop the arguments and receiver and return.
// TODO(all): This implementation is overkill as it supports 2**31+1
@@ -480,7 +469,6 @@ void FullCodeGenerator::EmitReturnSequence() {
__ Ret();
int32_t arg_count = info_->scope()->num_parameters() + 1;
__ dc64(kXRegSize * arg_count);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -855,10 +843,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
// Pushing 0 (xzr) indicates no initial value.
__ Push(x2, xzr);
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -914,7 +900,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(x2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1140,7 +1127,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(x1, x0, x2, xzr);
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ Bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1191,6 +1177,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1242,7 +1230,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
__ Mov(StoreDescriptor::NameRegister(),
@@ -1253,8 +1241,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), x0);
__ Mov(StoreDescriptor::NameRegister(),
@@ -1380,26 +1369,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
- } else {
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadIC(typeof_mode);
- }
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ CallLoadIC(typeof_mode);
}
@@ -1740,7 +1714,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1788,7 +1761,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
+ __ Peek(StoreDescriptor::ReceiverRegister(), kPointerSize);
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_fast_elements) {
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ Peek(x6, kPointerSize); // Copy of array literal.
__ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
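
With FLAG_vector_stores, each array-literal element store now goes through the
keyed store IC with the literal's feedback slot, so the store collects type
feedback like any other keyed store instead of writing straight into the
elements backing store. A toy model of the slot state machine such feedback
drives (illustrative only; V8's real states and transitions are richer):

    enum class StoreState { kUninitialized, kMonomorphic, kMegamorphic };

    struct FeedbackSlot {
      StoreState state = StoreState::kUninitialized;
      const void* map = nullptr;  // one observed receiver map

      void Observe(const void* receiver_map) {
        if (state == StoreState::kUninitialized) {
          state = StoreState::kMonomorphic;
          map = receiver_map;
        } else if (state == StoreState::kMonomorphic && map != receiver_map) {
          state = StoreState::kMegamorphic;  // two distinct maps seen
        }
      }
    };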
@@ -2208,7 +2188,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2303,7 +2283,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -2312,25 +2292,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(x0));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(x0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2548,11 +2509,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2565,6 +2524,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
__ Push(temp);
}
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2575,9 +2535,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ Pop(x10);
__ Push(x0, x10);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
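
The CallIC is now parameterized by how the callee's receiver must be converted
rather than by a FUNCTION/METHOD distinction: kNullOrUndefined when the
receiver is statically known to be absent (sloppy-mode callees substitute the
global proxy), kNotNullOrUndefined for property calls, and kAny when nothing
is known. A simplified sketch of the decision the mode encodes (toy types; the
real conversion also handles primitive receivers):

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    struct Value {
      bool is_null_or_undefined;
    };

    const Value* ConvertReceiver(ConvertReceiverMode mode, const Value* receiver,
                                 const Value* global_proxy) {
      switch (mode) {
        case ConvertReceiverMode::kNullOrUndefined:
          return global_proxy;  // receiver known absent: substitute up front
        case ConvertReceiverMode::kNotNullOrUndefined:
          return receiver;      // property call: no null/undefined check needed
        case ConvertReceiverMode::kAny:
          return receiver->is_null_or_undefined ? global_proxy : receiver;
      }
      return receiver;  // unreachable
    }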
@@ -2617,7 +2578,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2640,7 +2601,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Pop(x10);
__ Push(x0, x10);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2678,11 +2639,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2690,9 +2651,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2773,94 +2735,42 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ Peek(x10, (arg_count + 1) * kPointerSize);
- __ Push(x10);
- EmitResolvePossiblyDirectEval(arg_count);
+ PushCalleeAndWithBaseObject(expr);
- // Touch up the stack with the resolved function.
- __ Poke(x0, (arg_count + 1) * kPointerSize);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ // Touch up the stack with the resolved function.
+ __ Poke(x0, (arg_count + 1) * kPointerSize);
- // Call the evaluated function.
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ Peek(x1, (arg_count + 1) * kXRegSize);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x1);
- // Emit function call.
- EmitCall(expr);
- }
+ // Record source position for debugger.
+ SetCallPosition(expr, arg_count);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ // Call the evaluated function.
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ Mov(x0, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
}
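
The stack discipline of the eval path is worth spelling out: the callee is
pushed, then the arguments, then a copy of the callee is peeked from under the
arguments, resolved, and poked back into the original callee slot. A
self-contained toy model of that peek/poke arithmetic:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> stack;  // index 0 = deepest slot
      const int kCallee = 1, kReceiver = 2, kResolved = 99;
      stack.push_back(kCallee);
      stack.push_back(kReceiver);
      const int arg_count = 3;
      for (int i = 0; i < arg_count; i++) stack.push_back(10 + i);

      // Peek(x10, (arg_count + 1) * kPointerSize): the callee sits
      // arg_count + 1 slots below the top of the stack.
      size_t callee_slot = stack.size() - 1 - (arg_count + 1);
      assert(stack[callee_slot] == kCallee);

      // Poke(x0, ...): overwrite that slot with the resolved function.
      stack[callee_slot] = kResolved;
      assert(stack[callee_slot] == kResolved);
      return 0;
    }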
@@ -3492,32 +3402,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into x0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into x0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3537,19 +3421,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into x0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3663,30 +3534,15 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ Pop(x1);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
-
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+ ASM_LOCATION("FullCodeGenerator::EmitCall");
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
// Push target, receiver and arguments onto the stack.
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to x1.
int const argc = args->length() - 2;
__ Peek(x1, (argc + 1) * kXRegSize);
@@ -3700,38 +3556,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(x0, &runtime);
- __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
-
- // InvokeFunction requires the function in x1. Move it in there.
- __ Mov(x1, x0);
- ParameterCount count(arg_count);
- __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ B(&done);
-
- __ Bind(&runtime);
- __ Push(x0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ Bind(&done);
-
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3785,19 +3609,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(x1, x2);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3946,6 +3757,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ Umaddl(string_length, array_length.W(), separator_length.W(),
string_length);
+ // Bail out for large object allocations.
+ __ Cmp(string_length, Page::kMaxRegularHeapObjectSize);
+ __ B(gt, &bailout);
+
// Get first element in the array.
__ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
// Live values in registers:
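
The new guard exists because the inline fast path allocates the result string
with the regular allocator, which cannot produce objects larger than a regular
page; longer results must take the runtime bailout, where large-object space
is available. Schematically (the constant is illustrative, not V8's actual
page limit):

    #include <cstdint>

    const int64_t kMaxRegularHeapObjectSizeBytes = 512 * 1024;  // assumed

    bool CanAllocateInline(int64_t string_length_bytes) {
      return string_length_bytes <= kMaxRegularHeapObjectSizeBytes;
    }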
@@ -4075,7 +3890,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Register empty_fixed_array = x4;
Register untagged_result = x5;
__ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
+ __ Ldr(map_reg,
+ FieldMemOperand(map_reg, JSGlobalObject::kNativeContextOffset));
__ Ldr(map_reg,
ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(boolean_done);
@@ -4108,7 +3924,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ Push(x0);
__ Ldr(x0, GlobalObjectMemOperand());
- __ Ldr(x0, FieldMemOperand(x0, GlobalObject::kNativeContextOffset));
+ __ Ldr(x0, FieldMemOperand(x0, JSGlobalObject::kNativeContextOffset));
__ Ldr(x0, ContextMemOperand(x0, expr->context_index()));
}
@@ -4118,9 +3934,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kPointerSize);
- __ CallStub(&stub);
+ __ Mov(x0, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4729,7 +4546,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4885,8 +4703,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ Mov(x0, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -5030,7 +4850,8 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Register empty_fixed_array = x4;
Register untagged_result = x5;
__ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
+ __ Ldr(map_reg,
+ FieldMemOperand(map_reg, JSGlobalObject::kNativeContextOffset));
__ Ldr(map_reg,
ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(result_value);
@@ -5156,7 +4977,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index a29b59cf11..89675b6c9a 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -158,12 +158,12 @@ bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
ArrayLiteral* expr) const {
// TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
return expr->depth() > 1 || expr->is_strong() ||
- expr->values()->length() > JSObject::kInitialMaxFastElementArray;
+ expr->values()->length() > JSArray::kInitialMaxFastElementArray;
}
void FullCodeGenerator::Initialize() {
- InitializeAstVisitor(info_->isolate(), info_->zone());
+ InitializeAstVisitor(info_->isolate());
// The generation of debug code must match between the snapshot code and the
// code that is generated later. This is assumed by the debugger when it is
// calculating PC offsets after generating a debug version of code. Therefore
@@ -482,6 +482,63 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
+ const Callable& callable) {
+ ZoneList<Expression*>* args = expr->arguments();
+ int param_count = callable.descriptor().GetRegisterParameterCount();
+ DCHECK_EQ(args->length(), param_count);
+
+ if (param_count > 0) {
+ int last = param_count - 1;
+ // Put all but last arguments on stack.
+ for (int i = 0; i < last; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ // The last argument goes to the accumulator.
+ VisitForAccumulatorValue(args->at(last));
+
+ // Move the arguments to the registers, as required by the stub.
+ __ Move(callable.descriptor().GetRegisterParameter(last),
+ result_register());
+ for (int i = last; i-- > 0;) {
+ __ Pop(callable.descriptor().GetRegisterParameter(i));
+ }
+ }
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ context()->Plug(result_register());
+}
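
The marshaling convention in EmitIntrinsicAsStubCall is: all but the last
argument are evaluated onto the stack, the last one into the accumulator; the
accumulator then moves into the descriptor's last register parameter, and the
stack is popped back to front into the remaining registers. A toy mirror of
that data flow over plain values (not V8 code):

    #include <cassert>
    #include <vector>

    std::vector<int> MarshalToRegisters(const std::vector<int>& args) {
      assert(!args.empty());
      std::vector<int> stack;
      int last = static_cast<int>(args.size()) - 1;
      for (int i = 0; i < last; i++) stack.push_back(args[i]);  // stack values
      int accumulator = args[last];                             // accumulator value

      std::vector<int> regs(args.size());
      regs[last] = accumulator;       // Move(reg[last], result_register())
      for (int i = last; i-- > 0;) {  // Pop(reg[i]), back to front
        regs[i] = stack.back();
        stack.pop_back();
      }
      return regs;  // regs[i] == args[i], each in its assigned register
    }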
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToString(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToNumber(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToNumber(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToObject(isolate()));
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::RegExpConstructResult(isolate()));
+}
+
+
bool RecordStatementPosition(MacroAssembler* masm, int pos) {
if (pos == RelocInfo::kNoPosition) return false;
masm->positions_recorder()->RecordStatementPosition(pos);
@@ -503,7 +560,10 @@ void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- RecordStatementPosition(masm_, fun->end_position() - 1);
+ // For default constructors, start position equals end position, and there
+ // is no source code besides the class literal.
+ int pos = std::max(fun->start_position(), fun->end_position() - 1);
+ RecordStatementPosition(masm_, pos);
if (info_->is_debug()) {
// Always emit a debug break slot before a return.
DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
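
The std::max clamp is simple position arithmetic: for an ordinary function the
return is attributed to the last character (end - 1), but a synthesized
default constructor has start == end, so end - 1 would land before the
function. A worked check:

    #include <algorithm>
    #include <cassert>

    int ReturnPosition(int start, int end) {
      return std::max(start, end - 1);
    }

    int main() {
      assert(ReturnPosition(10, 50) == 49);  // ordinary function: last char
      assert(ReturnPosition(30, 30) == 30);  // default ctor: start == end
      return 0;
    }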
@@ -745,6 +805,15 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
}
+void FullCodeGenerator::VisitDoExpression(DoExpression* expr) {
+ Comment cmnt(masm_, "[ Do Expression");
+ NestedStatement nested_block(this);
+ SetExpressionPosition(expr);
+ VisitBlock(expr->block());
+ EmitVariableLoad(expr->result());
+}
+
+
void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
@@ -1273,8 +1342,7 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
EmitClassDefineProperties(lit);
- if (lit->scope() != NULL) {
- DCHECK_NOT_NULL(lit->class_variable_proxy());
+ if (lit->class_variable_proxy() != nullptr) {
EmitVariableAssignment(lit->class_variable_proxy()->var(),
Token::INIT_CONST, lit->ProxySlot());
}
@@ -1298,9 +1366,9 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
DCHECK(!fun_template.IsEmpty());
// Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
*fun_template->GetFunction(v8_isolate->GetCurrentContext())
- .ToLocalChecked());
+ .ToLocalChecked()));
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
@@ -1357,6 +1425,66 @@ void FullCodeGenerator::ExitTryBlock(int handler_index) {
}
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ switch (call_type) {
+ case Call::POSSIBLY_EVAL_CALL:
+ EmitPossiblyEvalCall(expr);
+ break;
+ case Call::GLOBAL_CALL:
+ EmitCallWithLoadIC(expr);
+ break;
+ case Call::LOOKUP_SLOT_CALL:
+ // Call to a lookup slot (dynamically introduced variable).
+ PushCalleeAndWithBaseObject(expr);
+ EmitCall(expr);
+ break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitCallWithLoadIC(expr);
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ EmitSuperCallWithLoadIC(expr);
+ break;
+ case Call::KEYED_SUPER_PROPERTY_CALL:
+ EmitKeyedSuperCallWithLoadIC(expr);
+ break;
+ case Call::SUPER_CALL:
+ EmitSuperConstructorCall(expr);
+ break;
+ case Call::OTHER_CALL:
+ // Call to an arbitrary expression not handled specially above.
+ VisitForStackValue(callee);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Emit function call.
+ EmitCall(expr);
+ break;
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ DCHECK(expr->return_is_recorded_);
+#endif
+}
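
The rewritten VisitCall also relies on GetCallType splitting the old
PROPERTY_CALL bucket into four explicit cases, which is what lets this
platform-independent switch replace the nested if/else in each backend. A toy
version of that classification (illustrative; the real logic lives on the AST):

    enum class CallType {
      kPossiblyEval, kGlobal, kLookupSlot,
      kNamedProperty, kKeyedProperty,
      kNamedSuperProperty, kKeyedSuperProperty,
      kSuper, kOther
    };

    CallType ClassifyProperty(bool is_super_access, bool is_named) {
      if (is_super_access) {
        return is_named ? CallType::kNamedSuperProperty
                        : CallType::kKeyedSuperProperty;
      }
      return is_named ? CallType::kNamedProperty : CallType::kKeyedProperty;
    }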
+
+
void FullCodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 02da16b865..80266cd70a 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -9,6 +9,7 @@
#include "src/assert-scope.h"
#include "src/ast.h"
#include "src/bit-vector.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
@@ -35,6 +36,8 @@ class FullCodeGenerator: public AstVisitor {
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
: masm_(masm),
info_(info),
+ isolate_(info->isolate()),
+ zone_(info->zone()),
scope_(info->scope()),
nesting_stack_(NULL),
loop_depth_(0),
@@ -150,10 +153,9 @@ class FullCodeGenerator: public AstVisitor {
Breakable(FullCodeGenerator* codegen, BreakableStatement* statement)
: NestedStatement(codegen), statement_(statement) {
}
- virtual ~Breakable() {}
- virtual Breakable* AsBreakable() { return this; }
- virtual bool IsBreakTarget(Statement* target) {
+ Breakable* AsBreakable() override { return this; }
+ bool IsBreakTarget(Statement* target) override {
return statement() == target;
}
@@ -171,10 +173,9 @@ class FullCodeGenerator: public AstVisitor {
Iteration(FullCodeGenerator* codegen, IterationStatement* statement)
: Breakable(codegen, statement) {
}
- virtual ~Iteration() {}
- virtual Iteration* AsIteration() { return this; }
- virtual bool IsContinueTarget(Statement* target) {
+ Iteration* AsIteration() override { return this; }
+ bool IsContinueTarget(Statement* target) override {
return statement() == target;
}
@@ -190,9 +191,8 @@ class FullCodeGenerator: public AstVisitor {
NestedBlock(FullCodeGenerator* codegen, Block* block)
: Breakable(codegen, block) {
}
- virtual ~NestedBlock() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
auto block_scope = statement()->AsBlock()->scope();
if (block_scope != nullptr) {
if (block_scope->ContextLocalCount() > 0) ++(*context_length);
@@ -207,13 +207,12 @@ class FullCodeGenerator: public AstVisitor {
static const int kElementCount = TryBlockConstant::kElementCount;
explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
- virtual ~TryCatch() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
*stack_depth += kElementCount;
return previous_;
}
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -227,10 +226,9 @@ class FullCodeGenerator: public AstVisitor {
TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
: NestedStatement(codegen), finally_entry_(finally_entry) {
}
- virtual ~TryFinally() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length);
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override;
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -245,13 +243,12 @@ class FullCodeGenerator: public AstVisitor {
static const int kElementCount = 3;
explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
- virtual ~Finally() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
*stack_depth += kElementCount;
return previous_;
}
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -265,13 +262,12 @@ class FullCodeGenerator: public AstVisitor {
ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
: Iteration(codegen, statement) {
}
- virtual ~ForIn() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
*stack_depth += kElementCount;
return previous_;
}
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -284,9 +280,8 @@ class FullCodeGenerator: public AstVisitor {
explicit WithOrCatch(FullCodeGenerator* codegen)
: NestedStatement(codegen) {
}
- virtual ~WithOrCatch() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
++(*context_length);
return previous_;
}
@@ -427,11 +422,6 @@ class FullCodeGenerator: public AstVisitor {
literal()->feedback_vector_spec(), slot));
}
- Smi* SmiFromSlot(FeedbackVectorICSlot slot) const {
- return Smi::FromInt(TypeFeedbackVector::GetIndexFromSpec(
- literal()->feedback_vector_spec(), slot));
- }
-
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
void RecordJSReturnSite(Call* call);
@@ -473,12 +463,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitReturnSequence();
// Platform-specific code sequences for calls
- void EmitCall(Call* expr, CallICState::CallType = CallICState::FUNCTION);
+ void EmitCall(Call* expr, ConvertReceiverMode = ConvertReceiverMode::kAny);
void EmitSuperConstructorCall(Call* expr);
void EmitCallWithLoadIC(Call* expr);
void EmitSuperCallWithLoadIC(Call* expr);
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
void EmitKeyedSuperCallWithLoadIC(Call* expr);
+ void EmitPossiblyEvalCall(Call* expr);
#define FOR_EACH_FULL_CODE_INTRINSIC(F) \
F(IsSmi) \
@@ -488,7 +479,6 @@ class FullCodeGenerator: public AstVisitor {
F(IsJSProxy) \
F(IsConstructCall) \
F(Call) \
- F(CallFunction) \
F(DefaultConstructorCallSuper) \
F(ArgumentsLength) \
F(Arguments) \
@@ -514,13 +504,14 @@ class FullCodeGenerator: public AstVisitor {
F(DebugBreakInOptimizedCode) \
F(ClassOf) \
F(StringCharCodeAt) \
- F(StringAdd) \
F(SubString) \
F(RegExpExec) \
F(RegExpConstructResult) \
F(ToInteger) \
F(NumberToString) \
F(ToString) \
+ F(ToLength) \
+ F(ToNumber) \
F(ToName) \
F(ToObject) \
F(DebugIsActive) \
@@ -530,6 +521,8 @@ class FullCodeGenerator: public AstVisitor {
FOR_EACH_FULL_CODE_INTRINSIC(GENERATOR_DECLARATION)
#undef GENERATOR_DECLARATION
+ void EmitIntrinsicAsStubCall(CallRuntime* expr, const Callable& callable);
+
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
Expression *value,
@@ -598,12 +591,12 @@ class FullCodeGenerator: public AstVisitor {
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator. slot is only used if FLAG_vector_stores
// is true.
- void EmitAssignment(Expression* expr, FeedbackVectorICSlot slot);
+ void EmitAssignment(Expression* expr, FeedbackVectorSlot slot);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
void EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot);
+ FeedbackVectorSlot slot);
// Helper functions to EmitVariableAssignment
void EmitStoreToStackLocalOrContextSlot(Variable* var,
@@ -634,10 +627,10 @@ class FullCodeGenerator: public AstVisitor {
// The value of the initializer is expected to be at the top of the stack.
// |offset| is the offset in the stack where the home object can be found.
void EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot);
+ FeedbackVectorSlot slot);
void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
- FeedbackVectorICSlot slot);
+ FeedbackVectorSlot slot);
void EmitLoadSuperConstructor(SuperCallReference* super_call_ref);
@@ -692,6 +685,8 @@ class FullCodeGenerator: public AstVisitor {
const ExpressionContext* context() { return context_; }
void set_new_context(const ExpressionContext* context) { context_ = context; }
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() const { return zone_; }
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
@@ -718,7 +713,7 @@ class FullCodeGenerator: public AstVisitor {
void PushCalleeAndWithBaseObject(Call* expr);
// AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -736,7 +731,7 @@ class FullCodeGenerator: public AstVisitor {
bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
- void EmitLoadStoreICSlot(FeedbackVectorICSlot slot);
+ void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
int NewHandlerTableEntry();
@@ -832,20 +827,18 @@ class FullCodeGenerator: public AstVisitor {
explicit AccumulatorValueContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsAccumulatorValue() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsAccumulatorValue() const override { return true; }
};
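
The header sweep replacing "virtual" with "override" on overriding members is
not cosmetic: "override" makes the compiler reject a member that fails to
override anything, turning silent signature drift into a build error. For
example:

    struct ExpressionContextBase {
      virtual void Plug(bool flag) const {}
      virtual ~ExpressionContextBase() = default;
    };

    struct Example : ExpressionContextBase {
      // Dropping "const" here (or mistyping the parameter) would no longer
      // silently declare a new virtual; with "override" it fails to compile.
      void Plug(bool flag) const override {}
    };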
class StackValueContext : public ExpressionContext {
@@ -853,20 +846,18 @@ class FullCodeGenerator: public AstVisitor {
explicit StackValueContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsStackValue() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsStackValue() const override { return true; }
};
class TestContext : public ExpressionContext {
@@ -892,20 +883,18 @@ class FullCodeGenerator: public AstVisitor {
Label* false_label() const { return false_label_; }
Label* fall_through() const { return fall_through_; }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsTest() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsTest() const override { return true; }
private:
Expression* condition_;
@@ -919,20 +908,18 @@ class FullCodeGenerator: public AstVisitor {
explicit EffectContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsEffect() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsEffect() const override { return true; }
};
class EnterBlockScopeIfNeeded {
@@ -953,6 +940,8 @@ class FullCodeGenerator: public AstVisitor {
MacroAssembler* masm_;
CompilationInfo* info_;
+ Isolate* isolate_;
+ Zone* zone_;
Scope* scope_;
Label return_label_;
NestedStatement* nesting_stack_;
@@ -976,28 +965,6 @@ class FullCodeGenerator: public AstVisitor {
};
-// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable: public TemplateHashMap<Literal,
- ObjectLiteral::Accessors,
- ZoneAllocationPolicy> {
- public:
- explicit AccessorTable(Zone* zone) :
- TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy>(Literal::Match,
- ZoneAllocationPolicy(zone)),
- zone_(zone) { }
-
- Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
- if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
- return it;
- }
-
- private:
- Zone* zone_;
-};
-
-
class BackEdgeTable {
public:
BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
@@ -1081,6 +1048,7 @@ class BackEdgeTable {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FULL_CODEGEN_FULL_CODEGEN_H_
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index cce7357962..089915a587 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -7,7 +7,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
@@ -106,24 +105,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
+ __ AssertNotSmi(ecx);
+ __ CmpObjectType(ecx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -133,7 +120,6 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -187,6 +173,7 @@ void FullCodeGenerator::Generate() {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -231,8 +218,8 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// The function register is trashed in case we bail out here. But since
// that can happen only when we allocate a context, the value of
// |function_in_register| is correct.
@@ -436,13 +423,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&ok);
SetReturnPosition(literal());
- int no_frame_start = masm_->pc_offset();
__ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -800,10 +785,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -855,7 +839,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(Immediate(variable->name()));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1077,7 +1063,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1127,6 +1112,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1175,7 +1162,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::NameRegister(),
@@ -1186,8 +1173,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
__ mov(StoreDescriptor::NameRegister(),
@@ -1318,27 +1306,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1678,7 +1650,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1729,7 +1700,15 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (has_constant_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(Smi::FromInt(array_index)));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_constant_fast_elements) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
@@ -1738,17 +1717,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Store the subexpression value in the array's elements.
__ mov(FieldOperand(ebx, offset), result_register());
// Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
+ __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
__ mov(ecx, Immediate(Smi::FromInt(array_index)));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
-
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -2060,8 +2036,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ Set(eax, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2190,7 +2168,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -2417,7 +2395,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2508,7 +2486,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
@@ -2516,26 +2494,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(eax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2750,10 +2708,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2761,6 +2718,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ push(Immediate(isolate()->factory()->undefined_value()));
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2771,9 +2729,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2810,7 +2769,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2833,7 +2792,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2868,11 +2827,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2880,8 +2839,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2960,88 +2920,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ PushCalleeAndWithBaseObject(expr);
- // Touch up the stack with the resolved function.
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
+ // Touch up the stack with the resolved function.
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ SetCallPosition(expr, arg_count);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
}
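[Context] The offset `(arg_count + 1) * kPointerSize` that recurs through the rewritten EmitPossiblyEvalCall addresses the callee slot, which sits below the receiver and the pushed arguments. A compilable sketch of that layout arithmetic, with kPointerSize hard-coded to the ia32 word size purely for illustration:

    // Stack after PushCalleeAndWithBaseObject plus n argument pushes:
    //   sp + 0 .. sp + (n-1)*4 : arguments 0..n-1
    //   sp + n*4               : receiver / with-base object
    //   sp + (n+1)*4           : callee, later overwritten by the function
    //                            that EmitResolvePossiblyDirectEval produces
    constexpr int kPointerSize = 4;  // ia32 word size, for the sketch
    constexpr int CalleeSlotOffset(int arg_count) {
      return (arg_count + 1) * kPointerSize;
    }
    static_assert(CalleeSlotOffset(2) == 12,
                  "callee lives above two arguments plus the receiver");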
@@ -3680,32 +3590,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into eax and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3727,19 +3611,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3853,19 +3724,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(edx);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3873,6 +3731,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3886,38 +3745,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; ++i) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in edi. Move it in there.
- __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3966,21 +3793,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(ebx);
- __ pop(ecx);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4145,6 +3957,11 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ j(overflow, &bailout);
__ shr(string_length, 1);
+
+ // Bailout for large object allocations.
+ __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
+ __ j(greater, &bailout);
+
// Live registers and stack values:
// string_length
// elements
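[Context] The added guard keeps the fast one-byte join from attempting an allocation that only large-object space could satisfy; Page::kMaxRegularHeapObjectSize is V8's cutoff for regular-space objects. A sketch of the emitted predicate, with an illustrative constant standing in for the real one:

    // The value below is a stand-in; the real limit is
    // Page::kMaxRegularHeapObjectSize from V8's heap layer.
    constexpr int kMaxRegularHeapObjectSizeSketch = 512 * 1024;
    inline bool FastJoinMustBailOut(int result_length_bytes) {
      return result_length_bytes > kMaxRegularHeapObjectSizeSketch;
    }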
@@ -4303,7 +4120,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
__ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -4328,7 +4145,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ push(Immediate(isolate()->factory()->undefined_value()));
__ mov(eax, GlobalObjectOperand());
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ __ mov(eax, FieldOperand(eax, JSGlobalObject::kNativeContextOffset));
__ mov(eax, ContextOperand(eax, expr->context_index()));
}
@@ -4338,9 +4155,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4949,8 +4767,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ test(eax, eax);
- Split(not_zero, if_true, if_false, fall_through);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
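[Context] This hunk (and its MIPS twins below) reflects a change in the CompareNil IC's contract: the result register now holds a boolean heap object rather than an untagged flag, so the branch tests equality with the canonical true value instead of non-zero-ness. A stand-in model of the two branch conditions (the real operands are V8 heap objects in eax/v0):

    // Stand-in type; IcResult plays the role of the IC's result register.
    enum class IcResult { kFalseObject, kTrueObject };
    inline bool TakeTrueBranchBefore(int raw_flag) {
      return raw_flag != 0;               // old: test eax, eax / ne zero_reg
    }
    inline bool TakeTrueBranchAfter(IcResult r) {
      return r == IcResult::kTrueObject;  // new: compare against true_value
    }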
@@ -5058,7 +4876,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Immediate(SmiFromSlot(slot)));
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index f38c01bbea..3a322ea315 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -15,7 +15,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -125,22 +124,13 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ lw(at, MemOperand(sp, receiver_offset));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ lw(a2, GlobalObjectOperand());
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sw(a2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
+ __ lw(a2, MemOperand(sp, receiver_offset));
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, a2, a2);
+ __ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
+ Operand(FIRST_SPEC_OBJECT_TYPE));
}
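[Context] The prologue no longer patches an undefined receiver with the global proxy here; that conversion moved out of full-codegen into the Call path, so all that remains is a debug-build check that the incoming receiver really is a spec object. A sketch of the invariant the Check encodes, assuming V8's instance-type ordering where every JSReceiver type sorts at or above FIRST_SPEC_OBJECT_TYPE:

    // Assumed ordering: JSReceiver instance types are >= FIRST_SPEC_OBJECT_TYPE,
    // so one comparison covers them all.
    inline bool ReceiverLooksLikeJSReceiver(bool is_smi, int instance_type,
                                            int first_spec_object_type) {
      return !is_smi && instance_type >= first_spec_object_type;
    }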
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -150,7 +140,6 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -202,6 +191,7 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -243,8 +233,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register_a1| is correct.
@@ -474,11 +464,9 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
masm_->mov(sp, fp);
- int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
masm_->Addu(sp, sp, Operand(sp_delta));
masm_->Jump(ra);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -855,10 +843,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
}
__ Push(a2, a0);
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
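[Context] Runtime::kDeclareLookupSlot now takes the property attributes as a third argument, which is what lets a single entry point replace the old kDeclareReadOnlyLookupSlot split: immutable variable modes simply pass READ_ONLY in the attributes smi. A sketch of the new convention (bit values shown for illustration only; the real ones live in V8's PropertyAttributes):

    // Illustrative attribute bits; the runtime receives them as a Smi.
    constexpr int NONE = 0;
    constexpr int READ_ONLY = 1 << 0;
    inline int DeclarationAttributes(bool immutable_variable_mode) {
      return immutable_variable_mode ? READ_ONLY : NONE;
    }
    // Arguments pushed for Runtime::kDeclareLookupSlot (3, was 2):
    //   name, initial value, Smi(attributes)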
@@ -914,7 +900,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1142,7 +1129,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, a0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1194,6 +1180,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
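[Context] Relocating the BodyId bailout to just before the body visit matches the constraint in the new comment: when an optimized frame deopts at BodyId, execution must resume with the per-iteration key load and assignment already performed. A sketch of the per-iteration ordering the generator now emits (step names are stand-ins; only the relative order matters):

    // Stand-in enumeration of one for-in iteration as emitted above.
    enum ForInIterationStep {
      kConditionCheck,  // index < length?
      kLoadCurrentKey,  // fetch key, filter deleted/shadowed properties
      kAssignEach,      // stmt->AssignmentId() bailout point
      kBodyBailout,     // stmt->BodyId() -- now here, not at loop setup
      kVisitBody,
      kIncrementIndex
    };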
@@ -1244,7 +1232,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ li(StoreDescriptor::NameRegister(),
@@ -1256,8 +1244,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
__ li(StoreDescriptor::NameRegister(),
@@ -1389,27 +1378,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
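[Context] With the LoadGlobalViaContext path deleted, every global read funnels through the load IC with the same three inputs. A hypothetical mirror of what EmitGlobalVariableLoad now feeds the descriptor registers:

    // Hypothetical aggregate; the real values land in LoadDescriptor's
    // Receiver/Name/Slot registers rather than a struct.
    struct GlobalLoadInputs {
      const void* receiver;   // the global object (GlobalObjectOperand())
      const void* name;       // var->name()
      int feedback_slot_smi;  // SmiFromSlot(proxy->VariableFeedbackSlot())
    };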
@@ -1753,8 +1726,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
-
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1804,7 +1775,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(Smi::FromInt(array_index)));
+ __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, kPointerSize));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_fast_elements) {
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
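[Context] Under FLAG_vector_stores, array-literal elements are written through the keyed store IC so that each store records feedback in the literal's slot, replacing the hand-rolled fast path that pokes the backing store and manages the write barrier itself. A sketch of the per-element shape (the callback stands in for the emitted CallIC sequence and is not a real V8 API):

    // KeyedStoreThroughIC abstracts the CallIC(CodeFactory::KeyedStoreIC(...))
    // sequence emitted above.
    void InitializeArrayLiteral(void* literal_on_stack, int element_count,
                                void (*KeyedStoreThroughIC)(void*, int)) {
      for (int index = 0; index < element_count; ++index) {
        // name register <- Smi(index); receiver <- literal; value <- element
        KeyedStoreThroughIC(literal_on_stack, index);
      }
    }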
@@ -2136,8 +2116,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ li(a0, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2267,7 +2249,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+ __ lw(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
__ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(a2);
__ LoadRoot(a3,
@@ -2501,7 +2483,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2594,7 +2576,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -2603,27 +2585,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(a0));
- __ mov(StoreGlobalViaContextDescriptor::ValueRegister(), result_register());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(a0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2843,11 +2804,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2856,6 +2815,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ push(at);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2867,9 +2827,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ lw(at, MemOperand(sp, 0));
__ push(at);
__ sw(v0, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2907,7 +2868,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2931,7 +2892,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(at);
__ sw(v0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2967,11 +2928,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2979,9 +2940,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -3063,88 +3025,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- // Emit function call.
- EmitCall(expr);
- }
+ // Touch up the stack with the resolved function.
+ __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Record source position for debugger.
+ SetCallPosition(expr, arg_count);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
}
@@ -3794,34 +3706,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into a0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3842,20 +3726,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3971,20 +3841,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(a1);
- __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3992,6 +3848,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ lw(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -4005,38 +3862,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(v0, &runtime);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
-
- // InvokeFunction requires the function in a1. Move it in there.
- __ mov(a1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(v0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4094,21 +3919,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ mov(a0, result_register());
- __ pop(a1);
- __ pop(a2);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4259,6 +4069,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ BranchOnOverflow(&bailout, scratch3);
__ SmiUntag(string_length);
+ // Bailout for large object allocations.
+ __ Branch(&bailout, gt, string_length,
+ Operand(Page::kMaxRegularHeapObjectSize));
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ Addu(element,
@@ -4397,7 +4211,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
__ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+ __ lw(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
__ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(a2, a3);
__ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
@@ -4423,7 +4237,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ push(v0);
__ lw(v0, GlobalObjectOperand());
- __ lw(v0, FieldMemOperand(v0, GlobalObject::kNativeContextOffset));
+ __ lw(v0, FieldMemOperand(v0, JSGlobalObject::kNativeContextOffset));
__ lw(v0, ContextOperand(v0, expr->context_index()));
}
@@ -4433,9 +4247,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -5043,7 +4858,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ __ LoadRoot(a1, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5156,7 +4972,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index dcdff515ef..a51e873709 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -15,7 +15,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -125,29 +124,21 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ ld(at, MemOperand(sp, receiver_offset));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ ld(a2, GlobalObjectOperand());
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sd(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ ld(a2, MemOperand(sp, receiver_offset));
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, a2, a2);
+ __ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
+ Operand(FIRST_SPEC_OBJECT_TYPE));
}
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -199,6 +190,7 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -240,8 +232,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register_a1| is correct.
@@ -472,11 +464,9 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
masm_->mov(sp, fp);
- int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
masm_->Daddu(sp, sp, Operand(sp_delta));
masm_->Jump(ra);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -853,10 +843,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
}
__ Push(a2, a0);
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -912,7 +900,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1140,7 +1129,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, a0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1192,6 +1180,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1242,7 +1232,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ li(StoreDescriptor::NameRegister(),
@@ -1254,8 +1244,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
__ li(StoreDescriptor::NameRegister(),
@@ -1387,27 +1378,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1751,8 +1726,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
-
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1802,7 +1775,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(Smi::FromInt(array_index)));
+ __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, kPointerSize));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_fast_elements) {
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset));
@@ -2132,8 +2114,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, a0);
__ sd(a1, MemOperand(sp, 2 * kPointerSize));
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ li(a0, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2265,7 +2249,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+ __ ld(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
__ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(a2);
__ LoadRoot(a3,
@@ -2500,7 +2484,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2593,7 +2577,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -2602,27 +2586,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(a0));
- __ mov(StoreGlobalViaContextDescriptor::ValueRegister(), result_register());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(a0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2846,11 +2809,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2859,6 +2820,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ push(at);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2870,9 +2832,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ ld(at, MemOperand(sp, 0));
__ push(at);
__ sd(v0, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2910,7 +2873,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2934,7 +2897,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(at);
__ sd(v0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2970,11 +2933,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2982,9 +2945,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -3065,88 +3029,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- // Emit function call.
- EmitCall(expr);
- }
+ // Touch up the stack with the resolved function.
+ __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Record source position for debugger.
+ SetCallPosition(expr, arg_count);
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
}
@@ -3797,34 +3711,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into a0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3845,20 +3731,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3974,20 +3846,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(a1);
- __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3995,6 +3853,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -4008,38 +3867,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(v0, &runtime);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
-
- // InvokeFunction requires the function in a1. Move it in there.
- __ mov(a1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(v0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4097,21 +3924,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ mov(a0, result_register());
- __ pop(a1);
- __ pop(a2);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4262,6 +4074,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
+ // Bailout for large object allocations.
+ __ Branch(&bailout, gt, string_length,
+ Operand(Page::kMaxRegularHeapObjectSize));
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ Daddu(element,
@@ -4400,7 +4216,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
__ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+ __ ld(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
__ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(a2, a3);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
@@ -4426,7 +4242,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ push(v0);
__ ld(v0, GlobalObjectOperand());
- __ ld(v0, FieldMemOperand(v0, GlobalObject::kNativeContextOffset));
+ __ ld(v0, FieldMemOperand(v0, JSGlobalObject::kNativeContextOffset));
__ ld(v0, ContextOperand(v0, expr->context_index()));
}
@@ -4436,9 +4252,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -5045,7 +4862,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ __ LoadRoot(a1, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5158,7 +4976,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 03def66d5f..d6ce1c02cd 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -7,7 +7,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -114,22 +113,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bne(&ok);
-
- __ LoadP(r5, GlobalObjectOperand());
- __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
-
- __ StoreP(r5, MemOperand(sp, receiver_offset), r0);
-
- __ bind(&ok);
+ __ AssertNotSmi(r5);
+ __ CompareObjectType(r5, r5, no_reg, FIRST_SPEC_OBJECT_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
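This prologue change is the flip side of the new Call builtin: fixing up an undefined receiver to the global proxy is no longer every sloppy-mode function's own job, it happens once in the builtin before the target is entered. What remains in the prologue is a debug-only assertion that a JSReceiver actually arrived. A toy model of the conversion that moved (a sketch, not V8 source):

    struct JSValue {
      bool is_null_or_undefined;
      bool is_js_receiver;
    };

    // What the Call builtin now guarantees a sloppy-mode callee will see.
    JSValue ConvertReceiver(JSValue receiver, JSValue global_proxy) {
      if (receiver.is_null_or_undefined) {
        return global_proxy;  // the fix-up the old prologue used to do
      }
      // A primitive receiver would be wrapped via ToObject; this sketch
      // only marks the result.
      if (!receiver.is_js_receiver) receiver.is_js_receiver = true;
      return receiver;
    }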
@@ -146,7 +135,6 @@ void FullCodeGenerator::Generate() {
}
info->set_prologue_offset(prologue_offset);
__ Prologue(info->IsCodePreAgingActive(), prologue_offset);
- info->AddNoFrameRange(0, masm_->pc_offset());
{
Comment cmnt(masm_, "[ Allocate locals");
@@ -198,6 +186,7 @@ void FullCodeGenerator::Generate() {
__ push(r4);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -239,8 +228,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register_r4| is correct.
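The single Prologue bailout id is split into two finer-grained ones here: ScriptContext right after the runtime call, recorded with TOS_REG because the new context is live in the return register at that point, and FunctionContext once context setup is complete. The resulting ordering (a sketch of the code above, not new code):

    //   CallRuntime(Runtime::kNewScriptContext, 2);
    //   PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
    //   ... allocate the context, copy parameters into it ...
    //   PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);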
@@ -467,9 +456,8 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
__ blr();
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -815,15 +803,12 @@ void FullCodeGenerator::VisitVariableDeclaration(
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ Push(r5, r3);
} else {
__ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
- __ Push(r5, r3);
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(r5, r3);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -874,7 +859,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(r5);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
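Both declaration visitors now push the variable's property attributes and call a single three-argument runtime function; the separate kDeclareReadOnlyLookupSlot entry point is gone, with read-only-ness carried in the attributes Smi instead. A signature sketch of what the third argument encodes (the names mirror V8; the exact bit layout is an assumption here):

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1 << 0 };

    // One runtime entry now covers what kDeclareLookupSlot and
    // kDeclareReadOnlyLookupSlot used to split between them:
    //   Runtime_DeclareLookupSlot(name, initial_value,
    //                             Smi::FromInt(attributes));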
@@ -1109,7 +1095,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r4, r3); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1164,6 +1149,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
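Moving the BodyId bailout is a correctness fix for deoptimization: the id names the point where unoptimized code resumes after a deopt, and as the new comment says, both optimizing compilers assume that point is immediately before the loop body, not up at the condition check. The emission order now reads (a sketch of the sequence above, not new code):

    //   PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
    //   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);  // right
    //   Visit(stmt->body());                                   // before body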
@@ -1211,7 +1198,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
@@ -1223,8 +1210,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r3);
__ mov(StoreDescriptor::NameRegister(),
@@ -1359,26 +1347,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
- } else {
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1719,7 +1692,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1768,7 +1740,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
+ Smi::FromInt(array_index));
+ __ LoadP(StoreDescriptor::ReceiverRegister(),
+ MemOperand(sp, kPointerSize));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_fast_elements) {
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ LoadP(r8, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
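With FLAG_vector_stores enabled, array-literal elements are initialized through the generic keyed store IC instead of the hand-written fast-elements store, so each store feeds the literal's feedback slot. Per element, the new branch performs, semantically (a sketch):

    //   receiver = the array literal (kept one slot down the stack)
    //   name     = Smi::FromInt(array_index)
    //   value    = accumulator (the just-evaluated subexpression)
    //   KeyedStoreIC(receiver, name, value, LiteralFeedbackSlot())

The old fast path survives only for the non-vector-stores configuration.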
@@ -2097,8 +2078,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mr(r4, r3);
__ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ li(r3, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2245,7 +2228,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ LoadP(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
__ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(r5);
__ LoadRoot(r6,
@@ -2509,7 +2492,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2602,7 +2585,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
@@ -2610,25 +2593,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r3));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(r3);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2842,11 +2806,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{
StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
@@ -2856,6 +2818,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ push(r0);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2867,9 +2830,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ LoadP(r0, MemOperand(sp, 0));
__ push(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2907,7 +2871,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2930,7 +2894,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
__ push(ip);
__ StoreP(r3, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2966,11 +2930,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2978,8 +2942,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
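EmitCall now threads receiver knowledge through to the CallIC instead of the old FUNCTION/METHOD distinction, and it registers a bailout point at the call's id before emitting the IC. The mode each caller picks, as encoded in the surrounding emitters (that the bare EmitCall(expr) calls default to kAny is an assumption based on the enum, not visible in this diff):

    //   free function call (callee is a VariableProxy) -> kNullOrUndefined
    //   property / keyed call (receiver just loaded)   -> kNotNullOrUndefined
    //   super and lookup-slot calls                    -> EmitCall(expr),
    //                                                     presumably kAny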
@@ -3060,91 +3025,40 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ push(r4);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r4);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Touch up the stack with the resolved function.
+ __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r3);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ push(r4);
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ // Record source position for debugger.
+ SetCallPosition(expr, arg_count);
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r3, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r3);
}
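With this, the only call shape that keeps per-architecture code is the possibly-direct-eval path; the big VisitCall dispatcher deleted above moves to shared code. A hedged sketch of what that platform-independent dispatcher presumably looks like (the real version lives in full-codegen.cc, outside this diff):

    void FullCodeGenerator::VisitCall(Call* expr) {
      switch (expr->GetCallType(isolate())) {
        case Call::POSSIBLY_EVAL_CALL: EmitPossiblyEvalCall(expr); break;
        case Call::GLOBAL_CALL:        EmitCallWithLoadIC(expr);   break;
        // ... LOOKUP_SLOT_CALL, PROPERTY_CALL, SUPER_CALL, OTHER_CALL
        //     route to the same Emit* helpers the deleted code used.
      }
    }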
@@ -3791,31 +3705,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
- // Load the argument into r3 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r3 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3836,18 +3725,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
- // Load the argument into r3 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3949,19 +3826,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(r4);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3969,6 +3833,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to r4.
int const argc = args->length() - 2;
__ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3982,38 +3847,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r3, &runtime);
- __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
- __ bne(&runtime);
-
- // InvokeFunction requires the function in r4. Move it in there.
- __ mr(r4, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r4, count, CALL_FUNCTION, NullCallWrapper());
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ b(&done);
-
- __ bind(&runtime);
- __ push(r3);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4070,19 +3903,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(r5, r4);
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4252,6 +4072,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ BranchOnOverflow(&bailout);
__ SmiUntag(string_length);
+ // Bailout for large object allocations.
+ __ Cmpi(string_length, Operand(Page::kMaxRegularHeapObjectSize), r0);
+ __ bgt(&bailout);
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4391,7 +4215,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
__ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ LoadP(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
__ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(r5, r6);
__ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
@@ -4417,7 +4241,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ push(r3);
__ LoadP(r3, GlobalObjectOperand());
- __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSGlobalObject::kNativeContextOffset));
__ LoadP(r3, ContextOperand(r3, expr->context_index()));
}
@@ -4427,9 +4251,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ CallStub(&stub);
+ __ mov(r3, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -5032,8 +4857,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmpi(r3, Operand::Zero());
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5143,7 +4968,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 0133c09d6e..4aa86d6bb2 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -7,7 +7,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -105,24 +104,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
-
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &ok, Label::kNear);
-
- __ movp(rcx, GlobalObjectOperand());
- __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
-
- __ movp(args.GetReceiverOperand(), rcx);
-
- __ bind(&ok);
+ __ AssertNotSmi(rcx);
+ __ CmpObjectType(rcx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -132,7 +119,6 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -185,6 +171,7 @@ void FullCodeGenerator::Generate() {
__ Push(rdi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -226,8 +213,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register| is correct.
@@ -439,14 +426,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&ok);
SetReturnPosition(literal());
- int no_frame_start = masm_->pc_offset();
__ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, rcx);
-
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -814,10 +798,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -870,7 +852,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
__ Push(variable->name());
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1102,7 +1085,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1155,6 +1137,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1203,7 +1187,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ Move(StoreDescriptor::NameRegister(),
@@ -1215,8 +1199,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), rax);
__ Move(StoreDescriptor::NameRegister(),
@@ -1349,27 +1334,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ Move(LoadDescriptor::NameRegister(), var->name());
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadIC(typeof_mode);
- }
+ __ Move(LoadDescriptor::NameRegister(), var->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ CallLoadIC(typeof_mode);
}
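The deleted IsGlobalSlot() branch was the LoadGlobalViaContext optimization for statically known global object properties; it is removed on every port, leaving one uniform path: global loads go through the load IC, keyed by the proxy's feedback slot. Per load, that is, semantically (a sketch):

    //   receiver = global object          (GlobalObjectOperand())
    //   name     = var->name()
    //   slot     = proxy->VariableFeedbackSlot()   // type feedback site
    //   CallLoadIC(typeof_mode)   // also covers DYNAMIC_GLOBAL lookups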
@@ -1706,7 +1675,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1757,7 +1725,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (has_constant_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
+ __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_constant_fast_elements) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
@@ -2089,8 +2064,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ Set(rax, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2217,7 +2194,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ movp(rbx, GlobalObjectOperand());
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
+ __ movp(rbx, FieldOperand(rbx, JSGlobalObject::kNativeContextOffset));
__ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
@@ -2411,7 +2388,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2502,7 +2479,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(StoreDescriptor::NameRegister(), var->name());
@@ -2510,26 +2487,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(rax));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(rax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2741,17 +2698,17 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
}
- // Push undefined as receiver. This is patched in the method prologue if it
+ // Push undefined as receiver. This is patched in the Call builtin if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2762,9 +2719,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2802,7 +2760,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2825,7 +2783,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2861,11 +2819,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2873,8 +2831,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2953,87 +2912,37 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and resolve
- // eval.
- __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the callee.
- __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ // Push a copy of the function (found below the arguments) and resolve
+ // eval.
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Touch up the callee.
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ SetCallPosition(expr, arg_count);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Set(rax, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, rax);
}
@@ -3677,32 +3586,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into rax and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into rax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3724,19 +3607,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into rax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3850,19 +3720,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ Pop(rdx);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3870,6 +3727,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to rdi.
int const argc = args->length() - 2;
__ movp(rdi, Operand(rsp, (argc + 1) * kPointerSize));
@@ -3883,38 +3741,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in rdi. Move it in there.
- __ movp(rdi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION, NullCallWrapper());
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ Push(rax);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3962,20 +3788,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(rbx);
- __ Pop(rcx);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4152,6 +3964,11 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ j(overflow, &bailout);
__ addl(string_length, scratch);
__ j(overflow, &bailout);
+ __ jmp(&bailout);
+
+ // Bailout for large object allocations.
+ __ cmpl(string_length, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &bailout);
// Live registers and stack values:
// string_length: Total length of result string.
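One oddity in the x64 version of this guard: the unconditional __ jmp(&bailout) inserted above makes the comparison that follows unreachable and sends every join through the runtime on this port, whether as a deliberate kill switch for the fast path or an oversight. The MIPS and PPC ports earlier in this diff emit the guard without it, which is presumably the intended shape:

    //   __ addl(string_length, scratch);
    //   __ j(overflow, &bailout);
    //   // Bailout for large object allocations.
    //   __ cmpl(string_length, Immediate(Page::kMaxRegularHeapObjectSize));
    //   __ j(greater, &bailout);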
@@ -4320,7 +4137,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime, TAG_OBJECT);
__ movp(rbx, GlobalObjectOperand());
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
+ __ movp(rbx, FieldOperand(rbx, JSGlobalObject::kNativeContextOffset));
__ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
@@ -4344,7 +4161,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ movp(rax, GlobalObjectOperand());
- __ movp(rax, FieldOperand(rax, GlobalObject::kNativeContextOffset));
+ __ movp(rax, FieldOperand(rax, JSGlobalObject::kNativeContextOffset));
__ movp(rax, ContextOperand(rax, expr->context_index()));
}
@@ -4354,9 +4171,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ Set(rax, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4963,8 +4781,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ testp(rax, rax);
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5075,7 +4893,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index a85152d7a9..8b1e5e98d4 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -7,7 +7,6 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
@@ -106,24 +105,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
+ __ AssertNotSmi(ecx);
+ __ CmpObjectType(ecx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -133,7 +120,6 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -187,6 +173,7 @@ void FullCodeGenerator::Generate() {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
@@ -228,8 +215,8 @@ void FullCodeGenerator::Generate() {
}
}
}
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
// Function register is trashed in case we bailout here. But since that
// could happen only when we allocate a context the value of
// |function_in_register| is correct.
@@ -433,13 +420,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&ok);
SetReturnPosition(literal());
- int no_frame_start = masm_->pc_offset();
__ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -797,16 +782,14 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
}
-
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
@@ -848,7 +831,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(Immediate(variable->name()));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
break;
}
}
@@ -1070,7 +1055,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1120,6 +1104,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1168,7 +1154,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::NameRegister(),
@@ -1179,8 +1165,9 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
__ mov(StoreDescriptor::NameRegister(),
@@ -1311,27 +1298,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1671,7 +1642,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1722,7 +1692,15 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (has_constant_fast_elements) {
+ if (FLAG_vector_stores) {
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(Smi::FromInt(array_index)));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ } else if (has_constant_fast_elements) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
@@ -1739,7 +1717,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
-
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -2051,8 +2028,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ __ Set(eax, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2181,7 +2160,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&done_allocate);
__ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -2408,7 +2387,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2499,7 +2478,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
@@ -2507,26 +2486,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(eax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2741,10 +2700,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2752,6 +2710,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ push(Immediate(isolate()->factory()->undefined_value()));
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2762,9 +2721,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2801,7 +2761,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2824,7 +2784,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2859,11 +2819,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
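The super-call sites above invoke EmitCall without a mode, which relies on a defaulted parameter; a sketch of the assumed declaration in full-codegen.h:

    // Assumed declaration: with no static knowledge about the receiver,
    // super calls fall back to the generic mode.
    void EmitCall(Call* expr,
                  ConvertReceiverMode mode = ConvertReceiverMode::kAny);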
@@ -2871,8 +2831,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2951,88 +2912,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ PushCalleeAndWithBaseObject(expr);
- // Touch up the stack with the resolved function.
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
+ // Touch up the stack with the resolved function.
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ SetCallPosition(expr, arg_count);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
}
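For reference, the stack layout this helper maintains for an eval-style call f(a, b), reconstructed from the offsets used above:

    // Stack during EmitPossiblyEvalCall for f(a, b):
    //   esp + 3 * kPointerSize : f          <- overwritten with resolved target
    //   esp + 2 * kPointerSize : receiver   (with-base object or undefined)
    //   esp + 1 * kPointerSize : a
    //   esp + 0 * kPointerSize : b
    // A copy of f is pushed for EmitResolvePossiblyDirectEval, which returns
    // the resolved function in eax; that value is written back over the f slot.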
@@ -3671,32 +3582,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into eax and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3718,19 +3603,6 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3844,19 +3716,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(edx);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3864,6 +3723,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3877,38 +3737,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; ++i) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in edi. Move it in there.
- __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3957,21 +3785,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(ebx);
- __ pop(ecx);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4136,6 +3949,11 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ j(overflow, &bailout);
__ shr(string_length, 1);
+
+ // Bailout for large object allocations.
+ __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
+ __ j(greater, &bailout);
+
// Live registers and stack values:
// string_length
// elements
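The new comparison guards the inline allocation: a joined one-byte string larger than a regular page must be built by the runtime instead. An illustrative C++ equivalent of the emitted check (string_length was just untagged by the shr):

    // Illustrative equivalent of the cmp/j(greater) pair above; bailout is
    // the slow path that rebuilds the result via the runtime.
    if (string_length > Page::kMaxRegularHeapObjectSize) goto bailout;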
@@ -4294,7 +4112,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
__ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -4319,7 +4137,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ push(Immediate(isolate()->factory()->undefined_value()));
__ mov(eax, GlobalObjectOperand());
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ __ mov(eax, FieldOperand(eax, JSGlobalObject::kNativeContextOffset));
__ mov(eax, ContextOperand(eax, expr->context_index()));
}
@@ -4329,9 +4147,10 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4940,8 +4759,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ test(eax, eax);
- Split(not_zero, if_true, if_false, fall_through);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5049,7 +4868,7 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(FLAG_vector_stores && !slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Immediate(SmiFromSlot(slot)));
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index 1be63323ca..c17acf52f2 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -121,6 +121,7 @@ class FuncNameInferrer : public ZoneObject {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FUNC_NAME_INFERRER_H_
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index e7e2230da2..9949bdf44f 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -125,7 +125,7 @@ class FutexEmulation : public AllStatic {
static base::LazyMutex mutex_;
static base::LazyInstance<FutexWaitList>::type wait_list_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FUTEX_EMULATION_H_
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 650999f394..3608fe81b6 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -54,6 +54,8 @@ class GlobalHandles::Node {
Internals::kNodeIsIndependentShift);
STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
Internals::kNodeIsPartiallyDependentShift);
+ STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
+ Internals::kNodeIsActiveShift);
}
#ifdef ENABLE_HANDLE_ZAPPING
@@ -64,7 +66,11 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
set_independent(false);
- set_partially_dependent(false);
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ set_active(false);
+ } else {
+ set_partially_dependent(false);
+ }
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
weak_callback_ = NULL;
@@ -86,7 +92,11 @@ class GlobalHandles::Node {
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
- set_partially_dependent(false);
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ set_active(false);
+ } else {
+ set_partially_dependent(false);
+ }
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
weak_callback_ = NULL;
@@ -106,7 +116,11 @@ class GlobalHandles::Node {
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
- set_partially_dependent(false);
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ set_active(false);
+ } else {
+ set_partially_dependent(false);
+ }
weak_callback_ = NULL;
DecreaseBlockUses();
}
@@ -140,12 +154,23 @@ class GlobalHandles::Node {
}
bool is_partially_dependent() {
+ CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
return IsPartiallyDependent::decode(flags_);
}
void set_partially_dependent(bool v) {
+ CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
flags_ = IsPartiallyDependent::update(flags_, v);
}
+ bool is_active() {
+ CHECK(FLAG_scavenge_reclaim_unmodified_objects);
+ return IsActive::decode(flags_);
+ }
+ void set_active(bool v) {
+ CHECK(FLAG_scavenge_reclaim_unmodified_objects);
+ flags_ = IsActive::update(flags_, v);
+ }
+
bool is_in_new_space_list() {
return IsInNewSpaceList::decode(flags_);
}
@@ -349,6 +374,8 @@ class GlobalHandles::Node {
// in_new_space_list) and a State.
class NodeState : public BitField<State, 0, 3> {};
class IsIndependent : public BitField<bool, 3, 1> {};
+ // The following two fields are mutually exclusive; which one is valid
+ // depends on FLAG_scavenge_reclaim_unmodified_objects.
+ class IsActive : public BitField<bool, 4, 1> {};
class IsPartiallyDependent : public BitField<bool, 4, 1> {};
class IsInNewSpaceList : public BitField<bool, 5, 1> {};
class NodeWeaknessType : public BitField<WeaknessType, 6, 2> {};
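IsActive and IsPartiallyDependent intentionally alias bit 4 of flags_; the CHECKs on FLAG_scavenge_reclaim_unmodified_objects keep the two interpretations from being mixed in one process. A small sketch of the aliasing:

    // Sketch: both fields decode the same bit, so a node can carry only one
    // of the two meanings, selected by the flag.
    uint32_t flags = 0;
    flags = IsActive::update(flags, true);
    DCHECK(IsPartiallyDependent::decode(flags));  // same bit reads back true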
@@ -646,10 +673,18 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- !node->is_partially_dependent())) {
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ node->is_active())) {
+ v->VisitPointer(node->location());
+ }
+ } else {
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ !node->is_partially_dependent())) {
v->VisitPointer(node->location());
+ }
}
}
}
@@ -687,6 +722,49 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
}
+void GlobalHandles::IdentifyWeakUnmodifiedObjects(
+ WeakSlotCallback is_unmodified) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsWeak() && !is_unmodified(node->location())) {
+ node->set_active(true);
+ }
+ }
+}
+
+
+void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
+ WeakSlotCallbackWithHeap is_unscavenged) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ DCHECK(node->is_in_new_space_list());
+ if ((node->is_independent() || !node->is_active()) && node->IsWeak() &&
+ is_unscavenged(isolate_->heap(), node->location())) {
+ node->MarkPending();
+ }
+ }
+}
+
+
+void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ DCHECK(node->is_in_new_space_list());
+ if ((node->is_independent() || !node->is_active()) &&
+ node->IsWeakRetainer()) {
+ // Pending weak phantom handles die immediately. Everything else survives.
+ if (node->state() == Node::PENDING &&
+ node->weakness_type() != NORMAL_WEAK) {
+ node->CollectPhantomCallbackData(isolate(),
+ &pending_phantom_callbacks_);
+ } else {
+ v->VisitPointer(node->location());
+ }
+ }
+ }
+}
+
+
bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
WeakSlotCallbackWithHeap can_skip) {
ComputeObjectGroupsAndImplicitReferences();
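A sketch of the order in which a scavenge is expected to use the three new hooks when FLAG_scavenge_reclaim_unmodified_objects is on; the callback names here are assumptions for illustration, not part of this diff:

    // 1. Before the scavenge: handles whose objects the embedder modified
    //    are marked active, so they are treated as strong below.
    global_handles->IdentifyWeakUnmodifiedObjects(&IsUnmodified);
    // 2. After copying live objects: weak unmodified handles whose targets
    //    were not scavenged become pending.
    global_handles->MarkNewSpaceWeakUnmodifiedObjectsPending(&IsUnscavenged);
    // 3. Visit survivors; pending phantom handles are collected instead.
    global_handles->IterateNewSpaceWeakUnmodifiedRoots(&scavenge_visitor);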
@@ -757,13 +835,23 @@ int GlobalHandles::PostScavengeProcessing(
// the freed_nodes.
continue;
}
- // Skip dependent handles. Their weak callbacks might expect to be
+ // Skip dependent or unmodified handles. Their weak callbacks might
+ // expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
- if (!node->is_independent() && !node->is_partially_dependent()) {
- continue;
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ if (!node->is_independent() && (node->is_active())) {
+ node->set_active(false);
+ continue;
+ }
+ node->set_active(false);
+ } else {
+ if (!node->is_independent() && !node->is_partially_dependent()) {
+ continue;
+ }
+ node->clear_partially_dependent();
}
- node->clear_partially_dependent();
+
if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
@@ -790,7 +878,11 @@ int GlobalHandles::PostMarkSweepProcessing(
// the freed_nodes.
continue;
}
- it.node()->clear_partially_dependent();
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ it.node()->set_active(false);
+ } else {
+ it.node()->clear_partially_dependent();
+ }
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
@@ -955,6 +1047,16 @@ void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
}
+void GlobalHandles::IterateWeakRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->has_wrapper_class_id() && node->IsWeak()) {
+ v->VisitEmbedderReference(node->location(), node->wrapper_class_id());
+ }
+ }
+}
+
+
int GlobalHandles::NumberOfWeakHandles() {
int count = 0;
for (NodeIterator it(this); !it.done(); it.Advance()) {
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 0ee8c20a37..7047d8ca01 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -197,6 +197,10 @@ class GlobalHandles {
// class ID.
void IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v);
+ // Iterates over all handles in new space that are weak, unmodified,
+ // and have class IDs.
+ void IterateWeakRootsInNewSpaceWithClassIds(ObjectVisitor* v);
+
// Iterates over all weak roots in heap.
void IterateWeakRoots(ObjectVisitor* v);
@@ -204,7 +208,7 @@ class GlobalHandles {
// them as pending.
void IdentifyWeakHandles(WeakSlotCallback f);
- // NOTE: Three ...NewSpace... functions below are used during
+ // NOTE: Five ...NewSpace... functions below are used during
// scavenge collections and iterate over sets of handles that are
// guaranteed to contain all handles holding new space objects (but
// may also include old space objects).
@@ -220,6 +224,19 @@ class GlobalHandles {
// See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+ // Finds weak independent or unmodified handles satisfying
+ // the callback predicate and marks them as pending. See the note above.
+ void MarkNewSpaceWeakUnmodifiedObjectsPending(
+ WeakSlotCallbackWithHeap is_unscavenged);
+
+ // Iterates over weak independent or unmodified handles.
+ // See the note above.
+ void IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v);
+
+ // Identifies unmodified objects that are in a weak state and marks them
+ // as unmodified.
+ void IdentifyWeakUnmodifiedObjects(WeakSlotCallback is_unmodified);
+
// Iterate over objects in object groups that have at least one object
// which requires visiting. The callback has to return true if objects
// can be skipped and false otherwise.
@@ -438,6 +455,7 @@ class EternalHandles {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_GLOBAL_HANDLES_H_
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 9d4bafb6ff..c3358870e5 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -217,6 +217,20 @@ F FUNCTION_CAST(Address addr) {
}
+// Determine whether the architecture uses function descriptors
+// which provide a level of indirection between the function pointer
+// and the function entrypoint.
+#if V8_HOST_ARCH_PPC && \
+ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define USES_FUNCTION_DESCRIPTORS 1
+#define FUNCTION_ENTRYPOINT_ADDRESS(f) \
+ (reinterpret_cast<v8::internal::Address*>( \
+ &(reinterpret_cast<intptr_t*>(f)[0])))
+#else
+#define USES_FUNCTION_DESCRIPTORS 0
+#endif
+
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
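On AIX and big-endian PPC64 a C function pointer refers to a descriptor whose first word holds the code address, not to the code itself. A sketch of resolving the real entry point with the new macros (fn is a plain function pointer):

    #if USES_FUNCTION_DESCRIPTORS
      Address entry = *FUNCTION_ENTRYPOINT_ADDRESS(fn);  // first descriptor word
    #else
      Address entry = reinterpret_cast<Address>(fn);
    #endif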
@@ -443,6 +457,7 @@ class String;
class Symbol;
class Name;
class Struct;
+class TypeFeedbackVector;
class Variable;
class RelocInfo;
class Deserializer;
@@ -580,20 +595,7 @@ enum InlineCacheState {
// A generic handler is installed and no extra typefeedback is recorded.
GENERIC,
// Special state for debug break or step in prepare stubs.
- DEBUG_STUB,
- // Type-vector-based ICs have a default state, with the full calculation
- // of IC state only determined by a look at the IC and the typevector
- // together.
- DEFAULT
-};
-
-
-enum CallFunctionFlags {
- NO_CALL_FUNCTION_FLAGS,
- CALL_AS_METHOD,
- // Always wrap the receiver and call the JSFunction. Only use this flag
- // when both the receiver type and the target method are statically known.
- WRAP_AND_CALL
+ DEBUG_STUB
};
@@ -741,6 +743,31 @@ enum CpuFeature {
};
+// Defines hints about receiver values based on structural knowledge.
+enum class ConvertReceiverMode : unsigned {
+ kNullOrUndefined, // Guaranteed to be null or undefined.
+ kNotNullOrUndefined, // Guaranteed to never be null or undefined.
+ kAny // No specific knowledge about receiver.
+};
+
+inline size_t hash_value(ConvertReceiverMode mode) {
+ return bit_cast<unsigned>(mode);
+}
+
+inline std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return os << "NULL_OR_UNDEFINED";
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return os << "NOT_NULL_OR_UNDEFINED";
+ case ConvertReceiverMode::kAny:
+ return os << "ANY";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
// Used to specify if a macro instruction must perform a smi check on tagged
// values.
enum SmiCheckType {
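The full-codegen changes earlier in this diff pick the mode from syntactic shape; condensed into a sketch (ModeFor is illustrative, not a real helper):

    // Mirrors EmitCallWithLoadIC: a bare f() pushes undefined as receiver,
    // while o.f() always supplies a real one.
    ConvertReceiverMode ModeFor(Expression* callee) {
      return callee->IsVariableProxy()
                 ? ConvertReceiverMode::kNullOrUndefined
                 : ConvertReceiverMode::kNotNullOrUndefined;
    }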
@@ -756,8 +783,7 @@ enum ScopeType {
SCRIPT_SCOPE, // The top-level scope for a script or a top-level eval.
CATCH_SCOPE, // The scope introduced by catch.
BLOCK_SCOPE, // The scope introduced by a new block.
- WITH_SCOPE, // The scope introduced by with.
- ARROW_SCOPE // The top-level scope for an arrow function literal.
+ WITH_SCOPE // The scope introduced by with.
};
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
@@ -935,6 +961,8 @@ enum FunctionKind {
kInObjectLiteral = 1 << 7,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
+ kClassConstructor =
+ kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
kConciseGeneratorMethodInObjectLiteral =
kConciseGeneratorMethod | kInObjectLiteral,
@@ -1003,9 +1031,7 @@ inline bool IsSubclassConstructor(FunctionKind kind) {
inline bool IsClassConstructor(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
- return kind &
- (FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
- FunctionKind::kDefaultConstructor);
+ return kind & FunctionKind::kClassConstructor;
}
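The new kClassConstructor constant simply names the union of the three constructor bits; the predicate is unchanged, as this illustrative assertion shows:

    STATIC_ASSERT(FunctionKind::kClassConstructor ==
                  (FunctionKind::kBaseConstructor |
                   FunctionKind::kSubclassConstructor |
                   FunctionKind::kDefaultConstructor));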
@@ -1020,7 +1046,8 @@ inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
namespace i = v8::internal;
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 8c547e1b9c..cfaf4fb6eb 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -14,15 +14,23 @@ namespace v8 {
namespace internal {
HandleBase::HandleBase(Object* object, Isolate* isolate)
- : location_(HandleScope::CreateHandle(isolate, object)) {}
+ : location_(HandleScope::GetHandle(isolate, object)) {}
+
+
+// Allocate a new handle for the object; do not canonicalize.
+template <typename T>
+Handle<T> Handle<T>::New(T* object, Isolate* isolate) {
+ return Handle(
+ reinterpret_cast<T**>(HandleScope::CreateHandle(isolate, object)));
+}
HandleScope::HandleScope(Isolate* isolate) {
- HandleScopeData* current = isolate->handle_scope_data();
+ HandleScopeData* data = isolate->handle_scope_data();
isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
+ prev_next_ = data->next;
+ prev_limit_ = data->limit;
+ data->level++;
}
@@ -76,7 +84,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
// Throw away all handles in the current scope.
CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
- DCHECK(current->level > 0);
+ DCHECK(current->level > current->sealed_level);
Handle<T> result(value, isolate_);
// Reinitialize the current scope (so that it's ready
// to be used or closed again).
@@ -87,24 +95,30 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
}
-template <typename T>
-T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
+Object** HandleScope::CreateHandle(Isolate* isolate, Object* value) {
DCHECK(AllowHandleAllocation::IsAllowed());
- HandleScopeData* current = isolate->handle_scope_data();
+ HandleScopeData* data = isolate->handle_scope_data();
- Object** cur = current->next;
- if (cur == current->limit) cur = Extend(isolate);
+ Object** result = data->next;
+ if (result == data->limit) result = Extend(isolate);
// Update the current next field, set the value in the created
// handle, and return the result.
- DCHECK(cur < current->limit);
- current->next = cur + 1;
+ DCHECK(result < data->limit);
+ data->next = result + 1;
- T** result = reinterpret_cast<T**>(cur);
*result = value;
return result;
}
+Object** HandleScope::GetHandle(Isolate* isolate, Object* value) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ HandleScopeData* data = isolate->handle_scope_data();
+ CanonicalHandleScope* canonical = data->canonical_scope;
+ return canonical ? canonical->Lookup(value) : CreateHandle(isolate, value);
+}
+
+
#ifdef DEBUG
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
@@ -112,10 +126,10 @@ inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
HandleScopeData* current = isolate_->handle_scope_data();
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
- limit_ = current->limit;
+ prev_limit_ = current->limit;
current->limit = current->next;
- level_ = current->level;
- current->level = 0;
+ prev_sealed_level_ = current->sealed_level;
+ current->sealed_level = current->level;
}
@@ -123,10 +137,10 @@ inline SealHandleScope::~SealHandleScope() {
// Restore state in current handle scope to re-enable handle
// allocations.
HandleScopeData* current = isolate_->handle_scope_data();
- DCHECK_EQ(0, current->level);
- current->level = level_;
DCHECK_EQ(current->next, current->limit);
- current->limit = limit_;
+ current->limit = prev_limit_;
+ DCHECK_EQ(current->level, current->sealed_level);
+ current->sealed_level = prev_sealed_level_;
}
#endif
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index ae6fac89d3..b162ba8645 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -4,7 +4,9 @@
#include "src/handles.h"
+#include "src/address-map.h"
#include "src/base/logging.h"
+#include "src/identity-map.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -55,7 +57,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
DCHECK(result == current->limit);
// Make sure there's at least one scope on the stack and that the
// top of the scope stack isn't a barrier.
- if (!Utils::ApiCheck(current->level != 0,
+ if (!Utils::ApiCheck(current->level != current->sealed_level,
"v8::HandleScope::CreateHandle()",
"Cannot create a handle without a HandleScope")) {
return NULL;
@@ -117,6 +119,48 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
}
+CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
+ : isolate_(isolate) {
+ HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
+ prev_canonical_scope_ = handle_scope_data->canonical_scope;
+ handle_scope_data->canonical_scope = this;
+ root_index_map_ = new RootIndexMap(isolate);
+ identity_map_ = new IdentityMap<Object**>(isolate->heap(), &zone_);
+ canonical_level_ = handle_scope_data->level;
+}
+
+
+CanonicalHandleScope::~CanonicalHandleScope() {
+ delete root_index_map_;
+ delete identity_map_;
+ isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
+}
+
+
+Object** CanonicalHandleScope::Lookup(Object* object) {
+ DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
+ if (isolate_->handle_scope_data()->level != canonical_level_) {
+ // We are in an inner handle scope. Do not canonicalize since we will leave
+ // this handle scope while still being in the canonical scope.
+ return HandleScope::CreateHandle(isolate_, object);
+ }
+ if (object->IsHeapObject()) {
+ int index = root_index_map_->Lookup(HeapObject::cast(object));
+ if (index != RootIndexMap::kInvalidRootIndex) {
+ return isolate_->heap()
+ ->root_handle(static_cast<Heap::RootListIndex>(index))
+ .location();
+ }
+ }
+ Object*** entry = identity_map_->Get(object);
+ if (*entry == nullptr) {
+ // Allocate new handle location.
+ *entry = HandleScope::CreateHandle(isolate_, object);
+ }
+ return reinterpret_cast<Object**>(*entry);
+}
+
+
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 85fa839f3f..4c10f20738 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/checks.h"
#include "src/globals.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -92,6 +93,9 @@ class Handle final : public HandleBase {
V8_INLINE explicit Handle(T* object) : Handle(object, object->GetIsolate()) {}
V8_INLINE Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
+ // Allocate a new handle for the object, do not canonicalize.
+ V8_INLINE static Handle<T> New(T* object, Isolate* isolate);
+
// Constructor for handling automatic up casting.
// Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
template <typename S>
@@ -254,9 +258,11 @@ class HandleScope {
// Counts the number of allocated handles.
static int NumberOfHandles(Isolate* isolate);
+ // Create a new handle or lookup a canonical handle.
+ V8_INLINE static Object** GetHandle(Isolate* isolate, Object* value);
+
// Creates a new handle with the given value.
- template <typename T>
- static inline T** CreateHandle(Isolate* isolate, T* value);
+ V8_INLINE static Object** CreateHandle(Isolate* isolate, Object* value);
// Deallocates any extensions used by the current scope.
static void DeleteExtensions(Isolate* isolate);
@@ -305,11 +311,44 @@ class HandleScope {
friend class v8::HandleScope;
friend class DeferredHandles;
+ friend class DeferredHandleScope;
friend class HandleScopeImplementer;
friend class Isolate;
};
+// Forward declarations for CanonicalHandleScope.
+template <typename V>
+class IdentityMap;
+class RootIndexMap;
+
+
+// A CanonicalHandleScope does not open a new HandleScope. It changes the
+// existing HandleScope so that Handles created within are canonicalized.
+// This does not apply to nested inner HandleScopes unless a nested
+// CanonicalHandleScope is introduced. Handles are only canonicalized within
+// the same CanonicalHandleScope, but not across nested ones.
+class CanonicalHandleScope final {
+ public:
+ explicit CanonicalHandleScope(Isolate* isolate);
+ ~CanonicalHandleScope();
+
+ private:
+ Object** Lookup(Object* object);
+
+ Isolate* isolate_;
+ Zone zone_;
+ RootIndexMap* root_index_map_;
+ IdentityMap<Object**>* identity_map_;
+ // Ordinary nested handle scopes within the current one are not canonical.
+ int canonical_level_;
+ // We may have nested canonical scopes. Handles are canonical within each one.
+ CanonicalHandleScope* prev_canonical_scope_;
+
+ friend class HandleScope;
+};
+
+
class DeferredHandleScope final {
public:
explicit DeferredHandleScope(Isolate* isolate);
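A usage sketch of the new scope, following the semantics in the comment above (heap_object stands in for any tagged object pointer):

    HandleScope scope(isolate);
    CanonicalHandleScope canonical(isolate);
    Handle<Object> a(heap_object, isolate);
    Handle<Object> b(heap_object, isolate);
    DCHECK_EQ(a.location(), b.location());  // canonicalized: one backing slot
    Handle<Object> c = Handle<Object>::New(heap_object, isolate);
    DCHECK_NE(a.location(), c.location());  // New() bypasses canonicalization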
@@ -345,8 +384,8 @@ class SealHandleScope final {
inline ~SealHandleScope();
private:
Isolate* isolate_;
- Object** limit_;
- int level_;
+ Object** prev_limit_;
+ int prev_sealed_level_;
#endif
};
@@ -355,10 +394,13 @@ struct HandleScopeData final {
Object** next;
Object** limit;
int level;
+ int sealed_level;
+ CanonicalHandleScope* canonical_scope;
void Initialize() {
next = limit = NULL;
- level = 0;
+ sealed_level = level = 0;
+ canonical_scope = NULL;
}
};
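Sealing now raises sealed_level to the current level rather than zeroing level, so scopes opened above a seal still work; a sketch (heap_object is a placeholder):

    SealHandleScope seal(isolate);  // sealed_level = level; allocation blocked
    {
      HandleScope inner(isolate);   // level > sealed_level: allowed again
      Handle<Object> ok(heap_object, isolate);
    }
    // Handle<Object> bad(heap_object, isolate);  // would fail the ApiCheck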
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
deleted file mode 100644
index 0867f7cd46..0000000000
--- a/deps/v8/src/harmony-array.js
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GetIterator;
-var GetMethod;
-var GlobalArray = global.Array;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MathMax;
-var MathMin;
-var ObjectIsFrozen;
-var ObjectDefineProperty;
-var ToNumber;
-
-utils.Import(function(from) {
- GetIterator = from.GetIterator;
- GetMethod = from.GetMethod;
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- ObjectIsFrozen = from.ObjectIsFrozen;
- ObjectDefineProperty = from.ObjectDefineProperty;
- ToNumber = from.ToNumber;
-});
-
-// -------------------------------------------------------------------
-
-function InnerArrayCopyWithin(target, start, end, array, length) {
- target = TO_INTEGER(target);
- var to;
- if (target < 0) {
- to = MathMax(length + target, 0);
- } else {
- to = MathMin(target, length);
- }
-
- start = TO_INTEGER(start);
- var from;
- if (start < 0) {
- from = MathMax(length + start, 0);
- } else {
- from = MathMin(start, length);
- }
-
- end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
- var final;
- if (end < 0) {
- final = MathMax(length + end, 0);
- } else {
- final = MathMin(end, length);
- }
-
- var count = MathMin(final - from, length - to);
- var direction = 1;
- if (from < to && to < (from + count)) {
- direction = -1;
- from = from + count - 1;
- to = to + count - 1;
- }
-
- while (count > 0) {
- if (from in array) {
- array[to] = array[from];
- } else {
- delete array[to];
- }
- from = from + direction;
- to = to + direction;
- count--;
- }
-
- return array;
-}
-
-// ES6 draft 03-17-15, section 22.1.3.3
-function ArrayCopyWithin(target, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayCopyWithin(target, start, end, array, length);
-}
-
-function InnerArrayFind(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw MakeTypeError(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return element;
- }
- }
-
- return;
-}
-
-// ES6 draft 07-15-13, section 15.4.3.23
-function ArrayFind(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
-
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFind(predicate, thisArg, array, length);
-}
-
-function InnerArrayFindIndex(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw MakeTypeError(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return i;
- }
- }
-
- return -1;
-}
-
-// ES6 draft 07-15-13, section 15.4.3.24
-function ArrayFindIndex(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
-
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFindIndex(predicate, thisArg, array, length);
-}
-
-// ES6, draft 04-05-14, section 22.1.3.6
-function InnerArrayFill(value, start, end, array, length) {
- var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
- var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
-
- if (i < 0) {
- i += length;
- if (i < 0) i = 0;
- } else {
- if (i > length) i = length;
- }
-
- if (end < 0) {
- end += length;
- if (end < 0) end = 0;
- } else {
- if (end > length) end = length;
- }
-
- if ((end - i) > 0 && ObjectIsFrozen(array)) {
- throw MakeTypeError(kArrayFunctionsOnFrozen);
- }
-
- for (; i < end; i++)
- array[i] = value;
- return array;
-}
-
-// ES6, draft 04-05-14, section 22.1.3.6
-function ArrayFill(value, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
-
- return InnerArrayFill(value, start, end, array, length);
-}
-
-function AddArrayElement(constructor, array, i, value) {
- if (constructor === GlobalArray) {
- %AddElement(array, i, value);
- } else {
- ObjectDefineProperty(array, i, {
- value: value, writable: true, configurable: true, enumerable: true
- });
- }
-}
-
-// ES6, draft 10-14-14, section 22.1.2.1
-function ArrayFrom(arrayLike, mapfn, receiver) {
- var items = TO_OBJECT(arrayLike);
- var mapping = !IS_UNDEFINED(mapfn);
-
- if (mapping) {
- if (!IS_CALLABLE(mapfn)) {
- throw MakeTypeError(kCalledNonCallable, mapfn);
- }
- }
-
- var iterable = GetMethod(items, iteratorSymbol);
- var k;
- var result;
- var mappedValue;
- var nextValue;
-
- if (!IS_UNDEFINED(iterable)) {
- result = %IsConstructor(this) ? new this() : [];
-
- var iterator = GetIterator(items, iterable);
-
- k = 0;
- while (true) {
- var next = iterator.next();
-
- if (!IS_OBJECT(next)) {
- throw MakeTypeError(kIteratorResultNotAnObject, next);
- }
-
- if (next.done) {
- result.length = k;
- return result;
- }
-
- nextValue = next.value;
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- AddArrayElement(this, result, k, mappedValue);
- k++;
- }
- } else {
- var len = TO_LENGTH(items.length);
- result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
-
- for (k = 0; k < len; ++k) {
- nextValue = items[k];
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- AddArrayElement(this, result, k, mappedValue);
- }
-
- result.length = k;
- return result;
- }
-}
-
-// ES6, draft 05-22-14, section 22.1.2.3
-function ArrayOf() {
- var length = %_ArgumentsLength();
- var constructor = this;
- // TODO: Implement IsConstructor (ES6 section 7.2.5)
- var array = %IsConstructor(constructor) ? new constructor(length) : [];
- for (var i = 0; i < length; i++) {
- AddArrayElement(constructor, array, i, %_Arguments(i));
- }
- array.length = length;
- return array;
-}
-
-// -------------------------------------------------------------------
-
-%FunctionSetLength(ArrayCopyWithin, 2);
-%FunctionSetLength(ArrayFrom, 1);
-%FunctionSetLength(ArrayFill, 1);
-%FunctionSetLength(ArrayFind, 1);
-%FunctionSetLength(ArrayFindIndex, 1);
-
-// Set up non-enumerable functions on the Array object.
-utils.InstallFunctions(GlobalArray, DONT_ENUM, [
- "from", ArrayFrom,
- "of", ArrayOf
-]);
-
-// Set up the non-enumerable functions on the Array prototype object.
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
- "copyWithin", ArrayCopyWithin,
- "find", ArrayFind,
- "findIndex", ArrayFindIndex,
- "fill", ArrayFill
-]);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.ArrayFrom = ArrayFrom;
- to.InnerArrayCopyWithin = InnerArrayCopyWithin;
- to.InnerArrayFill = InnerArrayFill;
- to.InnerArrayFind = InnerArrayFind;
- to.InnerArrayFindIndex = InnerArrayFindIndex;
-});
-
-})
diff --git a/deps/v8/src/harmony-concat-spreadable.js b/deps/v8/src/harmony-concat-spreadable.js
deleted file mode 100644
index c5d906a642..0000000000
--- a/deps/v8/src/harmony-concat-spreadable.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-var isConcatSpreadableSymbol =
- utils.ImportNow("is_concat_spreadable_symbol");
-
-utils.InstallConstants(global.Symbol, [
- // TODO(littledan): Move to symbol.js when shipping
- "isConcatSpreadable", isConcatSpreadableSymbol
-]);
-
-})
diff --git a/deps/v8/src/harmony-object-observe.js b/deps/v8/src/harmony-object-observe.js
deleted file mode 100644
index 44006cd2e9..0000000000
--- a/deps/v8/src/harmony-object-observe.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-utils.InstallFunctions(global.Object, DONT_ENUM, $observeObjectMethods);
-utils.InstallFunctions(global.Array, DONT_ENUM, $observeArrayMethods);
-
-})
diff --git a/deps/v8/src/harmony-reflect.js b/deps/v8/src/harmony-reflect.js
deleted file mode 100644
index f1fe8605e5..0000000000
--- a/deps/v8/src/harmony-reflect.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-var GlobalReflect = global.Reflect;
-var ReflectApply = utils.ImportNow("reflect_apply");
-var ReflectConstruct = utils.ImportNow("reflect_construct");
-
-utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
- "apply", ReflectApply,
- "construct", ReflectConstruct
-]);
-
-})
diff --git a/deps/v8/src/harmony-regexp.js b/deps/v8/src/harmony-regexp.js
deleted file mode 100644
index 1ab76fad4a..0000000000
--- a/deps/v8/src/harmony-regexp.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-
-// -------------------------------------------------------------------
-
-// ES6 draft 12-06-13, section 21.2.5.3
-// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
-function RegExpGetFlags() {
- if (!IS_SPEC_OBJECT(this)) {
- throw MakeTypeError(kFlagsGetterNonObject, TO_STRING(this));
- }
- var result = '';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- if (this.unicode) result += 'u';
- if (this.sticky) result += 'y';
- return result;
-}
-
-%DefineAccessorPropertyUnchecked(GlobalRegExp.prototype, 'flags',
- RegExpGetFlags, null, DONT_ENUM);
-%SetNativeFlag(RegExpGetFlags);
-
-})
diff --git a/deps/v8/src/harmony-tostring.js b/deps/v8/src/harmony-tostring.js
deleted file mode 100644
index 8e76c3a5bb..0000000000
--- a/deps/v8/src/harmony-tostring.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalSymbol = global.Symbol;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.InstallConstants(GlobalSymbol, [
- // TODO(dslomov, caitp): Move to symbol.js when shipping
- "toStringTag", toStringTagSymbol
-]);
-
-})
diff --git a/deps/v8/src/harmony-typedarray.js b/deps/v8/src/harmony-typedarray.js
deleted file mode 100644
index 9d66e211e9..0000000000
--- a/deps/v8/src/harmony-typedarray.js
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-macro TYPED_ARRAYS(FUNCTION)
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-FUNCTION(Uint8Array)
-FUNCTION(Int8Array)
-FUNCTION(Uint16Array)
-FUNCTION(Int16Array)
-FUNCTION(Uint32Array)
-FUNCTION(Int32Array)
-FUNCTION(Float32Array)
-FUNCTION(Float64Array)
-FUNCTION(Uint8ClampedArray)
-endmacro
-
-macro DECLARE_GLOBALS(NAME)
-var GlobalNAME = global.NAME;
-endmacro
-
-TYPED_ARRAYS(DECLARE_GLOBALS)
-DECLARE_GLOBALS(Array)
-
-var ArrayFrom;
-var ArrayToString;
-var InnerArrayCopyWithin;
-var InnerArrayEvery;
-var InnerArrayFill;
-var InnerArrayFilter;
-var InnerArrayFind;
-var InnerArrayFindIndex;
-var InnerArrayForEach;
-var InnerArrayIndexOf;
-var InnerArrayJoin;
-var InnerArrayLastIndexOf;
-var InnerArrayMap;
-var InnerArrayReduce;
-var InnerArrayReduceRight;
-var InnerArraySome;
-var InnerArraySort;
-var InnerArrayToLocaleString;
-var IsNaN;
-var MathMax;
-var MathMin;
-var PackedArrayReverse;
-var ToNumber;
-
-utils.Import(function(from) {
- ArrayFrom = from.ArrayFrom;
- ArrayToString = from.ArrayToString;
- InnerArrayCopyWithin = from.InnerArrayCopyWithin;
- InnerArrayEvery = from.InnerArrayEvery;
- InnerArrayFill = from.InnerArrayFill;
- InnerArrayFilter = from.InnerArrayFilter;
- InnerArrayFind = from.InnerArrayFind;
- InnerArrayFindIndex = from.InnerArrayFindIndex;
- InnerArrayForEach = from.InnerArrayForEach;
- InnerArrayIndexOf = from.InnerArrayIndexOf;
- InnerArrayJoin = from.InnerArrayJoin;
- InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
- InnerArrayMap = from.InnerArrayMap;
- InnerArrayReduce = from.InnerArrayReduce;
- InnerArrayReduceRight = from.InnerArrayReduceRight;
- InnerArraySome = from.InnerArraySome;
- InnerArraySort = from.InnerArraySort;
- InnerArrayToLocaleString = from.InnerArrayToLocaleString;
- IsNaN = from.IsNaN;
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- PackedArrayReverse = from.PackedArrayReverse;
- ToNumber = from.ToNumber;
-});
-
-// -------------------------------------------------------------------
-
-function ConstructTypedArray(constructor, arg) {
- // TODO(littledan): This is an approximation of the spec, which requires
- // that only real TypedArray classes be accepted (22.2.2.1.1)
- if (!%IsConstructor(constructor) || IS_UNDEFINED(constructor.prototype) ||
- !%HasOwnProperty(constructor.prototype, "BYTES_PER_ELEMENT")) {
- throw MakeTypeError(kNotTypedArray);
- }
-
- // TODO(littledan): The spec requires that, rather than directly calling
- // the constructor, a TypedArray is created with the proper proto and
- // underlying size and element size, and elements are put in one by one.
- // By contrast, this would allow subclasses to make a radically different
- // constructor with different semantics.
- return new constructor(arg);
-}
-
-function ConstructTypedArrayLike(typedArray, arg) {
- // TODO(littledan): The spec requires that we actually use
- // typedArray.constructor[Symbol.species] (bug v8:4093)
- // Also, it should default to the default constructor from
- // table 49 if typedArray.constructor doesn't exist.
- return ConstructTypedArray(typedArray.constructor, arg);
-}
-
-function TypedArrayCopyWithin(target, start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- // TODO(littledan): Replace with a memcpy for better performance
- return InnerArrayCopyWithin(target, start, end, this, length);
-}
-%FunctionSetLength(TypedArrayCopyWithin, 2);
-
-// ES6 draft 05-05-15, section 22.2.3.7
-function TypedArrayEvery(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayEvery(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArrayEvery, 1);
-
-// ES6 draft 08-24-14, section 22.2.3.12
-function TypedArrayForEach(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- InnerArrayForEach(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArrayForEach, 1);
-
-// ES6 draft 04-05-14 section 22.2.3.8
-function TypedArrayFill(value, start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFill(value, start, end, this, length);
-}
-%FunctionSetLength(TypedArrayFill, 1);
-
-// ES6 draft 07-15-13, section 22.2.3.9
-function TypedArrayFilter(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- var array = InnerArrayFilter(predicate, thisArg, this, length);
- return ConstructTypedArrayLike(this, array);
-}
-%FunctionSetLength(TypedArrayFilter, 1);
-
-// ES6 draft 07-15-13, section 22.2.3.10
-function TypedArrayFind(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFind(predicate, thisArg, this, length);
-}
-%FunctionSetLength(TypedArrayFind, 1);
-
-// ES6 draft 07-15-13, section 22.2.3.11
-function TypedArrayFindIndex(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFindIndex(predicate, thisArg, this, length);
-}
-%FunctionSetLength(TypedArrayFindIndex, 1);
-
-// ES6 draft 05-18-15, section 22.2.3.21
-function TypedArrayReverse() {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return PackedArrayReverse(this, length);
-}
-
-
-function TypedArrayComparefn(x, y) {
- if (IsNaN(x) && IsNaN(y)) {
- return IsNaN(y) ? 0 : 1;
- }
- if (IsNaN(x)) {
- return 1;
- }
- if (x === 0 && x === y) {
- if (%_IsMinusZero(x)) {
- if (!%_IsMinusZero(y)) {
- return -1;
- }
- } else if (%_IsMinusZero(y)) {
- return 1;
- }
- }
- return x - y;
-}
-
-
-// ES6 draft 05-18-15, section 22.2.3.25
-function TypedArraySort(comparefn) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- if (IS_UNDEFINED(comparefn)) {
- comparefn = TypedArrayComparefn;
- }
-
- return InnerArraySort(this, length, comparefn);
-}
-
-
-// ES6 section 22.2.3.13
-function TypedArrayIndexOf(element, index) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayIndexOf(this, element, index, length);
-}
-%FunctionSetLength(TypedArrayIndexOf, 1);
-
-
-// ES6 section 22.2.3.16
-function TypedArrayLastIndexOf(element, index) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayLastIndexOf(this, element, index, length,
- %_ArgumentsLength());
-}
-%FunctionSetLength(TypedArrayLastIndexOf, 1);
-
-
-// ES6 draft 07-15-13, section 22.2.3.18
-function TypedArrayMap(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- // TODO(littledan): Preallocate rather than making an intermediate
- // InternalArray, for better performance.
- var length = %_TypedArrayGetLength(this);
- var array = InnerArrayMap(predicate, thisArg, this, length);
- return ConstructTypedArrayLike(this, array);
-}
-%FunctionSetLength(TypedArrayMap, 1);
-
-
-// ES6 draft 05-05-15, section 22.2.3.24
-function TypedArraySome(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArraySome(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArraySome, 1);
-
-
-// ES6 section 22.2.3.27
-function TypedArrayToLocaleString() {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayToLocaleString(this, length);
-}
-
-
-// ES6 section 22.2.3.28
-function TypedArrayToString() {
- return %_Call(ArrayToString, this);
-}
-
-
-// ES6 section 22.2.3.14
-function TypedArrayJoin(separator) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayJoin(separator, this, length);
-}
-
-
-// ES6 draft 07-15-13, section 22.2.3.19
-function TypedArrayReduce(callback, current) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayReduce(callback, current, this, length,
- %_ArgumentsLength());
-}
-%FunctionSetLength(TypedArrayReduce, 1);
-
-
-// ES6 draft 07-15-13, section 22.2.3.19
-function TypedArrayReduceRight(callback, current) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayReduceRight(callback, current, this, length,
- %_ArgumentsLength());
-}
-%FunctionSetLength(TypedArrayReduceRight, 1);
-
-
-function TypedArraySlice(start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
- var len = %_TypedArrayGetLength(this);
-
- var relativeStart = TO_INTEGER(start);
-
- var k;
- if (relativeStart < 0) {
- k = MathMax(len + relativeStart, 0);
- } else {
- k = MathMin(relativeStart, len);
- }
-
- var relativeEnd;
- if (IS_UNDEFINED(end)) {
- relativeEnd = len;
- } else {
- relativeEnd = TO_INTEGER(end);
- }
-
- var final;
- if (relativeEnd < 0) {
- final = MathMax(len + relativeEnd, 0);
- } else {
- final = MathMin(relativeEnd, len);
- }
-
- var count = MathMax(final - k, 0);
- var array = ConstructTypedArrayLike(this, count);
- // The code below is the 'then' branch; the 'else' branch specifies
- // a memcpy. Because V8 doesn't canonicalize NaN, the difference is
- // unobservable.
- var n = 0;
- while (k < final) {
- var kValue = this[k];
- // TODO(littledan): The spec says to throw on an error in setting;
- // does this throw?
- array[n] = kValue;
- k++;
- n++;
- }
- return array;
-}
-
-
-// ES6 draft 08-24-14, section 22.2.2.2
-function TypedArrayOf() {
- var length = %_ArgumentsLength();
- var array = new this(length);
- for (var i = 0; i < length; i++) {
- array[i] = %_Arguments(i);
- }
- return array;
-}
-
-
-function TypedArrayFrom(source, mapfn, thisArg) {
- // TODO(littledan): Investigate if there is a receiver which could be
- // faster to accumulate on than Array, e.g., a TypedVector.
- var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
- return ConstructTypedArray(this, array);
-}
-%FunctionSetLength(TypedArrayFrom, 1);
-
-// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
-macro EXTEND_TYPED_ARRAY(NAME)
- // Set up non-enumerable functions on the object.
- utils.InstallFunctions(GlobalNAME, DONT_ENUM | DONT_DELETE | READ_ONLY, [
- "from", TypedArrayFrom,
- "of", TypedArrayOf
- ]);
-
- // Set up non-enumerable functions on the prototype object.
- utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- "copyWithin", TypedArrayCopyWithin,
- "every", TypedArrayEvery,
- "fill", TypedArrayFill,
- "filter", TypedArrayFilter,
- "find", TypedArrayFind,
- "findIndex", TypedArrayFindIndex,
- "indexOf", TypedArrayIndexOf,
- "join", TypedArrayJoin,
- "lastIndexOf", TypedArrayLastIndexOf,
- "forEach", TypedArrayForEach,
- "map", TypedArrayMap,
- "reduce", TypedArrayReduce,
- "reduceRight", TypedArrayReduceRight,
- "reverse", TypedArrayReverse,
- "slice", TypedArraySlice,
- "some", TypedArraySome,
- "sort", TypedArraySort,
- "toString", TypedArrayToString,
- "toLocaleString", TypedArrayToLocaleString
- ]);
-endmacro
-
-TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
-
-})
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index ee3797fe59..f94def7c3c 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -350,6 +350,7 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HASHMAP_H_
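
Several hunks in this upgrade are the same mechanical cleanup: the combined "} } // namespace v8::internal" closer becomes one closing comment per namespace. A minimal illustration of the target convention:

// Target convention: one closing comment per namespace level.
namespace v8 {
namespace internal {

class Example {};

}  // namespace internal
}  // namespace v8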
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index c12557a9fc..7ba22fb573 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -68,6 +68,6 @@ class ArrayBufferTracker {
std::map<void*, size_t> live_array_buffers_for_scavenge_;
std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index e1f9ef43e7..4e6e6081d7 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -180,5 +180,5 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
}
-}
-}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index e27fa27d83..f7bd8d07d2 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -45,6 +45,7 @@ GCTracer::Event::Event(Type type, const char* gc_reason,
collector_reason(collector_reason),
start_time(0.0),
end_time(0.0),
+ reduce_memory(false),
start_object_size(0),
end_object_size(0),
start_memory_size(0),
@@ -138,6 +139,7 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
}
}
+ current_.reduce_memory = heap_->ShouldReduceMemory();
current_.start_time = start_time;
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
@@ -307,6 +309,13 @@ void GCTracer::AddContextDisposalTime(double time) {
}
+void GCTracer::AddCompactionEvent(double duration,
+ intptr_t live_bytes_compacted) {
+ compaction_events_.push_front(
+ CompactionEvent(duration, live_bytes_compacted));
+}
+
+
void GCTracer::AddSurvivalRatio(double promotion_ratio) {
survival_events_.push_front(SurvivalEvent(promotion_ratio));
}
@@ -391,104 +400,209 @@ void GCTracer::Print() const {
void GCTracer::PrintNVP() const {
- PrintIsolate(heap_->isolate(), "[I:%p] %8.0f ms: ", heap_->isolate(),
- heap_->isolate()->time_millis_since_init());
-
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = current_.start_time - previous_.end_time;
-
- PrintF("pause=%.1f ", duration);
- PrintF("mutator=%.1f ", spent_in_mutator);
- PrintF("gc=%s ", current_.TypeName(true));
+ intptr_t allocated_since_last_gc =
+ current_.start_object_size - previous_.end_object_size;
switch (current_.type) {
case Event::SCAVENGER:
- PrintF("scavenge=%.2f ", current_.scopes[Scope::SCAVENGER_SCAVENGE]);
- PrintF("old_new=%.2f ",
- current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS]);
- PrintF("weak=%.2f ", current_.scopes[Scope::SCAVENGER_WEAK]);
- PrintF("roots=%.2f ", current_.scopes[Scope::SCAVENGER_ROOTS]);
- PrintF("code=%.2f ",
- current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES]);
- PrintF("semispace=%.2f ", current_.scopes[Scope::SCAVENGER_SEMISPACE]);
- PrintF("object_groups=%.2f ",
- current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS]);
- PrintF("steps_count=%d ", current_.incremental_marking_steps);
- PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
- PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
- ScavengeSpeedInBytesPerMillisecond());
+ PrintIsolate(heap_->isolate(),
+ "%8.0f ms: "
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d "
+ "scavenge=%.2f "
+ "old_new=%.2f "
+ "weak=%.2f "
+ "roots=%.2f "
+ "code=%.2f "
+ "semispace=%.2f "
+ "object_groups=%.2f "
+ "steps_count=%d "
+ "steps_took=%.1f "
+ "scavenge_throughput=%" V8_PTR_PREFIX
+ "d "
+ "total_size_before=%" V8_PTR_PREFIX
+ "d "
+ "total_size_after=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_before=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_after=%" V8_PTR_PREFIX
+ "d "
+ "allocated=%" V8_PTR_PREFIX
+ "d "
+ "promoted=%" V8_PTR_PREFIX
+ "d "
+ "semi_space_copied=%" V8_PTR_PREFIX
+ "d "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "semi_space_copy_rate=%.1f%% "
+ "new_space_allocation_throughput=%" V8_PTR_PREFIX
+ "d "
+ "context_disposal_rate=%.1f\n",
+ heap_->isolate()->time_millis_since_init(), duration,
+ spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory,
+ current_.scopes[Scope::SCAVENGER_SCAVENGE],
+ current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
+ current_.scopes[Scope::SCAVENGER_WEAK],
+ current_.scopes[Scope::SCAVENGER_ROOTS],
+ current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
+ current_.scopes[Scope::SCAVENGER_SEMISPACE],
+ current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
+ current_.incremental_marking_steps,
+ current_.incremental_marking_duration,
+ ScavengeSpeedInBytesPerMillisecond(),
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
+ heap_->semi_space_copied_object_size(),
+ heap_->nodes_died_in_new_space_,
+ heap_->nodes_copied_in_new_space_, heap_->nodes_promoted_,
+ heap_->promotion_ratio_, AverageSurvivalRatio(),
+ heap_->promotion_rate_, heap_->semi_space_copied_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ ContextDisposalRateInMilliseconds());
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
- PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
- PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
- PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
- PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
- PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
- PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
- PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
- PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
- PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
- PrintF("new_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
- PrintF("root_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
- PrintF("old_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
- PrintF("compaction_ptrs=%.1f ",
- current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
- PrintF("intracompaction_ptrs=%.1f ",
- current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
- PrintF("misc_compaction=%.1f ",
- current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
- PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
- PrintF("inc_weak_closure=%.1f ",
- current_.scopes[Scope::MC_INCREMENTAL_WEAKCLOSURE]);
- PrintF("weakcollection_process=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
- PrintF("weakcollection_clear=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
- PrintF("weakcollection_abort=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
- PrintF("weakcells=%.1f ", current_.scopes[Scope::MC_WEAKCELL]);
- PrintF("nonlive_refs=%.1f ",
- current_.scopes[Scope::MC_NONLIVEREFERENCES]);
-
- PrintF("steps_count=%d ", current_.incremental_marking_steps);
- PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
- PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
- PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
- IncrementalMarkingSpeedInBytesPerMillisecond());
+ PrintIsolate(heap_->isolate(),
+ "%8.0f ms: "
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d "
+ "external=%.1f "
+ "mark=%.1f "
+ "mark_inc=%.1f "
+ "mark_prepcodeflush=%.1f "
+ "mark_root=%.1f "
+ "mark_topopt=%.1f "
+ "mark_retainmaps=%.1f "
+ "mark_weakclosure=%.1f "
+ "mark_stringtable=%.1f "
+ "mark_weakrefs=%.1f "
+ "mark_globalhandles=%.1f "
+ "mark_codeflush=%.1f "
+ "mark_optimizedcodemaps=%.1f "
+ "store_buffer_clear=%.1f "
+ "slots_buffer_clear=%.1f "
+ "sweep=%.2f "
+ "sweepns=%.2f "
+ "sweepos=%.2f "
+ "sweepcode=%.2f "
+ "sweepcell=%.2f "
+ "sweepmap=%.2f "
+ "sweepaborted=%.2f "
+ "evacuate=%.1f "
+ "new_new=%.1f "
+ "root_new=%.1f "
+ "old_new=%.1f "
+ "compaction_ptrs=%.1f "
+ "intracompaction_ptrs=%.1f "
+ "misc_compaction=%.1f "
+ "inc_weak_closure=%.1f "
+ "weakcollection_process=%.1f "
+ "weakcollection_clear=%.1f "
+ "weakcollection_abort=%.1f "
+ "weakcells=%.1f "
+ "nonlive_refs=%.1f "
+ "steps_count=%d "
+ "steps_took=%.1f "
+ "longest_step=%.1f "
+ "incremental_marking_throughput=%" V8_PTR_PREFIX
+ "d "
+ "total_size_before=%" V8_PTR_PREFIX
+ "d "
+ "total_size_after=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_before=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_after=%" V8_PTR_PREFIX
+ "d "
+ "allocated=%" V8_PTR_PREFIX
+ "d "
+ "promoted=%" V8_PTR_PREFIX
+ "d "
+ "semi_space_copied=%" V8_PTR_PREFIX
+ "d "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "semi_space_copy_rate=%.1f%% "
+ "new_space_allocation_throughput=%" V8_PTR_PREFIX
+ "d "
+ "context_disposal_rate=%.1f "
+ "compaction_speed=%" V8_PTR_PREFIX "d\n",
+ heap_->isolate()->time_millis_since_init(), duration,
+ spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory, current_.scopes[Scope::EXTERNAL],
+ current_.scopes[Scope::MC_MARK],
+ current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
+ current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
+ current_.scopes[Scope::MC_MARK_ROOT],
+ current_.scopes[Scope::MC_MARK_TOPOPT],
+ current_.scopes[Scope::MC_MARK_RETAIN_MAPS],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
+ current_.scopes[Scope::MC_MARK_STRING_TABLE],
+ current_.scopes[Scope::MC_MARK_WEAK_REFERENCES],
+ current_.scopes[Scope::MC_MARK_GLOBAL_HANDLES],
+ current_.scopes[Scope::MC_MARK_CODE_FLUSH],
+ current_.scopes[Scope::MC_MARK_OPTIMIZED_CODE_MAPS],
+ current_.scopes[Scope::MC_STORE_BUFFER_CLEAR],
+ current_.scopes[Scope::MC_SLOTS_BUFFER_CLEAR],
+ current_.scopes[Scope::MC_SWEEP],
+ current_.scopes[Scope::MC_SWEEP_NEWSPACE],
+ current_.scopes[Scope::MC_SWEEP_OLDSPACE],
+ current_.scopes[Scope::MC_SWEEP_CODE],
+ current_.scopes[Scope::MC_SWEEP_CELL],
+ current_.scopes[Scope::MC_SWEEP_MAP],
+ current_.scopes[Scope::MC_SWEEP_ABORTED],
+ current_.scopes[Scope::MC_EVACUATE_PAGES],
+ current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS],
+ current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS],
+ current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS],
+ current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED],
+ current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED],
+ current_.scopes[Scope::MC_UPDATE_MISC_POINTERS],
+ current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
+ current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS],
+ current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR],
+ current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT],
+ current_.scopes[Scope::MC_WEAKCELL],
+ current_.scopes[Scope::MC_NONLIVEREFERENCES],
+ current_.incremental_marking_steps,
+ current_.incremental_marking_duration,
+ current_.longest_incremental_marking_step,
+ IncrementalMarkingSpeedInBytesPerMillisecond(),
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
+ heap_->semi_space_copied_object_size(),
+ heap_->nodes_died_in_new_space_,
+ heap_->nodes_copied_in_new_space_, heap_->nodes_promoted_,
+ heap_->promotion_ratio_, AverageSurvivalRatio(),
+ heap_->promotion_rate_, heap_->semi_space_copied_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ ContextDisposalRateInMilliseconds(),
+ CompactionSpeedInBytesPerMillisecond());
break;
case Event::START:
break;
default:
UNREACHABLE();
}
-
- PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size);
- PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size);
- PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", current_.start_holes_size);
- PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", current_.end_holes_size);
-
- intptr_t allocated_since_last_gc =
- current_.start_object_size - previous_.end_object_size;
- PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc);
- PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size());
- PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
- heap_->semi_space_copied_object_size());
- PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
- PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
- PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
- PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
- PrintF("average_survival_ratio=%.1f%% ", AverageSurvivalRatio());
- PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
- PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
- PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
- NewSpaceAllocationThroughputInBytesPerMillisecond());
- PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
-
- PrintF("\n");
}
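
The PrintNVP rewrite above folds dozens of per-field PrintF calls into a single PrintIsolate call per collector type, so each name=value report is emitted in one piece under the isolate prefix. A hypothetical sketch of such a prefixing helper (the real PrintIsolate signature may differ):

// Hypothetical isolate-prefixed printf-style helper; illustrative only.
#include <cstdarg>
#include <cstdio>

void PrintWithTag(const void* isolate, const char* format, ...) {
  std::fprintf(stderr, "[%p] ", isolate);  // tag the whole line once
  va_list args;
  va_start(args, format);
  std::vfprintf(stderr, format, args);
  va_end(args);
}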
@@ -603,6 +717,23 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
}
+intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
+ if (compaction_events_.size() < kRingBufferMaxSize) return 0;
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
+ while (iter != compaction_events_.end()) {
+ bytes += iter->live_bytes_compacted;
+ durations += iter->duration;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<intptr_t>(static_cast<intptr_t>(bytes / durations + 0.5), 1);
+}
+
+
intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
intptr_t bytes = 0;
double durations = 0.0;
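
The new CompactionSpeedInBytesPerMillisecond follows the same averaging shape as the tracer's other throughput helpers: sum bytes and durations over the recorded events, round to nearest, and clamp to at least 1 so callers can divide by the result. A self-contained sketch of that computation, with a stand-in Event type:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Event {  // stand-in for GCTracer::CompactionEvent
  double duration_ms;
  std::int64_t live_bytes;
};

std::int64_t AverageSpeedInBytesPerMs(const std::vector<Event>& events) {
  std::int64_t bytes = 0;
  double durations = 0.0;
  for (const Event& e : events) {
    bytes += e.live_bytes;
    durations += e.duration_ms;
  }
  if (durations == 0.0) return 0;
  // Round to nearest and make sure the result is at least 1.
  return std::max<std::int64_t>(
      static_cast<std::int64_t>(bytes / durations + 0.5), 1);
}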
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index de48d23997..c60317f941 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -99,12 +99,26 @@ class GCTracer {
enum ScopeId {
EXTERNAL,
MC_MARK,
+ MC_MARK_FINISH_INCREMENTAL,
+ MC_MARK_PREPARE_CODE_FLUSH,
+ MC_MARK_ROOT,
+ MC_MARK_TOPOPT,
+ MC_MARK_RETAIN_MAPS,
+ MC_MARK_WEAK_CLOSURE,
+ MC_MARK_STRING_TABLE,
+ MC_MARK_WEAK_REFERENCES,
+ MC_MARK_GLOBAL_HANDLES,
+ MC_MARK_CODE_FLUSH,
+ MC_MARK_OPTIMIZED_CODE_MAPS,
+ MC_STORE_BUFFER_CLEAR,
+ MC_SLOTS_BUFFER_CLEAR,
MC_SWEEP,
MC_SWEEP_NEWSPACE,
MC_SWEEP_OLDSPACE,
MC_SWEEP_CODE,
MC_SWEEP_CELL,
MC_SWEEP_MAP,
+ MC_SWEEP_ABORTED,
MC_EVACUATE_PAGES,
MC_UPDATE_NEW_TO_NEW_POINTERS,
MC_UPDATE_ROOT_TO_NEW_POINTERS,
@@ -112,8 +126,7 @@ class GCTracer {
MC_UPDATE_POINTERS_TO_EVACUATED,
MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
MC_UPDATE_MISC_POINTERS,
- MC_INCREMENTAL_WEAKCLOSURE,
- MC_WEAKCLOSURE,
+ MC_INCREMENTAL_FINALIZE,
MC_WEAKCOLLECTION_PROCESS,
MC_WEAKCOLLECTION_CLEAR,
MC_WEAKCOLLECTION_ABORT,
@@ -166,6 +179,18 @@ class GCTracer {
};
+ class CompactionEvent {
+ public:
+ CompactionEvent() : duration(0), live_bytes_compacted(0) {}
+
+ CompactionEvent(double duration, intptr_t live_bytes_compacted)
+ : duration(duration), live_bytes_compacted(live_bytes_compacted) {}
+
+ double duration;
+ intptr_t live_bytes_compacted;
+ };
+
+
class ContextDisposalEvent {
public:
// Default constructor leaves the event uninitialized.
@@ -218,6 +243,9 @@ class GCTracer {
// Timestamp set in the destructor.
double end_time;
+ // Whether the heap was in memory-reducing mode during this GC.
+ bool reduce_memory;
+
// Size of objects in heap set in constructor.
intptr_t start_object_size;
@@ -299,6 +327,8 @@ class GCTracer {
typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
ContextDisposalEventBuffer;
+ typedef RingBuffer<CompactionEvent, kRingBufferMaxSize> CompactionEventBuffer;
+
typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
static const int kThroughputTimeFrameMs = 5000;
@@ -321,6 +351,8 @@ class GCTracer {
void AddContextDisposalTime(double time);
+ void AddCompactionEvent(double duration, intptr_t live_bytes_compacted);
+
void AddSurvivalRatio(double survival_ratio);
// Log an incremental marking step.
@@ -391,6 +423,10 @@ class GCTracer {
intptr_t ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode = kForAllObjects) const;
+ // Compute the average compaction speed in bytes/millisecond.
+ // Returns 0 if not enough events have been recorded.
+ intptr_t CompactionSpeedInBytesPerMillisecond() const;
+
// Compute the average mark-sweep speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
@@ -505,6 +541,9 @@ class GCTracer {
// RingBuffer for context disposal events.
ContextDisposalEventBuffer context_disposal_events_;
+ // RingBuffer for compaction events.
+ CompactionEventBuffer compaction_events_;
+
// RingBuffer for survival events.
SurvivalEventBuffer survival_events_;
@@ -555,7 +594,7 @@ class GCTracer {
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_GC_TRACER_H_
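
CompactionEventBuffer reuses the tracer's fixed-capacity RingBuffer, where push_front keeps the newest kRingBufferMaxSize samples and drops the oldest. Assuming those semantics (the real RingBuffer lives elsewhere in this header), a minimal equivalent:

#include <cstddef>
#include <deque>

// Assumed semantics of RingBuffer<T, kMaxSize>: newest element at the
// front, oldest silently dropped once capacity is reached.
template <typename T, std::size_t kMaxSize>
class BoundedEventBuffer {
 public:
  void push_front(const T& event) {
    if (buffer_.size() == kMaxSize) buffer_.pop_back();  // drop the oldest
    buffer_.push_front(event);
  }
  std::size_t size() const { return buffer_.size(); }
  typename std::deque<T>::const_iterator begin() const {
    return buffer_.begin();
  }
  typename std::deque<T>::const_iterator end() const { return buffer_.end(); }

 private:
  std::deque<T> buffer_;
};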
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index cff69b1e17..c6185c6e30 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -11,6 +11,7 @@
#include "src/counters.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/store-buffer-inl.h"
@@ -19,6 +20,7 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/type-feedback-vector-inl.h"
namespace v8 {
namespace internal {
@@ -68,6 +70,7 @@ PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#define SYMBOL_ACCESSOR(name, description) \
Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
#define ROOT_ACCESSOR(type, name, camel_name) \
@@ -700,7 +703,7 @@ void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
CHECK((*current)->IsSmi());
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_HEAP_INL_H_
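
The added WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR) expansion works because each root list is an X-macro: re-expanding the list with a different macro argument stamps out one definition per entry. Reduced to a toy list:

// Toy X-macro list: each V(name, description) entry becomes one accessor.
#define TOY_SYMBOL_LIST(V)                      \
  V(to_string_tag, "Symbol.toStringTag")        \
  V(is_concat_spreadable, "Symbol.isConcatSpreadable")

struct ToySymbols {
#define TOY_ACCESSOR(name, description) \
  const char* name() const { return description; }
  TOY_SYMBOL_LIST(TOY_ACCESSOR)  // expands to two member functions
#undef TOY_ACCESSOR
};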
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 89a76062b1..5a135f0b7b 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -52,6 +52,19 @@ struct Heap::StrongRootsList {
StrongRootsList* next;
};
+class IdleScavengeObserver : public InlineAllocationObserver {
+ public:
+ IdleScavengeObserver(Heap& heap, intptr_t step_size)
+ : InlineAllocationObserver(step_size), heap_(heap) {}
+
+ virtual void Step(int bytes_allocated) {
+ heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ }
+
+ private:
+ Heap& heap_;
+};
+
Heap::Heap()
: amount_of_external_allocated_memory_(0),
@@ -129,6 +142,7 @@ Heap::Heap()
memory_reducer_(nullptr),
object_stats_(nullptr),
scavenge_job_(nullptr),
+ idle_scavenge_observer_(nullptr),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
@@ -774,9 +788,9 @@ void Heap::HandleGCRequest() {
current_gc_callback_flags_);
return;
}
- DCHECK(FLAG_overapproximate_weak_closure);
- if (!incremental_marking()->weak_closure_was_overapproximated()) {
- OverApproximateWeakClosure("GC interrupt");
+ DCHECK(FLAG_finalize_marking_incrementally);
+ if (!incremental_marking()->finalize_marking_completed()) {
+ FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
}
}
@@ -786,14 +800,14 @@ void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
}
-void Heap::OverApproximateWeakClosure(const char* gc_reason) {
+void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
- gc_reason);
+ PrintF("[IncrementalMarking] (%s).\n", gc_reason);
}
- GCTracer::Scope gc_scope(tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
+ GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
+ HistogramTimerScope incremental_marking_scope(
+ isolate()->counters()->gc_incremental_marking_finalize());
{
GCCallbacksScope scope(this);
@@ -805,7 +819,7 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
}
}
- incremental_marking()->MarkObjectGroups();
+ incremental_marking()->FinalizeIncrementally();
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
@@ -819,6 +833,23 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
}
+HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
+ if (collector == SCAVENGER) {
+ return isolate_->counters()->gc_scavenger();
+ } else {
+ if (!incremental_marking()->IsStopped()) {
+ if (ShouldReduceMemory()) {
+ return isolate_->counters()->gc_finalize_reduce_memory();
+ } else {
+ return isolate_->counters()->gc_finalize();
+ }
+ } else {
+ return isolate_->counters()->gc_compactor();
+ }
+ }
+}
+
+
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
@@ -965,9 +996,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
GarbageCollectionPrologue();
{
- HistogramTimerScope histogram_timer_scope(
- (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor());
+ HistogramTimerScope histogram_timer_scope(GCTypeTimer(collector));
+
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
}
@@ -1075,7 +1105,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
@@ -1435,7 +1465,8 @@ void Heap::MarkCompactPrologue() {
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
public:
explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(Object** start, Object** end) {
+
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
@@ -1485,6 +1516,22 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
}
+static bool IsUnmodifiedHeapObject(Object** p) {
+ Object* object = *p;
+ DCHECK(object->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!object->IsJSObject()) return false;
+ Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
+ if (!obj_constructor->IsJSFunction()) return false;
+ JSFunction* constructor = JSFunction::cast(obj_constructor);
+ if (constructor != nullptr &&
+ constructor->initial_map() == heap_object->map()) {
+ return true;
+ }
+ return false;
+}
+
+
void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
StoreBufferEvent event) {
heap->store_buffer_rebuilder_.Callback(page, event);
@@ -1603,6 +1650,12 @@ void Heap::Scavenge() {
promotion_queue_.Initialize();
ScavengeVisitor scavenge_visitor(this);
+
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &IsUnmodifiedHeapObject);
+ }
+
{
// Copy roots.
GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
@@ -1641,7 +1694,14 @@ void Heap::Scavenge() {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
- {
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
+
+ isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ } else {
GCTracer::Scope gc_scope(tracer(),
GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
while (isolate()->global_handles()->IterateObjectGroups(
@@ -1650,14 +1710,14 @@ void Heap::Scavenge() {
}
isolate()->global_handles()->RemoveObjectGroups();
isolate()->global_handles()->RemoveImplicitRefGroups();
- }
- isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
- &IsUnscavengedHeapObject);
+ isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+ &IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
- &scavenge_visitor);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ }
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1674,8 +1734,9 @@ void Heap::Scavenge() {
// Set age mark.
new_space_.set_age_mark(new_space_.top());
- new_space_.LowerInlineAllocationLimit(
- new_space_.inline_allocation_limit_step());
+ // We start a new step without accounting for the objects copied into
+ // to-space, as those are not allocations.
+ new_space_.UpdateInlineAllocationLimitStep();
array_buffer_tracker()->FreeDead(true);
@@ -2614,11 +2675,12 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(name) \
- { \
- Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
- Handle<Object> symbol(isolate()->factory()->NewPrivateSymbol(name##d)); \
- roots_[k##name##RootIndex] = *symbol; \
+#define SYMBOL_INIT(name) \
+ { \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
+ Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
+ symbol->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *symbol; \
}
PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
@@ -2633,6 +2695,15 @@ void Heap::CreateInitialObjects() {
roots_[k##name##RootIndex] = *name;
PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
+
+#define SYMBOL_INIT(name, description) \
+ Handle<Symbol> name = factory->NewSymbol(); \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+ name->set_is_well_known_symbol(true); \
+ name->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *name;
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
}
CreateFixedStubs();
@@ -2686,18 +2757,32 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
{
- FeedbackVectorSlotKind kinds[] = {FeedbackVectorSlotKind::LOAD_IC,
- FeedbackVectorSlotKind::KEYED_LOAD_IC,
- FeedbackVectorSlotKind::STORE_IC,
- FeedbackVectorSlotKind::KEYED_STORE_IC};
- StaticFeedbackVectorSpec spec(0, 4, kinds);
+ StaticFeedbackVectorSpec spec;
+ FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
+ FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
+ FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
+ FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
+
+ DCHECK_EQ(load_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
+ DCHECK_EQ(keyed_load_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ DCHECK_EQ(store_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
+ DCHECK_EQ(keyed_store_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+
+ Handle<TypeFeedbackMetadata> dummy_metadata =
+ TypeFeedbackMetadata::New(isolate(), &spec);
Handle<TypeFeedbackVector> dummy_vector =
- factory->NewTypeFeedbackVector(&spec);
- for (int i = 0; i < 4; i++) {
- dummy_vector->Set(FeedbackVectorICSlot(0),
- *TypeFeedbackVector::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
- }
+ TypeFeedbackVector::New(isolate(), dummy_metadata);
+
+ Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
+ dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+ dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+ dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+ dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+
set_dummy_vector(*dummy_vector);
}
@@ -2735,6 +2820,8 @@ void Heap::CreateInitialObjects() {
set_weak_stack_trace_list(Smi::FromInt(0));
+ set_noscript_shared_function_infos(Smi::FromInt(0));
+
// Will be filled in by Interpreter::Initialize().
set_interpreter_table(
*interpreter::Interpreter::CreateUninitializedInterpreterTable(
@@ -2777,6 +2864,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kDetachedContextsRootIndex:
case kWeakObjectToCodeTableRootIndex:
case kRetainedMapsRootIndex:
+ case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
@@ -2977,6 +3065,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
filler->set_map_no_write_barrier(
reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
} else {
+ DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_no_write_barrier(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
FreeSpace::cast(filler)->nobarrier_set_size(size);
@@ -3365,7 +3454,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->IsInobjectSlackTrackingInProgress()) {
// We might want to shrink the object later.
- DCHECK(obj->GetInternalFieldCount() == 0);
+ DCHECK_EQ(0, obj->GetInternalFieldCount());
filler = Heap::one_pointer_filler_map();
} else {
filler = Heap::undefined_value();
@@ -3383,7 +3472,6 @@ AllocationResult Heap::AllocateJSObjectFromMap(
// Both types of global objects should be allocated using
// AllocateGlobalObject to be properly initialized.
DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
FixedArray* properties = empty_fixed_array();
@@ -3412,7 +3500,7 @@ AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
HeapObject* obj = nullptr;
- DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
+ DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
#endif
return allocation;
}
@@ -3999,11 +4087,12 @@ void Heap::ReduceNewSpaceSize() {
void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
- if (FLAG_overapproximate_weak_closure && incremental_marking()->IsMarking() &&
+ if (FLAG_finalize_marking_incrementally &&
+ incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
- (!incremental_marking()->weak_closure_was_overapproximated() &&
+ (!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_deque()->IsEmpty()))) {
- OverApproximateWeakClosure(comment);
+ FinalizeIncrementalMarking(comment);
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_deque()->IsEmpty())) {
CollectAllGarbage(current_gc_flags_, comment);
@@ -4016,14 +4105,14 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
static_cast<size_t>(
tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
- if (FLAG_overapproximate_weak_closure &&
+ if (FLAG_finalize_marking_incrementally &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
- (!incremental_marking()->weak_closure_was_overapproximated() &&
+ (!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_deque()->IsEmpty() &&
gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
static_cast<size_t>(idle_time_in_ms))))) {
- OverApproximateWeakClosure(
- "Idle notification: overapproximate weak closure");
+ FinalizeIncrementalMarking(
+ "Idle notification: finalize incremental marking");
return true;
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_deque()->IsEmpty() &&
@@ -4031,7 +4120,7 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(current_gc_flags_,
- "idle notification: finalize incremental");
+ "idle notification: finalize incremental marking");
return true;
}
return false;
@@ -4136,22 +4225,6 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
}
-void Heap::CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
- double now_ms) {
- if (idle_time_in_ms >= GCIdleTimeHandler::kMinBackgroundIdleTime) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kBackgroundIdleNotification;
- event.time_ms = now_ms;
- event.can_start_incremental_gc = incremental_marking()->IsStopped() &&
- incremental_marking()->CanBeActivated();
- memory_reducer_->NotifyBackgroundIdleNotification(event);
- optimize_for_memory_usage_ = true;
- } else {
- optimize_for_memory_usage_ = false;
- }
-}
-
-
double Heap::MonotonicallyIncreasingTimeInMs() {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
static_cast<double>(base::Time::kMillisecondsPerSecond);
@@ -4176,8 +4249,6 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
double start_ms = MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
- CheckAndNotifyBackgroundIdleNotification(idle_time_in_ms, start_ms);
-
tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
OldGenerationAllocationCounter());
@@ -4309,11 +4380,16 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
bool Heap::RootIsImmortalImmovable(int root_index) {
switch (root_index) {
-#define CASE(name) \
- case Heap::k##name##RootIndex: \
+#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
+ IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
+#undef IMMORTAL_IMMOVABLE_ROOT
+#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
+ INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#undef INTERNALIZED_STRING
+#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
+ STRING_TYPE_LIST(STRING_TYPE)
+#undef STRING_TYPE
return true;
- IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
-#undef CASE
default:
return false;
}
@@ -4714,7 +4790,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +
- FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+ FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
code_range_size_ = code_range_size * MB;
@@ -4785,7 +4861,11 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
if (stats->js_stacktrace != NULL) {
FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
- isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ if (gc_state() == Heap::NOT_IN_GC) {
+ isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ } else {
+ accumulator.Add("Cannot get stack trace in GC.");
+ }
}
}
@@ -4967,17 +5047,6 @@ void Heap::DisableInlineAllocation() {
}
-void Heap::LowerInlineAllocationLimit(intptr_t step) {
- new_space()->LowerInlineAllocationLimit(step);
-}
-
-
-void Heap::ResetInlineAllocationLimit() {
- new_space()->LowerInlineAllocationLimit(
- ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
-}
-
-
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
@@ -5086,7 +5155,9 @@ bool Heap::SetUp() {
mark_compact_collector()->SetUp();
- ResetInlineAllocationLimit();
+ idle_scavenge_observer_ = new IdleScavengeObserver(
+ *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+ new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
return true;
}
@@ -5185,6 +5256,10 @@ void Heap::TearDown() {
PrintAlloctionsHash();
}
+ new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
+ delete idle_scavenge_observer_;
+ idle_scavenge_observer_ = nullptr;
+
delete scavenge_collector_;
scavenge_collector_ = nullptr;
@@ -5343,7 +5418,7 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
class PrintHandleVisitor : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++)
PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
@@ -5362,10 +5437,10 @@ void Heap::PrintHandles() {
class CheckHandleCountVisitor : public ObjectVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
- ~CheckHandleCountVisitor() {
+ ~CheckHandleCountVisitor() override {
CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
handle_count_ += end - start;
}
@@ -5512,7 +5587,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
MarkingVisitor() : marking_stack_(10) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
@@ -5621,7 +5696,8 @@ Object* const PathTracer::kAnyGlobalObject = NULL;
class PathTracer::MarkVisitor : public ObjectVisitor {
public:
explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
+
+ void VisitPointers(Object** start, Object** end) override {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; !tracer_->found() && (p < end); p++) {
if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
@@ -5636,7 +5712,8 @@ class PathTracer::MarkVisitor : public ObjectVisitor {
class PathTracer::UnmarkVisitor : public ObjectVisitor {
public:
explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
+
+ void VisitPointers(Object** start, Object** end) override {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
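
The IdleScavengeObserver introduced above replaces the LowerInlineAllocationLimit/ResetInlineAllocationLimit plumbing with an observer that new space notifies roughly every step_size allocated bytes. A sketch of that pattern under assumed interfaces (the real InlineAllocationObserver is declared in the spaces sources):

#include <cstdint>

// Assumed shape of the observer interface; illustrative only.
class AllocationObserver {
 public:
  explicit AllocationObserver(std::intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}
  virtual ~AllocationObserver() = default;

  // Invoked by the space after roughly step_size bytes were allocated.
  virtual void Step(int bytes_allocated) = 0;

  // Called by the space on every allocation; cheap in the common case.
  void AllocationEvent(int bytes) {
    bytes_to_next_step_ -= bytes;
    if (bytes_to_next_step_ <= 0) {
      Step(bytes);
      bytes_to_next_step_ = step_size_;
    }
  }

 private:
  std::intptr_t step_size_;
  std::intptr_t bytes_to_next_step_;
};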
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index cb18ab5611..92f0ded036 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -178,7 +178,7 @@ namespace internal {
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
- V(FixedArray, dummy_vector, DummyVector) \
+ V(TypeFeedbackVector, dummy_vector, DummyVector) \
V(FixedArray, detached_contexts, DetachedContexts) \
V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
@@ -187,6 +187,7 @@ namespace internal {
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, code_stub_context, CodeStubContext) \
V(JSObject, code_stub_exports_object, CodeStubExportsObject) \
+ V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, interpreter_table, InterpreterTable) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
@@ -208,102 +209,106 @@ namespace internal {
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
-#define INTERNALIZED_STRING_LIST(V) \
- V(Object_string, "Object") \
- V(proto_string, "__proto__") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(caller_string, "caller") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(callee_string, "callee") \
- V(constructor_string, "constructor") \
- V(default_string, "default") \
- V(dot_result_string, ".result") \
- V(eval_string, "eval") \
- V(float32x4_string, "float32x4") \
- V(Float32x4_string, "Float32x4") \
- V(int32x4_string, "int32x4") \
- V(Int32x4_string, "Int32x4") \
- V(uint32x4_string, "uint32x4") \
- V(Uint32x4_string, "Uint32x4") \
- V(bool32x4_string, "bool32x4") \
- V(Bool32x4_string, "Bool32x4") \
- V(int16x8_string, "int16x8") \
- V(Int16x8_string, "Int16x8") \
- V(uint16x8_string, "uint16x8") \
- V(Uint16x8_string, "Uint16x8") \
- V(bool16x8_string, "bool16x8") \
- V(Bool16x8_string, "Bool16x8") \
- V(int8x16_string, "int8x16") \
- V(Int8x16_string, "Int8x16") \
- V(uint8x16_string, "uint8x16") \
- V(Uint8x16_string, "Uint8x16") \
- V(bool8x16_string, "bool8x16") \
- V(Bool8x16_string, "Bool8x16") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(length_string, "length") \
- V(name_string, "name") \
- V(null_string, "null") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(nan_string, "NaN") \
- V(source_string, "source") \
- V(source_url_string, "source_url") \
- V(source_mapping_url_string, "source_mapping_url") \
- V(this_string, "this") \
- V(global_string, "global") \
- V(ignore_case_string, "ignoreCase") \
- V(multiline_string, "multiline") \
- V(sticky_string, "sticky") \
- V(unicode_string, "unicode") \
- V(harmony_tolength_string, "harmony_tolength") \
- V(input_string, "input") \
- V(index_string, "index") \
- V(last_index_string, "lastIndex") \
- V(object_string, "object") \
- V(prototype_string, "prototype") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(Map_string, "Map") \
- V(Set_string, "Set") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(for_string, "for") \
- V(for_api_string, "for_api") \
- V(Date_string, "Date") \
- V(char_at_string, "CharAt") \
- V(undefined_string, "undefined") \
- V(valueOf_string, "valueOf") \
- V(stack_string, "stack") \
- V(toString_string, "toString") \
- V(toJSON_string, "toJSON") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(illegal_access_string, "illegal access") \
- V(cell_value_string, "%cell_value") \
- V(illegal_argument_string, "illegal argument") \
- V(closure_string, "(closure)") \
- V(dot_string, ".") \
- V(compare_ic_string, "==") \
- V(strict_compare_ic_string, "===") \
- V(infinity_string, "Infinity") \
- V(minus_infinity_string, "-Infinity") \
- V(query_colon_string, "(?:)") \
- V(Generator_string, "Generator") \
- V(throw_string, "throw") \
- V(done_string, "done") \
- V(value_string, "value") \
- V(next_string, "next") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(minus_zero_string, "-0") \
- V(Array_string, "Array") \
- V(Error_string, "Error") \
- V(RegExp_string, "RegExp")
+#define INTERNALIZED_STRING_LIST(V) \
+ V(anonymous_string, "anonymous") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(Array_string, "Array") \
+ V(bool16x8_string, "bool16x8") \
+ V(Bool16x8_string, "Bool16x8") \
+ V(bool32x4_string, "bool32x4") \
+ V(Bool32x4_string, "Bool32x4") \
+ V(bool8x16_string, "bool8x16") \
+ V(Bool8x16_string, "Bool8x16") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(compare_ic_string, "==") \
+ V(configurable_string, "configurable") \
+ V(constructor_string, "constructor") \
+ V(Date_string, "Date") \
+ V(default_string, "default") \
+ V(done_string, "done") \
+ V(dot_result_string, ".result") \
+ V(dot_string, ".") \
+ V(enumerable_string, "enumerable") \
+ V(Error_string, "Error") \
+ V(eval_string, "eval") \
+ V(false_string, "false") \
+ V(float32x4_string, "float32x4") \
+ V(Float32x4_string, "Float32x4") \
+ V(for_api_string, "for_api") \
+ V(for_string, "for") \
+ V(function_string, "function") \
+ V(Function_string, "Function") \
+ V(Generator_string, "Generator") \
+ V(get_string, "get") \
+ V(global_string, "global") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(infinity_string, "Infinity") \
+ V(input_string, "input") \
+ V(int16x8_string, "int16x8") \
+ V(Int16x8_string, "Int16x8") \
+ V(int32x4_string, "int32x4") \
+ V(Int32x4_string, "Int32x4") \
+ V(int8x16_string, "int8x16") \
+ V(Int8x16_string, "Int8x16") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(last_index_string, "lastIndex") \
+ V(length_string, "length") \
+ V(Map_string, "Map") \
+ V(minus_infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(name_string, "name") \
+ V(nan_string, "NaN") \
+ V(next_string, "next") \
+ V(null_string, "null") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(object_string, "object") \
+ V(Object_string, "Object") \
+ V(private_api_string, "private_api") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(query_colon_string, "(?:)") \
+ V(RegExp_string, "RegExp") \
+ V(set_string, "set") \
+ V(Set_string, "Set") \
+ V(source_mapping_url_string, "source_mapping_url") \
+ V(source_string, "source") \
+ V(source_url_string, "source_url") \
+ V(stack_string, "stack") \
+ V(strict_compare_ic_string, "===") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(uint16x8_string, "uint16x8") \
+ V(Uint16x8_string, "Uint16x8") \
+ V(uint32x4_string, "uint32x4") \
+ V(Uint32x4_string, "Uint32x4") \
+ V(uint8x16_string, "uint8x16") \
+ V(Uint8x16_string, "Uint8x16") \
+ V(undefined_string, "undefined") \
+ V(valueOf_string, "valueOf") \
+ V(value_string, "value") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(writable_string, "writable")
#define PRIVATE_SYMBOL_LIST(V) \
V(array_iteration_kind_symbol) \
@@ -331,8 +336,10 @@ namespace internal {
V(nonexistent_symbol) \
V(nonextensible_symbol) \
V(normal_ic_symbol) \
+ V(not_mapped_symbol) \
V(observed_symbol) \
V(premonomorphic_symbol) \
+ V(promise_combined_deferred_symbol) \
V(promise_debug_marker_symbol) \
V(promise_has_handler_symbol) \
V(promise_on_resolve_symbol) \
@@ -346,15 +353,24 @@ namespace internal {
V(string_iterator_next_index_symbol) \
V(uninitialized_symbol)
-#define PUBLIC_SYMBOL_LIST(V) \
- V(has_instance_symbol, Symbol.hasInstance) \
- V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
- V(is_regexp_symbol, Symbol.isRegExp) \
- V(iterator_symbol, Symbol.iterator) \
- V(to_primitive_symbol, Symbol.toPrimitive) \
- V(to_string_tag_symbol, Symbol.toStringTag) \
+#define PUBLIC_SYMBOL_LIST(V) \
+ V(has_instance_symbol, Symbol.hasInstance) \
+ V(iterator_symbol, Symbol.iterator) \
+ V(match_symbol, Symbol.match) \
+ V(replace_symbol, Symbol.replace) \
+ V(search_symbol, Symbol.search) \
+ V(split_symbol, Symbol.split) \
+ V(to_primitive_symbol, Symbol.toPrimitive) \
V(unscopables_symbol, Symbol.unscopables)
+// Well-Known Symbols are "Public" symbols with a bit set that causes them
+// to produce an undefined value when a load results in a failed access
+// check. Because this behaviour is not yet properly specified, it only
+// applies to a subset of spec-defined Well-Known Symbols.
+#define WELL_KNOWN_SYMBOL_LIST(V) \
+ V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+ V(to_string_tag_symbol, Symbol.toStringTag)
+
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
@@ -428,6 +444,7 @@ class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
+class HistogramTimer;
class Isolate;
class MemoryReducer;
class ObjectStats;
@@ -565,13 +582,14 @@ class Heap {
#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
+ kStringTableRootIndex,
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -605,25 +623,6 @@ class Heap {
Heap* heap_;
};
- // An optional version of the above lock that can be used for some critical
- // sections on the mutator thread; only safe since the GC currently does not
- // do concurrent compaction.
- class OptionalRelocationLock {
- public:
- OptionalRelocationLock(Heap* heap, bool concurrent)
- : heap_(heap), concurrent_(concurrent) {
- if (concurrent_) heap_->relocation_mutex_.Lock();
- }
-
- ~OptionalRelocationLock() {
- if (concurrent_) heap_->relocation_mutex_.Unlock();
- }
-
- private:
- Heap* heap_;
- bool concurrent_;
- };
-
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
struct Chunk {
@@ -1015,6 +1014,8 @@ class Heap {
bool HasHighFragmentation();
bool HasHighFragmentation(intptr_t used, intptr_t committed);
+ void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
+ void SetOptimizeForMemoryUsage() { optimize_for_memory_usage_ = true; }
bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
// ===========================================================================
@@ -1125,6 +1126,7 @@ class Heap {
#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
Object* root(RootListIndex index) { return roots_[index]; }
@@ -1165,6 +1167,10 @@ class Heap {
roots_[kStringTableRootIndex] = value;
}
+ void SetRootNoScriptSharedFunctionInfos(Object* value) {
+ roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
+ }
+
// Set the stack limit in the roots_ array. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
@@ -1792,8 +1798,6 @@ class Heap {
void IdleNotificationEpilogue(GCIdleTimeAction action,
GCIdleTimeHeapState heap_state, double start_ms,
double deadline_in_ms);
- void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
- double now_ms);
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
@@ -1806,7 +1810,15 @@ class Heap {
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
// objects that die later.
- void OverApproximateWeakClosure(const char* gc_reason);
+ void FinalizeIncrementalMarking(const char* gc_reason);
+
+ // Returns the timer used for a given GC type.
+ // - GCScavenger: young generation GC
+ // - GCCompactor: full GC
+ // - GCFinalizeMC: finalization of incremental full GC
+ // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
+ // memory reduction
+ HistogramTimer* GCTypeTimer(GarbageCollector collector);
// ===========================================================================
// Actual GC. ================================================================
@@ -1884,13 +1896,6 @@ class Heap {
double mutator_speed);
// ===========================================================================
- // Inline allocation. ========================================================
- // ===========================================================================
-
- void LowerInlineAllocationLimit(intptr_t step);
- void ResetInlineAllocationLimit();
-
- // ===========================================================================
// Idle notification. ========================================================
// ===========================================================================
@@ -2277,6 +2282,8 @@ class Heap {
ScavengeJob* scavenge_job_;
+ InlineAllocationObserver* idle_scavenge_observer_;
+
// These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
size_t crankshaft_codegen_bytes_generated_;
@@ -2347,6 +2354,7 @@ class Heap {
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapIterator;
+ friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
@@ -2417,14 +2425,14 @@ class AlwaysAllocateScope {
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
public:
- inline void VisitPointers(Object** start, Object** end);
+ inline void VisitPointers(Object** start, Object** end) override;
};
// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
public:
- inline void VisitPointers(Object** start, Object** end);
+ inline void VisitPointers(Object** start, Object** end) override;
};
@@ -2685,7 +2693,7 @@ class PathTracer : public ObjectVisitor {
object_stack_(20),
no_allocation() {}
- virtual void VisitPointers(Object** start, Object** end);
+ void VisitPointers(Object** start, Object** end) override;
void Reset();
void TracePathFrom(Object** root);
@@ -2715,7 +2723,7 @@ class PathTracer : public ObjectVisitor {
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif // DEBUG
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_HEAP_H_
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 5988426fd5..0d55b83a9d 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -6,42 +6,11 @@
#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
#include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
- Object* value) {
- HeapObject* value_heap_obj = HeapObject::cast(value);
- MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- if (chunk->IsLeftOfProgressBar(slot)) {
- WhiteToGreyAndPush(value_heap_obj, value_bit);
- RestartIfNotMarking();
- } else {
- return false;
- }
- } else {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- return false;
- }
- } else {
- return false;
- }
- }
- if (!is_compacting_) return false;
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- return Marking::IsBlack(obj_bit);
-}
-
-
void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
@@ -52,7 +21,9 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value) {
- if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+ if (IsMarking()) {
+ RecordWriteOfCodeEntrySlow(host, slot, value);
+ }
}
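// [Editorial sketch, not part of the patch] The RecordWrite* helpers in this
// header share one pattern: an inlined guard tests IsMarking() so the only
// cost at a write site while marking is off is a single predictable branch;
// the real work lives in an out-of-line *Slow function. A minimal standalone
// equivalent, with hypothetical names:
struct WriteBarrier {
  bool marking = false;
  int slow_calls = 0;
  void RecordWriteSlow(void** /*slot*/) { ++slow_calls; }  // out-of-line work
  void RecordWrite(void** slot) {        // inlined at every store site
    if (marking) RecordWriteSlow(slot);  // almost always false in practice
  }
};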
@@ -64,57 +35,7 @@ void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
}
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- chunk->set_progress_bar(0);
- }
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
- MarkBit mark_bit) {
- DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
- DCHECK(obj->Size() >= 2 * kPointerSize);
- DCHECK(IsMarking());
- Marking::BlackToGrey(mark_bit);
- int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
- bytes_scanned_ -= obj_size;
- int64_t old_bytes_rescanned = bytes_rescanned_;
- bytes_rescanned_ = old_bytes_rescanned + obj_size;
- if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
- // If we have queued twice the heap size for rescanning then we are
- // going around in circles, scanning the same objects again and again
- // as the program mutates the heap faster than we can incrementally
- // trace it. In this case we switch to non-incremental marking in
- // order to finish off this marking phase.
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(
- heap()->isolate(),
- "Hurrying incremental marking because of lack of progress\n");
- }
- marking_speed_ = kMaxMarkingSpeed;
- }
- }
-
- heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- Marking::WhiteToGrey(mark_bit);
- heap_->mark_compact_collector()->marking_deque()->Push(obj);
-}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_INCREMENTAL_MARKING_INL_H_
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 43e8b7628f..7e8e5f251f 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -53,7 +53,7 @@ void IncrementalMarkingJob::ScheduleIdleTask(Heap* heap) {
void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
- if (!delayed_task_pending_) {
+ if (!delayed_task_pending_ && FLAG_memory_reducer) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
delayed_task_pending_ = true;
made_progress_since_last_delayed_task_ = false;
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index fad46c1246..c998139a92 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -75,7 +75,7 @@ class IncrementalMarkingJob {
bool delayed_task_pending_;
bool made_progress_since_last_delayed_task_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_INCREMENTAL_MARKING_JOB_H_
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index cbc26516bb..94d8d946f1 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -17,7 +17,6 @@
namespace v8 {
namespace internal {
-
IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
@@ -27,6 +26,7 @@ IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
+ observer_(*this, kAllocatedThreshold),
state_(STOPPED),
is_compacting_(false),
steps_count_(0),
@@ -42,20 +42,47 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
no_marking_scope_depth_(0),
unscanned_bytes_of_large_object_(0),
was_activated_(false),
- weak_closure_was_overapproximated_(false),
- weak_closure_approximation_rounds_(0),
+ finalize_marking_completed_(false),
+ incremental_marking_finalization_rounds_(0),
request_type_(COMPLETE_MARKING) {}
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
Object* value) {
- if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
+ HeapObject* value_heap_obj = HeapObject::cast(value);
+ MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+ if (Marking::IsWhite(value_bit)) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ if (chunk->IsLeftOfProgressBar(slot)) {
+ WhiteToGreyAndPush(value_heap_obj, value_bit);
+ RestartIfNotMarking();
+ } else {
+ return false;
+ }
+ } else {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ return false;
+ }
+ } else {
+ return false;
}
}
+ if (!is_compacting_) return false;
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ return Marking::IsBlack(obj_bit);
+}
+
+
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
+ Object* value) {
+ if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
+    // Object is not going to be rescanned, so we need to record the slot.
+ heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
+ }
}
@@ -134,6 +161,58 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
}
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+ if (IsMarking()) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ chunk->set_progress_bar(0);
+ }
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ }
+ }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+ MarkBit mark_bit) {
+ DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+ DCHECK(obj->Size() >= 2 * kPointerSize);
+ DCHECK(IsMarking());
+ Marking::BlackToGrey(mark_bit);
+ int obj_size = obj->Size();
+ MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
+ bytes_scanned_ -= obj_size;
+ int64_t old_bytes_rescanned = bytes_rescanned_;
+ bytes_rescanned_ = old_bytes_rescanned + obj_size;
+ if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+ if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+ // If we have queued twice the heap size for rescanning then we are
+ // going around in circles, scanning the same objects again and again
+ // as the program mutates the heap faster than we can incrementally
+ // trace it. In this case we switch to non-incremental marking in
+ // order to finish off this marking phase.
+ if (FLAG_trace_incremental_marking) {
+ PrintIsolate(
+ heap()->isolate(),
+ "Hurrying incremental marking because of lack of progress\n");
+ }
+ marking_speed_ = kMaxMarkingSpeed;
+ }
+ }
+
+ heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
+}
+
+
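// [Editorial note, not part of the patch] Two details of BlackToGreyAndUnshift
// above are easy to miss. The guard
//   (bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)
// fires only when the counter crosses a 1 MB boundary (>> 20 divides by 2^20),
// so the heap-size comparison runs at most once per rescanned megabyte; and
// once more than twice the promoted heap has been queued for rescanning, the
// marking speed is pinned to kMaxMarkingSpeed to force the phase to terminate.
#include <cstdint>
inline bool CrossedMegabyteBoundary(int64_t before, int64_t after) {
  return (after >> 20) != (before >> 20);
}
// CrossedMegabyteBoundary(1048575, 1048576) is true (1 MB boundary crossed);
// CrossedMegabyteBoundary(10, 500000) is false (both still below 1 MB).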
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+ Marking::WhiteToGrey(mark_bit);
+ heap_->mark_compact_collector()->marking_deque()->Push(obj);
+}
+
+
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
@@ -270,9 +349,9 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
IncrementalMarking* incremental_marking)
: heap_(incremental_marking->heap()) {}
- void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+ void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -474,6 +553,8 @@ void IncrementalMarking::Start(const char* reason) {
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(!heap_->isolate()->serializer_enabled());
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking_start());
ResetStepCounters();
was_activated_ = true;
@@ -487,7 +568,8 @@ void IncrementalMarking::Start(const char* reason) {
state_ = SWEEPING;
}
- heap_->LowerInlineAllocationLimit(kAllocatedThreshold);
+ heap_->new_space()->AddInlineAllocationObserver(&observer_);
+
incremental_marking_job()->Start(heap_);
}
@@ -541,33 +623,56 @@ void IncrementalMarking::StartMarking() {
}
-void IncrementalMarking::MarkObjectGroups() {
- DCHECK(FLAG_overapproximate_weak_closure);
- DCHECK(!weak_closure_was_overapproximated_);
+void IncrementalMarking::MarkRoots() {
+ DCHECK(FLAG_finalize_marking_incrementally);
+ DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
- int old_marking_deque_top =
- heap_->mark_compact_collector()->marking_deque()->top();
+ IncrementalMarkingRootMarkingVisitor visitor(this);
+ heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
- heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
+
+void IncrementalMarking::MarkObjectGroups() {
+ DCHECK(FLAG_finalize_marking_incrementally);
+ DCHECK(!finalize_marking_completed_);
+ DCHECK(IsMarking());
IncrementalMarkingRootMarkingVisitor visitor(this);
+ heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
heap_->isolate()->global_handles()->IterateObjectGroups(
&visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
+ heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
+ heap_->isolate()->global_handles()->RemoveObjectGroups();
+}
+
+
+void IncrementalMarking::FinalizeIncrementally() {
+ DCHECK(FLAG_finalize_marking_incrementally);
+ DCHECK(!finalize_marking_completed_);
+ DCHECK(IsMarking());
+
+ int old_marking_deque_top =
+ heap_->mark_compact_collector()->marking_deque()->top();
+
+ // After finishing incremental marking, we try to discover all unmarked
+ // objects to reduce the marking load in the final pause.
+ // 1) We scan and mark the roots again to find all changes to the root set.
+ // 2) We mark the object groups.
+ MarkRoots();
+ MarkObjectGroups();
int marking_progress =
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
- ++weak_closure_approximation_rounds_;
- if ((weak_closure_approximation_rounds_ >=
- FLAG_max_object_groups_marking_rounds) ||
- (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
- weak_closure_was_overapproximated_ = true;
+ ++incremental_marking_finalization_rounds_;
+ if ((incremental_marking_finalization_rounds_ >=
+ FLAG_max_incremental_marking_finalization_rounds) ||
+ (marking_progress <
+ FLAG_min_progress_during_incremental_marking_finalization)) {
+ finalize_marking_completed_ = true;
}
-
- heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
- heap_->isolate()->global_handles()->RemoveObjectGroups();
}
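// [Editorial sketch, not part of the patch] The termination rule encoded
// above, isolated: incremental finalization stops either after a bounded
// number of rounds or as soon as one round makes too little progress on the
// marking deque. Parameter names stand in for the FLAG_* values.
inline bool FinalizationDone(int rounds, int progress,
                             int max_rounds, int min_progress) {
  return rounds >= max_rounds || progress < min_progress;
}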
@@ -739,7 +844,8 @@ void IncrementalMarking::Stop() {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Stopping.\n");
}
- heap_->ResetInlineAllocationLimit();
+
+ heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
@@ -767,7 +873,8 @@ void IncrementalMarking::Finalize() {
Hurry();
state_ = STOPPED;
is_compacting_ = false;
- heap_->ResetInlineAllocationLimit();
+
+ heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -778,13 +885,15 @@ void IncrementalMarking::Finalize() {
}
-void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
- DCHECK(FLAG_overapproximate_weak_closure);
- DCHECK(!weak_closure_was_overapproximated_);
+void IncrementalMarking::FinalizeMarking(CompletionAction action) {
+ DCHECK(FLAG_finalize_marking_incrementally);
+ DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
+ PrintF(
+ "[IncrementalMarking] requesting finalization of incremental "
+ "marking.\n");
}
- request_type_ = OVERAPPROXIMATION;
+ request_type_ = FINALIZATION;
if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
@@ -811,8 +920,8 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
void IncrementalMarking::Epilogue() {
was_activated_ = false;
- weak_closure_was_overapproximated_ = false;
- weak_closure_approximation_rounds_ = 0;
+ finalize_marking_completed_ = false;
+ incremental_marking_finalization_rounds_ = 0;
}
@@ -990,9 +1099,9 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
- if (FLAG_overapproximate_weak_closure &&
- !weak_closure_was_overapproximated_) {
- OverApproximateWeakClosure(action);
+ if (FLAG_finalize_marking_incrementally &&
+ !finalize_marking_completed_) {
+ FinalizeMarking(action);
} else {
MarkingComplete(action);
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 010392875e..3ab0f8d6c4 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -8,6 +8,7 @@
#include "src/cancelable-task.h"
#include "src/execution.h"
#include "src/heap/incremental-marking-job.h"
+#include "src/heap/spaces.h"
#include "src/objects.h"
namespace v8 {
@@ -27,7 +28,7 @@ class IncrementalMarking {
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
- enum GCRequestType { COMPLETE_MARKING, OVERAPPROXIMATION };
+ enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
struct StepActions {
StepActions(CompletionAction complete_action_,
@@ -56,12 +57,12 @@ class IncrementalMarking {
bool should_hurry() { return should_hurry_; }
void set_should_hurry(bool val) { should_hurry_ = val; }
- bool weak_closure_was_overapproximated() const {
- return weak_closure_was_overapproximated_;
+ bool finalize_marking_completed() const {
+ return finalize_marking_completed_;
}
void SetWeakClosureWasOverApproximatedForTesting(bool val) {
- weak_closure_was_overapproximated_ = val;
+ finalize_marking_completed_ = val;
}
inline bool IsStopped() { return state() == STOPPED; }
@@ -73,8 +74,7 @@ class IncrementalMarking {
inline bool IsComplete() { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
- return request_type_ == OVERAPPROXIMATION &&
- !weak_closure_was_overapproximated_;
+ return request_type_ == FINALIZATION && !finalize_marking_completed_;
}
GCRequestType request_type() const { return request_type_; }
@@ -87,7 +87,7 @@ class IncrementalMarking {
void Start(const char* reason = nullptr);
- void MarkObjectGroups();
+ void FinalizeIncrementally();
void UpdateMarkingDequeAfterScavenge();
@@ -97,7 +97,7 @@ class IncrementalMarking {
void Stop();
- void OverApproximateWeakClosure(CompletionAction action);
+ void FinalizeMarking(CompletionAction action);
void MarkingComplete(CompletionAction action);
@@ -174,11 +174,11 @@ class IncrementalMarking {
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
- inline void RecordWrites(HeapObject* obj);
+ void RecordWrites(HeapObject* obj);
- inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+ void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
- inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+ void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
@@ -215,6 +215,21 @@ class IncrementalMarking {
}
private:
+ class Observer : public InlineAllocationObserver {
+ public:
+ Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+ : InlineAllocationObserver(step_size),
+ incremental_marking_(incremental_marking) {}
+
+ virtual void Step(int bytes_allocated) {
+ incremental_marking_.Step(bytes_allocated,
+ IncrementalMarking::GC_VIA_STACK_GUARD);
+ }
+
+ private:
+ IncrementalMarking& incremental_marking_;
+ };
+
int64_t SpaceLeftInOldSpace();
void SpeedUp();
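// [Editorial sketch, not part of the patch] The Observer class above is the
// mechanism that replaces LowerInlineAllocationLimit in this patch: the new
// space calls back every step_size allocated bytes, and the callback turns
// allocation volume into marking work. A standalone rendering of that shape:
#include <cstdint>
class StepObserver {
 public:
  explicit StepObserver(intptr_t step_size) : step_size_(step_size) {}
  virtual ~StepObserver() = default;
  virtual void Step(int bytes_allocated) = 0;  // invoked by the space
  intptr_t step_size() const { return step_size_; }
 private:
  intptr_t step_size_;
};

class MarkingStepper final : public StepObserver {
 public:
  using StepObserver::StepObserver;
  void Step(int bytes_allocated) override {
    stepped_bytes_ += bytes_allocated;  // stand-in for IncrementalMarking::Step
  }
  int64_t stepped_bytes_ = 0;
};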
@@ -223,6 +238,9 @@ class IncrementalMarking {
void StartMarking();
+ void MarkRoots();
+ void MarkObjectGroups();
+
void ActivateIncrementalWriteBarrier(PagedSpace* space);
static void ActivateIncrementalWriteBarrier(NewSpace* space);
void ActivateIncrementalWriteBarrier();
@@ -246,6 +264,8 @@ class IncrementalMarking {
Heap* heap_;
+ Observer observer_;
+
State state_;
bool is_compacting_;
@@ -266,9 +286,9 @@ class IncrementalMarking {
bool was_activated_;
- bool weak_closure_was_overapproximated_;
+ bool finalize_marking_completed_;
- int weak_closure_approximation_rounds_;
+ int incremental_marking_finalization_rounds_;
GCRequestType request_type_;
@@ -276,7 +296,7 @@ class IncrementalMarking {
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_INCREMENTAL_MARKING_H_
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 6e3ebd7fc7..a539c64b14 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -99,14 +99,6 @@ void CodeFlusher::AddCandidate(JSFunction* function) {
}
-void CodeFlusher::AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
- SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
- optimized_code_map_holder_head_ = code_map_holder;
- }
-}
-
-
JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
@@ -148,26 +140,6 @@ void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
-
-SharedFunctionInfo* CodeFlusher::GetNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
- return reinterpret_cast<SharedFunctionInfo*>(next_map);
-}
-
-
-void CodeFlusher::SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
-}
-
-
-void CodeFlusher::ClearNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 9e317e7d08..ffda9f159d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -74,7 +74,7 @@ class VerifyMarkingVisitor : public ObjectVisitor {
public:
explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -83,7 +83,7 @@ class VerifyMarkingVisitor : public ObjectVisitor {
}
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
@@ -91,7 +91,7 @@ class VerifyMarkingVisitor : public ObjectVisitor {
}
}
- void VisitCell(RelocInfo* rinfo) {
+ void VisitCell(RelocInfo* rinfo) override {
Code* code = rinfo->host();
DCHECK(rinfo->rmode() == RelocInfo::CELL);
if (!code->IsWeakObject(rinfo->target_cell())) {
@@ -168,7 +168,7 @@ static void VerifyMarking(Heap* heap) {
class VerifyEvacuationVisitor : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -286,8 +286,8 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
TraceFragmentation(heap()->map_space());
}
- heap()->old_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
+ heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
compacting_ = evacuation_candidates_.length() > 0;
}
@@ -297,12 +297,20 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
- heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_STORE_BUFFER_CLEAR);
+ heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+ }
- int number_of_pages = evacuation_candidates_.length();
- for (int i = 0; i < number_of_pages; i++) {
- Page* p = evacuation_candidates_[i];
- SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SLOTS_BUFFER_CLEAR);
+ int number_of_pages = evacuation_candidates_.length();
+ for (int i = 0; i < number_of_pages; i++) {
+ Page* p = evacuation_candidates_[i];
+ SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+ }
}
}
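// [Editorial sketch, not part of the patch] This hunk wraps each sub-phase in
// a block-scoped GCTracer::Scope so that entering and leaving the block starts
// and stops the phase timer. The underlying RAII idiom, standalone:
#include <chrono>
#include <cstdio>
class ScopedPhaseTimer {
 public:
  explicit ScopedPhaseTimer(const char* phase)
      : phase_(phase), start_(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s took %lld us\n", phase_, static_cast<long long>(us));
  }
 private:
  const char* phase_;
  std::chrono::steady_clock::time_point start_;
};
// Usage mirrors the diff:
//   { ScopedPhaseTimer t("store_buffer_clear"); /* clear entries */ }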
@@ -547,6 +555,14 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
}
+void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
+ if (heap()->concurrent_sweeping_enabled() && !IsSweepingCompleted()) {
+ SweepInParallel(heap()->paged_space(space->identity()), 0);
+ space->RefillFreeList();
+ }
+}
+
+
void MarkCompactCollector::EnsureSweepingCompleted() {
DCHECK(sweeping_in_progress_ == true);
@@ -566,12 +582,9 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
- RefillFreeList(heap()->paged_space(OLD_SPACE));
- RefillFreeList(heap()->paged_space(CODE_SPACE));
- RefillFreeList(heap()->paged_space(MAP_SPACE));
- heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
- heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
- heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
+ heap()->old_space()->RefillFreeList();
+ heap()->code_space()->RefillFreeList();
+ heap()->map_space()->RefillFreeList();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
@@ -591,27 +604,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
}
-void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
- FreeList* free_list;
-
- if (space == heap()->old_space()) {
- free_list = free_list_old_space_.get();
- } else if (space == heap()->code_space()) {
- free_list = free_list_code_space_.get();
- } else if (space == heap()->map_space()) {
- free_list = free_list_map_space_.get();
- } else {
- // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
- // to only refill them for the old space.
- return;
- }
-
- intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
- space->AddToAccountingStats(freed_bytes);
- space->DecrementUnsweptFreeBytes(freed_bytes);
-}
-
-
void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -702,8 +694,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int candidate_count = 0;
int total_live_bytes = 0;
- bool reduce_memory =
- heap()->ShouldReduceMemory() || heap()->HasLowAllocationRate();
+ bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
@@ -993,85 +984,6 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
}
-void CodeFlusher::ProcessOptimizedCodeMaps() {
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
-
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
-
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
- ClearNextCodeMap(holder);
-
- // Process context-dependent entries in the optimized code map.
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- int new_length = SharedFunctionInfo::kEntriesStart;
- int old_length = code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
- i += SharedFunctionInfo::kEntryLength) {
- // Each entry contains [ context, code, literals, ast-id ] as fields.
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
- Context* context =
- Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
- HeapObject* code = HeapObject::cast(
- code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
- FixedArray* literals = FixedArray::cast(
- code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
- Smi* ast_id =
- Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
- if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
- if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
- if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
- // Move every slot in the entry and record slots when needed.
- code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
- code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
- code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
- code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
- Object** code_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kCachedCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_map, code_slot, *code_slot);
- Object** context_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kContextOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_map, context_slot, *context_slot);
- Object** literals_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kLiteralsOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_map, literals_slot, *literals_slot);
- new_length += SharedFunctionInfo::kEntryLength;
- }
-
- // Process context-independent entry in the optimized code map.
- Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
- if (shared_object->IsCode()) {
- Code* shared_code = Code::cast(shared_object);
- if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
- code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
- } else {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
- Object** slot =
- code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
- isolate_->heap()->mark_compact_collector()->RecordSlot(code_map, slot,
- *slot);
- }
- }
-
- // Trim the optimized code map if entries have been removed.
- if (new_length < old_length) {
- holder->TrimOptimizedCodeMap(old_length - new_length);
- }
-
- holder = next_holder;
- }
-
- optimized_code_map_holder_head_ = NULL;
-}
-
-
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1142,44 +1054,6 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
}
-void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- FixedArray* code_map =
- FixedArray::cast(code_map_holder->optimized_code_map());
- DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(code_map);
- isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons code-map: ");
- code_map_holder->ShortPrint();
- PrintF("]\n");
- }
-
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
- if (holder == code_map_holder) {
- next_holder = GetNextCodeMap(code_map_holder);
- optimized_code_map_holder_head_ = next_holder;
- ClearNextCodeMap(code_map_holder);
- } else {
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
-
- if (next_holder == code_map_holder) {
- next_holder = GetNextCodeMap(code_map_holder);
- SetNextCodeMap(holder, next_holder);
- ClearNextCodeMap(code_map_holder);
- break;
- }
-
- holder = next_holder;
- }
- }
-}
-
-
void CodeFlusher::EvictJSFunctionCandidates() {
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
@@ -1204,18 +1078,6 @@ void CodeFlusher::EvictSharedFunctionInfoCandidates() {
}
-void CodeFlusher::EvictOptimizedCodeMaps() {
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
- EvictOptimizedCodeMap(holder);
- holder = next_holder;
- }
- DCHECK(optimized_code_map_holder_head_ == NULL);
-}
-
-
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
Heap* heap = isolate_->heap();
@@ -1437,11 +1299,11 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) VisitPointer(p);
}
- void VisitPointer(Object** slot) {
+ void VisitPointer(Object** slot) override {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
@@ -1469,8 +1331,9 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
- MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
- frame->LookupCode());
+ Code* optimized_code = frame->LookupCode();
+ MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
+ MarkObject(optimized_code, optimized_code_mark);
}
}
}
@@ -1511,15 +1374,15 @@ class RootMarkingVisitor : public ObjectVisitor {
explicit RootMarkingVisitor(Heap* heap)
: collector_(heap->mark_compact_collector()) {}
- void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+ void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
// Skip the weak next code link in a code object, which is visited in
// ProcessTopOptimizedFrame.
- void VisitNextCodeLink(Object** p) {}
+ void VisitNextCodeLink(Object** p) override {}
private:
void MarkObjectByPointer(Object** p) {
@@ -1554,7 +1417,7 @@ class StringTableCleaner : public ObjectVisitor {
public:
explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
Object* o = *p;
@@ -1724,8 +1587,8 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
if (!new_space->AddFreshPage()) {
// Shouldn't happen. We are sweeping linearly, and to-space
// has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
+ // always room unless we are in an OOM situation.
+ FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
}
allocation = new_space->AllocateRaw(size, alignment);
DCHECK(!allocation.IsRetry());
@@ -2112,14 +1975,18 @@ void MarkCompactCollector::MarkLiveObjects() {
// with the C stack limit check.
PostponeInterruptsScope postpone(isolate());
- IncrementalMarking* incremental_marking = heap_->incremental_marking();
- if (was_marked_incrementally_) {
- incremental_marking->Finalize();
- } else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Stop();
- if (marking_deque_.in_use()) {
- marking_deque_.Uninitialize(true);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
+ IncrementalMarking* incremental_marking = heap_->incremental_marking();
+ if (was_marked_incrementally_) {
+ incremental_marking->Finalize();
+ } else {
+ // Abort any pending incremental activities e.g. incremental sweeping.
+ incremental_marking->Stop();
+ if (marking_deque_.in_use()) {
+ marking_deque_.Uninitialize(true);
+ }
}
}
@@ -2131,20 +1998,36 @@ void MarkCompactCollector::MarkLiveObjects() {
EnsureMarkingDequeIsCommittedAndInitialize(
MarkCompactCollector::kMaxMarkingDequeSize);
- PrepareForCodeFlushing();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
+ PrepareForCodeFlushing();
+ }
RootMarkingVisitor root_visitor(heap());
- MarkRoots(&root_visitor);
- ProcessTopOptimizedFrame(&root_visitor);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOT);
+ MarkRoots(&root_visitor);
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_TOPOPT);
+ ProcessTopOptimizedFrame(&root_visitor);
+ }
// Retaining dying maps should happen before or during ephemeral marking
// because a map could keep the key of an ephemeron alive. Note that map
// aging is imprecise: maps that are kept alive only by ephemerons will age.
- RetainMaps();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_RETAIN_MAPS);
+ RetainMaps();
+ }
{
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
@@ -2181,31 +2064,55 @@ void MarkCompactCollector::MarkLiveObjects() {
void MarkCompactCollector::AfterMarking() {
- // Prune the string table removing all strings only pointed to by the
- // string table. Cannot use string_table() here because the string
- // table is marked.
- StringTable* string_table = heap()->string_table();
- InternalizedStringTableCleaner internalized_visitor(heap());
- string_table->IterateElements(&internalized_visitor);
- string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_STRING_TABLE);
+
+    // Prune the string table, removing all strings only pointed to by the
+ // string table. Cannot use string_table() here because the string
+ // table is marked.
+ StringTable* string_table = heap()->string_table();
+ InternalizedStringTableCleaner internalized_visitor(heap());
+ string_table->IterateElements(&internalized_visitor);
+ string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.Iterate(&external_visitor);
+ heap()->external_string_table_.CleanUp();
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_REFERENCES);
- ExternalStringTableCleaner external_visitor(heap());
- heap()->external_string_table_.Iterate(&external_visitor);
- heap()->external_string_table_.CleanUp();
+ // Process the weak references.
+ MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+ heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
+ }
- // Process the weak references.
- MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_GLOBAL_HANDLES);
- // Remove object groups after marking phase.
- heap()->isolate()->global_handles()->RemoveObjectGroups();
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+ // Remove object groups after marking phase.
+ heap()->isolate()->global_handles()->RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+ }
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_CODE_FLUSH);
code_flusher_->ProcessCandidates();
}
+ // Process and clear all optimized code maps.
+ if (!FLAG_flush_optimized_code_cache) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_OPTIMIZED_CODE_MAPS);
+ ProcessAndClearOptimizedCodeMaps();
+ }
+
if (FLAG_track_gc_object_stats) {
if (FLAG_trace_gc_object_stats) {
heap()->object_stats_->TraceObjectStats();
@@ -2215,6 +2122,72 @@ void MarkCompactCollector::AfterMarking() {
}
+void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() {
+ SharedFunctionInfo::Iterator iterator(isolate());
+ while (SharedFunctionInfo* shared = iterator.Next()) {
+ if (shared->optimized_code_map()->IsSmi()) continue;
+
+ // Process context-dependent entries in the optimized code map.
+ FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+ int new_length = SharedFunctionInfo::kEntriesStart;
+ int old_length = code_map->length();
+ for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
+ i += SharedFunctionInfo::kEntryLength) {
+ // Each entry contains [ context, code, literals, ast-id ] as fields.
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+ Context* context =
+ Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
+ HeapObject* code = HeapObject::cast(
+ code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ FixedArray* literals = FixedArray::cast(
+ code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
+ Smi* ast_id =
+ Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
+ if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
+ if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
+ if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
+ // Move every slot in the entry and record slots when needed.
+ code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
+ code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
+ code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
+ code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
+ Object** code_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kCachedCodeOffset);
+ RecordSlot(code_map, code_slot, *code_slot);
+ Object** context_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kContextOffset);
+ RecordSlot(code_map, context_slot, *context_slot);
+ Object** literals_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kLiteralsOffset);
+ RecordSlot(code_map, literals_slot, *literals_slot);
+ new_length += SharedFunctionInfo::kEntryLength;
+ }
+
+ // Process context-independent entry in the optimized code map.
+ Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
+ if (shared_object->IsCode()) {
+ Code* shared_code = Code::cast(shared_object);
+ if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
+ code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
+ } else {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
+ Object** slot =
+ code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
+ RecordSlot(code_map, slot, *slot);
+ }
+ }
+
+ // Trim the optimized code map if entries have been removed.
+ if (new_length < old_length) {
+ shared->TrimOptimizedCodeMap(old_length - new_length);
+ }
+ }
+}
+
+
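// [Editorial sketch, not part of the patch] The entry-processing loop above is
// a classic two-index, in-place compaction: a read index visits every entry, a
// write index receives the survivors, and the container is trimmed to the
// write index afterwards (TrimOptimizedCodeMap). The same scheme over a plain
// vector:
#include <cstddef>
#include <vector>
template <typename T, typename Pred>
void CompactInPlace(std::vector<T>* entries, Pred is_live) {
  std::size_t write = 0;
  for (std::size_t read = 0; read < entries->size(); ++read) {
    if (is_live((*entries)[read])) (*entries)[write++] = (*entries)[read];
  }
  entries->resize(write);  // drop the dead tail, analogous to the trim
}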
void MarkCompactCollector::ClearNonLiveReferences() {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_NONLIVEREFERENCES);
@@ -2668,8 +2641,9 @@ void MarkCompactCollector::MigrateObject(
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(dest != LO_SPACE);
if (dest == OLD_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
switch (src->ContentType()) {
@@ -2693,12 +2667,14 @@ void MarkCompactCollector::MigrateObject(
evacuation_slots_buffer);
}
} else if (dest == CODE_SPACE) {
+ DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
DCHECK(evacuation_slots_buffer != nullptr);
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
+ DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer == nullptr);
DCHECK(dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
@@ -2842,13 +2818,13 @@ class PointersUpdatingVisitor : public ObjectVisitor {
public:
explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p) { UpdatePointer(p); }
+ void VisitPointer(Object** p) override { UpdatePointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
- void VisitCell(RelocInfo* rinfo) {
+ void VisitCell(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
@@ -2858,7 +2834,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* target = rinfo->target_object();
Object* old_target = target;
@@ -2870,7 +2846,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitCodeTarget(RelocInfo* rinfo) {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* old_target = target;
@@ -2880,7 +2856,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitCodeAgeSequence(RelocInfo* rinfo) {
+ void VisitCodeAgeSequence(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Object* stub = rinfo->code_age_stub();
DCHECK(stub != NULL);
@@ -2890,7 +2866,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitDebugTarget(RelocInfo* rinfo) {
+ void VisitDebugTarget(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Object* target =
@@ -2907,20 +2883,12 @@ class PointersUpdatingVisitor : public ObjectVisitor {
HeapObject* heap_obj = HeapObject::cast(obj);
-// TODO(ishell): remove, once crbug/454297 is caught.
-#if V8_TARGET_ARCH_64_BIT
-#ifndef V8_OS_AIX // no point checking on AIX as full 64 range is supported
- const uintptr_t kBoundary = V8_UINT64_C(1) << 48;
- STATIC_ASSERT(kBoundary > 0);
- if (reinterpret_cast<uintptr_t>(heap_obj->address()) >= kBoundary) {
- CheckLayoutDescriptorAndDie(heap, slot);
- }
-#endif
-#endif
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(heap->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromAddress(heap_obj->address())
+ ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
HeapObject* target = map_word.ToForwardingAddress();
base::NoBarrier_CompareAndSwap(
reinterpret_cast<base::AtomicWord*>(slot),
@@ -2934,100 +2902,10 @@ class PointersUpdatingVisitor : public ObjectVisitor {
private:
inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
- static void CheckLayoutDescriptorAndDie(Heap* heap, Object** slot);
-
Heap* heap_;
};
-#if V8_TARGET_ARCH_64_BIT
-// TODO(ishell): remove, once crbug/454297 is caught.
-void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
- Object** slot) {
- const int kDataBufferSize = 128;
- uintptr_t data[kDataBufferSize] = {0};
- int index = 0;
- data[index++] = 0x10aaaaaaaaUL; // begin marker
-
- data[index++] = reinterpret_cast<uintptr_t>(slot);
- data[index++] = 0x15aaaaaaaaUL;
-
- Address slot_address = reinterpret_cast<Address>(slot);
-
- uintptr_t space_owner_id = 0xb001;
- if (heap->new_space()->ToSpaceContains(slot_address)) {
- space_owner_id = 1;
- } else if (heap->new_space()->FromSpaceContains(slot_address)) {
- space_owner_id = 2;
- } else if (heap->old_space()->ContainsSafe(slot_address)) {
- space_owner_id = 3;
- } else if (heap->code_space()->ContainsSafe(slot_address)) {
- space_owner_id = 4;
- } else if (heap->map_space()->ContainsSafe(slot_address)) {
- space_owner_id = 5;
- } else {
- // Lo space or other.
- space_owner_id = 6;
- }
- data[index++] = space_owner_id;
- data[index++] = 0x20aaaaaaaaUL;
-
- // Find map word lying near before the slot address (usually the map word is
- // at -3 words from the slot but just in case we look up further.
- Object** map_slot = slot;
- bool found = false;
- const int kMaxDistanceToMap = 64;
- for (int i = 0; i < kMaxDistanceToMap; i++, map_slot--) {
- Address map_address = reinterpret_cast<Address>(*map_slot);
- if (heap->map_space()->ContainsSafe(map_address)) {
- found = true;
- break;
- }
- }
- data[index++] = found;
- data[index++] = 0x30aaaaaaaaUL;
- data[index++] = reinterpret_cast<uintptr_t>(map_slot);
- data[index++] = 0x35aaaaaaaaUL;
-
- if (found) {
- Address obj_address = reinterpret_cast<Address>(map_slot);
- Address end_of_page =
- reinterpret_cast<Address>(Page::FromAddress(obj_address)) +
- Page::kPageSize;
- Address end_address =
- Min(obj_address + kPointerSize * kMaxDistanceToMap, end_of_page);
- int size = static_cast<int>(end_address - obj_address);
- data[index++] = size / kPointerSize;
- data[index++] = 0x40aaaaaaaaUL;
- memcpy(&data[index], reinterpret_cast<void*>(map_slot), size);
- index += size / kPointerSize;
- data[index++] = 0x50aaaaaaaaUL;
-
- HeapObject* object = HeapObject::FromAddress(obj_address);
- data[index++] = reinterpret_cast<uintptr_t>(object);
- data[index++] = 0x60aaaaaaaaUL;
-
- Map* map = object->map();
- data[index++] = reinterpret_cast<uintptr_t>(map);
- data[index++] = 0x70aaaaaaaaUL;
-
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
- data[index++] = reinterpret_cast<uintptr_t>(layout_descriptor);
- data[index++] = 0x80aaaaaaaaUL;
-
- memcpy(&data[index], reinterpret_cast<void*>(map->address()), Map::kSize);
- index += Map::kSize / kPointerSize;
- data[index++] = 0x90aaaaaaaaUL;
- }
-
- data[index++] = 0xeeeeeeeeeeUL;
- DCHECK(index < kDataBufferSize);
- base::OS::PrintError("Data: %p\n", static_cast<void*>(data));
- base::OS::Abort();
-}
-#endif
-
-
void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
PointersUpdatingVisitor v(heap_);
size_t buffer_size = buffer->Size();
@@ -3082,8 +2960,6 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
-
OldSpace* old_space = heap()->old_space();
HeapObject* target = nullptr;
@@ -3336,7 +3212,6 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
int offsets[16];
-
for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
Address cell_base = it.CurrentCellBase();
MarkBit::CellType* cell = it.CurrentCell();
@@ -3378,23 +3253,52 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
int MarkCompactCollector::NumberOfParallelCompactionTasks() {
if (!FLAG_parallel_compaction) return 1;
- // We cap the number of parallel compaction tasks by
+ // Compute the number of needed tasks based on a target compaction time, the
+ // profiled compaction speed and marked live memory.
+ //
+ // The number of parallel compaction tasks is limited by:
+ // - #evacuation pages
// - (#cores - 1)
- // - a value depending on the list of evacuation candidates
// - a hard limit
- const int kPagesPerCompactionTask = 4;
+ const double kTargetCompactionTimeInMs = 1;
const int kMaxCompactionTasks = 8;
- return Min(kMaxCompactionTasks,
- Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
- Max(1, base::SysInfo::NumberOfProcessors() - 1)));
+
+ intptr_t compaction_speed =
+ heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ if (compaction_speed == 0) return 1;
+
+ intptr_t live_bytes = 0;
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ }
+
+ const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
+ const int tasks =
+ 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
+ kTargetCompactionTimeInMs);
+ const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
+ const int tasks_capped_cores = Min(cores, tasks_capped_pages);
+ const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
+ return tasks_capped_hard;
}
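// [Editorial note, not part of the patch] A worked example of the computation
// above, with illustrative numbers: 8 MB of live bytes on the candidate pages
// and a profiled speed of 2 MB/ms give
//   tasks = 1 + 8 MB / (2 MB/ms) / 1 ms = 5,
// which is then capped by the page count, by (#cores - 1), and by the hard
// limit of kMaxCompactionTasks = 8. Condensed into one function:
#include <algorithm>
inline int CompactionTasks(long live_bytes, long bytes_per_ms,
                           int pages, int cores_minus_one) {
  const double kTargetMs = 1;
  const int kHardLimit = 8;
  if (bytes_per_ms == 0) return 1;  // no speed profile yet: stay sequential
  const int tasks = 1 + static_cast<int>(
      static_cast<double>(live_bytes) / bytes_per_ms / kTargetMs);
  return std::min({kHardLimit, cores_minus_one, pages, tasks});
}
// CompactionTasks(8 << 20, 2 << 20, /*pages=*/16, /*cores_minus_one=*/7) == 5.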
void MarkCompactCollector::EvacuatePagesInParallel() {
- if (evacuation_candidates_.length() == 0) return;
+ const int num_pages = evacuation_candidates_.length();
+ if (num_pages == 0) return;
+ // Used for trace summary.
+ intptr_t live_bytes = 0;
+ intptr_t compaction_speed = 0;
+ if (FLAG_trace_fragmentation) {
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ }
+ compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ }
const int num_tasks = NumberOfParallelCompactionTasks();
+
// Set up compaction spaces.
CompactionSpaceCollection** compaction_spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
@@ -3402,11 +3306,10 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
}
- compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
- heap()->old_space());
- compaction_spaces_for_tasks[0]
- ->Get(CODE_SPACE)
- ->MoveOverFreeMemory(heap()->code_space());
+ heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+ num_tasks);
+ heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+ num_tasks);
compaction_in_progress_ = true;
// Kick off parallel tasks.
@@ -3418,24 +3321,26 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
  // Contribute on the main thread. Counter and signal are in principle not needed.
- concurrent_compaction_tasks_active_++;
EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
- pending_compaction_tasks_semaphore_.Signal();
WaitUntilCompactionCompleted();
+ double compaction_duration = 0.0;
+ intptr_t compacted_memory = 0;
// Merge back memory (compacted and unused) from compaction spaces.
for (int i = 0; i < num_tasks; i++) {
heap()->old_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+ compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
+ compaction_duration += compaction_spaces_for_tasks[i]->duration();
delete compaction_spaces_for_tasks[i];
}
delete[] compaction_spaces_for_tasks;
+ heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
// Finalize sequentially.
- const int num_pages = evacuation_candidates_.length();
int abandoned_pages = 0;
for (int i = 0; i < num_pages; i++) {
Page* p = evacuation_candidates_[i];
@@ -3453,7 +3358,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// - Leave the page in the list of pages of a space since we could not
// fully evacuate it.
DCHECK(p->IsEvacuationCandidate());
- p->SetFlag(Page::RESCAN_ON_EVACUATION);
+ p->SetFlag(Page::COMPACTION_WAS_ABORTED);
abandoned_pages++;
break;
case MemoryChunk::kCompactingFinalize:
@@ -3471,17 +3376,15 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
- if (num_pages > 0) {
- if (FLAG_trace_fragmentation) {
- if (abandoned_pages != 0) {
- PrintF(
- " Abandoned (at least partially) %d out of %d page compactions due"
- " to lack of memory\n",
- abandoned_pages, num_pages);
- } else {
- PrintF(" Compacted %d pages\n", num_pages);
- }
- }
+ if (FLAG_trace_fragmentation) {
+ PrintIsolate(isolate(),
+ "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
+ "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+ "d compaction_speed=%" V8_PTR_PREFIX "d\n",
+ isolate()->time_millis_since_init(), FLAG_parallel_compaction,
+ num_pages, abandoned_pages, num_tasks,
+ base::SysInfo::NumberOfProcessors(), live_bytes,
+ compaction_speed);
}
}
@@ -3509,11 +3412,15 @@ void MarkCompactCollector::EvacuatePages(
if (p->IsEvacuationCandidate()) {
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingInProgress);
+ double start = heap()->MonotonicallyIncreasingTimeInMs();
+ intptr_t live_bytes = p->LiveBytes();
if (EvacuateLiveObjectsFromPage(
p, compaction_spaces->Get(p->owner()->identity()),
evacuation_slots_buffer)) {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
+ compaction_spaces->ReportCompactionProgress(
+ heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
} else {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingAborted);
@@ -3559,7 +3466,6 @@ static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
DCHECK(free_list == NULL);
return space->Free(start, size);
} else {
- // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
return size - free_list->Free(start, size);
}
}
@@ -3696,6 +3602,57 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
}
+void MarkCompactCollector::VisitLiveObjects(Page* page,
+ ObjectVisitor* visitor) {
+ // First pass on aborted pages.
+ int offsets[16];
+ for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+ if (*cell == 0) continue;
+ int live_objects = MarkWordToObjectStarts(*cell, offsets);
+ for (int i = 0; i < live_objects; i++) {
+ Address object_addr = cell_base + offsets[i] * kPointerSize;
+ HeapObject* live_object = HeapObject::FromAddress(object_addr);
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+ Map* map = live_object->synchronized_map();
+ int size = live_object->SizeFromMap(map);
+ live_object->IterateBody(map->instance_type(), size, visitor);
+ }
+ }
+}
+
+
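The decoding step in VisitLiveObjects relies on MarkWordToObjectStarts, which turns one mark-bit cell into object start offsets. A minimal standalone sketch of that bit decoding, assuming 32-bit mark cells and GCC/Clang builtins (names here are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

// Each set bit i in a mark cell denotes an object starting at
// cell_base + i * kPointerSize.
static int MarkWordToOffsets(uint32_t cell, int offsets[32]) {
  int count = 0;
  while (cell != 0) {
    int bit = __builtin_ctz(cell);  // index of the lowest set bit
    offsets[count++] = bit;
    cell &= cell - 1;               // clear the lowest set bit
  }
  return count;
}

int main() {
  const int kPointerSize = 8;
  int offsets[32];
  int live = MarkWordToOffsets(0x5, offsets);  // bits 0 and 2 set
  for (int i = 0; i < live; i++)
    printf("object at cell_base + %d\n", offsets[i] * kPointerSize);
  return 0;  // prints offsets 0 and 16
}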
+void MarkCompactCollector::SweepAbortedPages() {
+ // Second pass on aborted pages.
+ for (int i = 0; i < evacuation_candidates_.length(); i++) {
+ Page* p = evacuation_candidates_[i];
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ switch (space->identity()) {
+ case OLD_SPACE:
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ break;
+ case CODE_SPACE:
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Heap::RelocationLock relocation_lock(heap());
@@ -3789,13 +3746,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
- }
- if (p->IsEvacuationCandidate() &&
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- // Case where we've aborted compacting a page. Clear the flag here to
- // avoid release the page later on.
- p->ClearEvacuationCandidate();
+ // First pass on aborted pages, fixing up all live objects.
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ // Clearing the evacuation candidate flag here has the effect of
+ // stopping recording of slots for it in the following pointer
+ // update phases.
+ p->ClearEvacuationCandidate();
+ VisitLiveObjects(p, &updating_visitor);
+ }
}
if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
@@ -3831,17 +3790,26 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+ heap_->string_table()->Iterate(&updating_visitor);
- heap_->string_table()->Iterate(&updating_visitor);
+ // Update pointers from external string table.
+ heap_->UpdateReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
- // Update pointers from external string table.
- heap_->UpdateReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+ }
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_ABORTED);
+ // After updating all pointers, we can finally sweep the aborted pages,
+ // effectively overriding any forward pointers.
+ SweepAbortedPages();
+ }
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
@@ -3872,6 +3840,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
p->ResetLiveBytes();
+ CHECK(p->WasSwept());
space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
@@ -4363,9 +4332,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
reinterpret_cast<intptr_t>(p));
}
- // Adjust unswept free bytes because releasing a page expects said
- // counter to be accurate for unswept pages.
- space->IncreaseUnsweptFreeBytes(p);
space->ReleasePage(p);
continue;
}
@@ -4399,7 +4365,8 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
reinterpret_cast<intptr_t>(p));
}
p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
- space->IncreaseUnsweptFreeBytes(p);
+ int to_sweep = p->area_size() - p->LiveBytes();
+ space->accounting_stats_.ShrinkSpace(to_sweep);
}
space->set_end_of_unswept_pages(p);
break;
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 724650c1c4..c489eaf3f4 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -263,10 +263,9 @@ class MarkingDeque {
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in three ways:
+// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
-// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
@@ -274,26 +273,21 @@ class CodeFlusher {
public:
explicit CodeFlusher(Isolate* isolate)
: isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL),
- optimized_code_map_holder_head_(NULL) {}
+ jsfunction_candidates_head_(nullptr),
+ shared_function_info_candidates_head_(nullptr) {}
inline void AddCandidate(SharedFunctionInfo* shared_info);
inline void AddCandidate(JSFunction* function);
- inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
- void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictCandidate(JSFunction* function);
void ProcessCandidates() {
- ProcessOptimizedCodeMaps();
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
void EvictAllCandidates() {
- EvictOptimizedCodeMaps();
EvictJSFunctionCandidates();
EvictSharedFunctionInfoCandidates();
}
@@ -301,10 +295,8 @@ class CodeFlusher {
void IteratePointersToFromSpace(ObjectVisitor* v);
private:
- void ProcessOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
- void EvictOptimizedCodeMaps();
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
@@ -321,15 +313,9 @@ class CodeFlusher {
SharedFunctionInfo* next_candidate);
static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
- static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
- static inline void SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder);
- static inline void ClearNextCodeMap(SharedFunctionInfo* holder);
-
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
- SharedFunctionInfo* optimized_code_map_holder_head_;
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
@@ -457,17 +443,24 @@ class MarkCompactCollector {
// size of the maximum continuous freed memory chunk.
int SweepInParallel(Page* page, PagedSpace* space);
+ // Ensures that sweeping is finished.
+ //
+  // Note: Can only be called safely from the main thread.
void EnsureSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
+  // Help out in sweeping the corresponding space and refill with memory that
+  // has been regained.
+ //
+ // Note: Thread-safe.
+ void SweepAndRefill(CompactionSpace* space);
+
// If sweeper threads are not active this method will return true. If
// this is a latency issue we should be smarter here. Otherwise, it will
// return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
- void RefillFreeList(PagedSpace* space);
-
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeping_in_progress_; }
@@ -512,6 +505,20 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
+ //
+ // Free lists filled by sweeper and consumed by corresponding spaces
+ // (including compaction spaces).
+ //
+ base::SmartPointer<FreeList>& free_list_old_space() {
+ return free_list_old_space_;
+ }
+ base::SmartPointer<FreeList>& free_list_code_space() {
+ return free_list_code_space_;
+ }
+ base::SmartPointer<FreeList>& free_list_map_space() {
+ return free_list_map_space_;
+ }
+
private:
class CompactionTask;
class SweeperTask;
@@ -673,10 +680,14 @@ class MarkCompactCollector {
// collections when incremental marking is aborted.
void AbortWeakCollections();
-
void ProcessAndClearWeakCells();
void AbortWeakCells();
+ // After all reachable objects have been marked, those entries within
+ // optimized code maps that became unreachable are removed, potentially
+ // trimming or clearing out the entire optimized code map.
+ void ProcessAndClearOptimizedCodeMaps();
+
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
// a non-compacting collection.
@@ -716,6 +727,10 @@ class MarkCompactCollector {
void EvacuateNewSpaceAndCandidates();
+ void VisitLiveObjects(Page* page, ObjectVisitor* visitor);
+
+ void SweepAbortedPages();
+
void ReleaseEvacuationCandidates();
// Moves the pages of the evacuation_candidates_ list to the end of their
@@ -850,7 +865,7 @@ class EvacuationScope BASE_EMBEDDED {
const char* AllocationSpaceName(AllocationSpace space);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_MARK_COMPACT_H_
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 45d6bd3d7f..9fadd08dca 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -13,7 +13,7 @@
namespace v8 {
namespace internal {
-const int MemoryReducer::kLongDelayMs = 5000;
+const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
@@ -24,14 +24,27 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
void MemoryReducer::TimerTask::RunInternal() {
+ const double kJsCallsPerMsThreshold = 0.25;
Heap* heap = memory_reducer_->heap();
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
heap->OldGenerationAllocationCounter());
+ double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
+ bool low_allocation_rate = heap->HasLowAllocationRate();
+ bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
+ bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
+ js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
+ optimize_for_memory ? "background" : "foreground");
+ }
event.type = kTimer;
event.time_ms = time_ms;
- event.low_allocation_rate = heap->HasLowAllocationRate();
+  // The memory reducer will start incremental marking if
+  // 1) the mutator is likely idle: JS call rate is low and allocation rate is low.
+  // 2) the mutator is in the background: the optimize-for-memory flag is set.
+ event.should_start_incremental_gc = is_idle || optimize_for_memory;
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
heap->incremental_marking()->CanBeActivated();
@@ -39,6 +52,16 @@ void MemoryReducer::TimerTask::RunInternal() {
}
+double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) {
+ unsigned int counter = heap()->isolate()->js_calls_from_api_counter();
+ unsigned int call_delta = counter - js_calls_counter_;
+ double time_delta_ms = time_ms - js_calls_sample_time_ms_;
+ js_calls_counter_ = counter;
+ js_calls_sample_time_ms_ = time_ms;
+ return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
+}
+
+
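SampleAndGetJsCallsPerMs is a plain delta-over-time computation; whether the mutator counts as idle is then a comparison against kJsCallsPerMsThreshold (0.25 above). A self-contained sketch with made-up sample points:

#include <cstdio>

struct CallRateSampler {
  unsigned last_counter = 0;
  double last_time_ms = 0.0;

  // Mirrors the counter/time bookkeeping: return calls per ms since the
  // previous sample and record the new sample point.
  double Sample(unsigned counter, double time_ms) {
    unsigned call_delta = counter - last_counter;
    double time_delta_ms = time_ms - last_time_ms;
    last_counter = counter;
    last_time_ms = time_ms;
    return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
  }
};

int main() {
  CallRateSampler s;
  s.Sample(0, 0.0);                     // baseline sample
  double rate = s.Sample(100, 1000.0);  // 100 calls over 1000 ms
  printf("%.3f calls/ms\n", rate);      // 0.100 < 0.25 -> looks idle
  return 0;
}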
void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK_EQ(kTimer, event.type);
DCHECK_EQ(kWait, state_.action);
@@ -51,8 +74,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
state_.started_gcs);
}
if (heap()->ShouldOptimizeForMemoryUsage()) {
- // Do full GC if memory usage has higher priority than latency. This is
- // important for background tabs that do not send idle notifications.
+ // TODO(ulan): Remove this once crbug.com/552305 is fixed.
+ // Do full GC if memory usage has higher priority than latency.
heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
"memory reducer");
} else {
@@ -76,7 +99,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
"Memory reducer: finalize incremental marking");
}
// Re-schedule the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
state_.next_gc_start_ms - event.time_ms);
@@ -91,7 +114,7 @@ void MemoryReducer::NotifyMarkCompact(const Event& event) {
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
// If we are transitioning to the WAIT state, start the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
}
if (old_action == kRun) {
if (FLAG_trace_gc_verbose) {
@@ -109,34 +132,7 @@ void MemoryReducer::NotifyContextDisposed(const Event& event) {
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
// If we are transitioning to the WAIT state, start the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
- }
-}
-
-
-void MemoryReducer::NotifyBackgroundIdleNotification(const Event& event) {
- DCHECK_EQ(kBackgroundIdleNotification, event.type);
- Action old_action = state_.action;
- int old_started_gcs = state_.started_gcs;
- state_ = Step(state_, event);
- if (old_action == kWait && state_.action == kWait &&
- old_started_gcs + 1 == state_.started_gcs) {
- DCHECK(heap()->incremental_marking()->IsStopped());
- // TODO(ulan): Replace it with incremental marking GC once
- // chromium:490559 is fixed.
- if (event.time_ms > state_.last_gc_time_ms + kLongDelayMs) {
- heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
- "memory reducer background GC");
- } else {
- DCHECK(FLAG_incremental_marking);
- heap()->StartIdleIncrementalMarking();
- if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(),
- "Memory reducer: started GC #%d"
- " (background idle)\n",
- state_.started_gcs);
- }
- }
+ ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
}
}
@@ -150,12 +146,12 @@ bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
// For specification of this function see the comment for MemoryReducer class.
MemoryReducer::State MemoryReducer::Step(const State& state,
const Event& event) {
- if (!FLAG_incremental_marking) {
+ if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
return State(kDone, 0, 0, state.last_gc_time_ms);
}
switch (state.action) {
case kDone:
- if (event.type == kTimer || event.type == kBackgroundIdleNotification) {
+ if (event.type == kTimer) {
return state;
} else {
DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
@@ -171,7 +167,8 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
if (state.started_gcs >= kMaxNumberOfGCs) {
return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
} else if (event.can_start_incremental_gc &&
- (event.low_allocation_rate || WatchdogGC(state, event))) {
+ (event.should_start_incremental_gc ||
+ WatchdogGC(state, event))) {
if (state.next_gc_start_ms <= event.time_ms) {
return State(kRun, state.started_gcs + 1, 0.0,
state.last_gc_time_ms);
@@ -182,14 +179,6 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
state.last_gc_time_ms);
}
- case kBackgroundIdleNotification:
- if (event.can_start_incremental_gc &&
- state.started_gcs < kMaxNumberOfGCs) {
- return State(kWait, state.started_gcs + 1,
- event.time_ms + kLongDelayMs, state.last_gc_time_ms);
- } else {
- return state;
- }
case kMarkCompact:
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
event.time_ms);
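With kBackgroundIdleNotification gone, the only way out of kWait on a timer event is the guard above. Reduced to a toy predicate (field names follow the Event struct; WatchdogGC is folded into a flag for illustration):

#include <cstdio>

struct TimerEvent {
  bool can_start_incremental_gc;
  bool should_start_incremental_gc;  // is_idle || optimize_for_memory
  bool watchdog_fired;               // stands in for WatchdogGC(state, event)
};

// The kWait/kTimer transition guard after this change.
bool ShouldRun(const TimerEvent& e) {
  return e.can_start_incremental_gc &&
         (e.should_start_incremental_gc || e.watchdog_fired);
}

int main() {
  TimerEvent busy{true, false, false};
  TimerEvent idle{true, true, false};
  printf("busy=%d idle=%d\n", ShouldRun(busy), ShouldRun(idle));  // 0 1
  return 0;
}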
@@ -212,8 +201,10 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
}
-void MemoryReducer::ScheduleTimer(double delay_ms) {
+void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
DCHECK(delay_ms > 0);
+ // Record the time and the js call counter.
+ SampleAndGetJsCallsPerMs(time_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
@@ -225,5 +216,5 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
-} // internal
-} // v8
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index f98cb045e3..9213613c07 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -96,23 +96,21 @@ class MemoryReducer {
double last_gc_time_ms;
};
- enum EventType {
- kTimer,
- kMarkCompact,
- kContextDisposed,
- kBackgroundIdleNotification
- };
+ enum EventType { kTimer, kMarkCompact, kContextDisposed };
struct Event {
EventType type;
double time_ms;
- bool low_allocation_rate;
bool next_gc_likely_to_collect_more;
+ bool should_start_incremental_gc;
bool can_start_incremental_gc;
};
explicit MemoryReducer(Heap* heap)
- : heap_(heap), state_(kDone, 0, 0.0, 0.0) {}
+ : heap_(heap),
+ state_(kDone, 0, 0.0, 0.0),
+ js_calls_counter_(0),
+ js_calls_sample_time_ms_(0.0) {}
// Callbacks.
void NotifyMarkCompact(const Event& event);
void NotifyContextDisposed(const Event& event);
@@ -121,7 +119,7 @@ class MemoryReducer {
// the incoming event.
static State Step(const State& state, const Event& event);
// Posts a timer task that will call NotifyTimer after the given delay.
- void ScheduleTimer(double delay_ms);
+ void ScheduleTimer(double time_ms, double delay_ms);
void TearDown();
static const int kLongDelayMs;
static const int kShortDelayMs;
@@ -150,8 +148,16 @@ class MemoryReducer {
static bool WatchdogGC(const State& state, const Event& event);
+ // Returns the rate of JS calls initiated from the API.
+ double SampleAndGetJsCallsPerMs(double time_ms);
+
Heap* heap_;
State state_;
+ unsigned int js_calls_counter_;
+ double js_calls_sample_time_ms_;
+
+ // Used in cctest.
+ friend class HeapTester;
DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
};
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 55734fd463..d6a189a98d 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -63,7 +63,11 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
- table_.Register(kVisitJSFunction, &VisitJSFunction);
+ // Don't visit code entry. We are using this visitor only during scavenges.
+ table_.Register(
+ kVisitJSFunction,
+ &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
+ int>::Visit);
table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
@@ -428,6 +432,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
}
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ // Visit all unoptimized code objects to prevent flushing them.
+ MarkInlinedFunctionsCode(heap, code);
+ }
+ }
code->CodeIterateBody<StaticVisitor>(heap);
}
@@ -443,23 +454,22 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (FLAG_cleanup_code_caches_at_gc) {
shared->ClearTypeFeedbackInfoAtGCTime();
}
- if ((FLAG_flush_optimized_code_cache ||
- heap->isolate()->serializer_enabled()) &&
- !shared->optimized_code_map()->IsSmi()) {
- // Always flush the optimized code map if requested by flag.
- shared->ClearOptimizedCodeMap();
- }
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
+ if (FLAG_flush_optimized_code_cache) {
+ if (!shared->optimized_code_map()->IsSmi()) {
+ // Always flush the optimized code map if requested by flag.
+ shared->ClearOptimizedCodeMap();
+ }
+ } else {
if (!shared->optimized_code_map()->IsSmi()) {
- // Add the shared function info holding an optimized code map to
- // the code flusher for processing of code maps after marking.
- collector->code_flusher()->AddOptimizedCodeMap(shared);
// Treat some references within the code map weakly by marking the
- // code map itself but not pushing it onto the marking deque.
+ // code map itself but not pushing it onto the marking deque. The
+ // map will be processed after marking.
FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
MarkOptimizedCodeMap(heap, code_map);
}
+ }
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, shared)) {
// This function's code looks flushable. But we have to postpone
// the decision until we see all functions that point to the same
@@ -472,12 +482,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
VisitSharedFunctionInfoWeakCode(heap, object);
return;
}
- } else {
- if (!shared->optimized_code_map()->IsSmi()) {
- // Flush optimized code map on major GCs without code flushing,
- // needed because cached code doesn't contain breakpoints.
- shared->ClearOptimizedCodeMap();
- }
}
VisitSharedFunctionInfoStrongCode(heap, object);
}
@@ -507,17 +511,14 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
VisitSharedFunctionInfoWeakCode(heap, shared);
}
// Treat the reference to the code object weakly.
- VisitJSFunctionWeakCode(heap, object);
+ VisitJSFunctionWeakCode(map, object);
return;
} else {
// Visit all unoptimized code objects to prevent flushing them.
StaticVisitor::MarkObject(heap, function->shared()->code());
- if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- MarkInlinedFunctionsCode(heap, function->code());
- }
}
}
- VisitJSFunctionStrongCode(heap, object);
+ VisitJSFunctionStrongCode(map, object);
}
@@ -656,20 +657,21 @@ void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
// For optimized functions we should retain both the non-optimized version
// of its code and the non-optimized versions of all inlined functions.
// This is required to support bailing out from inlined code.
- DeoptimizationInputData* const data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- FixedArray* const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- StaticVisitor::MarkObject(
- heap, SharedFunctionInfo::cast(literals->get(i))->code());
+ if (code->deoptimization_data() != heap->empty_fixed_array()) {
+ DeoptimizationInputData* const data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ FixedArray* const literals = data->LiteralArray();
+ int const inlined_count = data->InlinedFunctionCount()->value();
+ for (int i = 0; i < inlined_count; ++i) {
+ StaticVisitor::MarkObject(
+ heap, SharedFunctionInfo::cast(literals->get(i))->code());
+ }
}
}
-inline static bool IsValidNonBuiltinContext(Object* context) {
- return context->IsContext() &&
- !Context::cast(context)->global_object()->IsJSBuiltinsObject();
+inline static bool HasValidNonBuiltinContext(JSFunction* function) {
+ return function->context()->IsContext() && !function->shared()->IsBuiltin();
}
@@ -693,7 +695,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
}
// The function must have a valid context and not be a builtin.
- if (!IsValidNonBuiltinContext(function->context())) {
+ if (!HasValidNonBuiltinContext(function)) {
return false;
}
@@ -803,42 +805,20 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot =
- HeapObject::RawField(object, JSFunction::kPropertiesOffset);
- Object** end_slot =
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
-
- VisitCodeEntry(heap, object,
- object->address() + JSFunction::kCodeEntryOffset);
- STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
- JSFunction::kPrototypeOrInitialMapOffset);
-
- start_slot =
- HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
- end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+ Map* map, HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSFunction::BodyDescriptorStrongCode,
+ void> JSFunctionStrongCodeBodyVisitor;
+ JSFunctionStrongCodeBodyVisitor::Visit(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot =
- HeapObject::RawField(object, JSFunction::kPropertiesOffset);
- Object** end_slot =
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
-
- // Skip visiting kCodeEntryOffset as it is treated weakly here.
- STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
- JSFunction::kPrototypeOrInitialMapOffset);
-
- start_slot =
- HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
- end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+ Map* map, HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
+ void> JSFunctionWeakCodeBodyVisitor;
+ JSFunctionWeakCodeBodyVisitor::Visit(map, object);
}
@@ -902,7 +882,7 @@ void Code::CodeIterateBody(Heap* heap) {
it.rinfo()->template Visit<StaticVisitor>(heap);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OBJECTS_VISITING_INL_H_
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 902a96a644..3d6cb73095 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -133,7 +133,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARRAY_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
@@ -232,7 +231,6 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break;
@@ -240,8 +238,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
JSArrayBuffer::JSArrayBufferIterateBody(this, v);
break;
case JS_FUNCTION_TYPE:
- reinterpret_cast<JSFunction*>(this)
- ->JSFunctionIterateBody(object_size, v);
+ JSFunction::BodyDescriptor::IterateBody(this, object_size, v);
break;
case ODDBALL_TYPE:
Oddball::BodyDescriptor::IterateBody(this, v);
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 1eba88731b..787410d76f 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -188,79 +188,34 @@ class VisitorDispatchTable {
};
-template <typename StaticVisitor>
-class BodyVisitorBase : public AllStatic {
- public:
- INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
- int start_offset, int end_offset)) {
- DCHECK(!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout());
- IterateRawPointers(heap, object, start_offset, end_offset);
- }
-
- INLINE(static void IterateBody(Heap* heap, HeapObject* object,
- int start_offset, int end_offset)) {
- if (!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout()) {
- IterateRawPointers(heap, object, start_offset, end_offset);
- } else {
- IterateBodyUsingLayoutDescriptor(heap, object, start_offset, end_offset);
- }
- }
-
- private:
- INLINE(static void IterateRawPointers(Heap* heap, HeapObject* object,
- int start_offset, int end_offset)) {
- StaticVisitor::VisitPointers(heap, object,
- HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- }
-
- static void IterateBodyUsingLayoutDescriptor(Heap* heap, HeapObject* object,
- int start_offset,
- int end_offset) {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
-
- LayoutDescriptorHelper helper(object->map());
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
- IterateRawPointers(heap, object, offset, end_of_region_offset);
- }
- offset = end_of_region_offset;
- }
- }
-};
-
-
template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+class FlexibleBodyVisitor : public AllStatic {
public:
INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
int object_size = BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<StaticVisitor>::IterateBody(
- map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+ BodyDescriptor::template IterateBody<StaticVisitor>(object, object_size);
return static_cast<ReturnType>(object_size);
}
+ // This specialization is only suitable for objects containing pointer fields.
template <int object_size>
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
DCHECK(BodyDescriptor::SizeOf(map, object) == object_size);
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+ DCHECK(!FLAG_unbox_double_fields || map->HasFastPointerLayout());
+ StaticVisitor::VisitPointers(
+ object->GetHeap(), object,
+ HeapObject::RawField(object, BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, object_size));
return static_cast<ReturnType>(object_size);
}
};
template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+class FixedBodyVisitor : public AllStatic {
public:
INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- BodyVisitorBase<StaticVisitor>::IterateBody(map->GetHeap(), object,
- BodyDescriptor::kStartOffset,
- BodyDescriptor::kEndOffset);
+ BodyDescriptor::template IterateBody<StaticVisitor>(object);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
@@ -296,22 +251,15 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
}
- private:
- INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
- Heap* heap = map->GetHeap();
- VisitPointers(heap, object,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
- // Don't visit code entry. We are using this visitor only during scavenges.
-
- VisitPointers(
- heap, object, HeapObject::RawField(
- object, JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
- return JSFunction::kSize;
+ // Although we are using the JSFunction body descriptor which does not
+  // visit the code entry, the compiler wants it to be accessible.
+ // See JSFunction::BodyDescriptorImpl.
+ INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
+ Address entry_address)) {
+ UNREACHABLE();
}
+ private:
INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
@@ -340,7 +288,7 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
}
INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
- return FreeSpace::cast(object)->Size();
+ return FreeSpace::cast(object)->size();
}
INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
@@ -415,10 +363,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip the weak next code link in a code object.
INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
- // Mark non-optimize code for functions inlined into the given optimized
- // code. This will prevent it from being flushed.
- static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
-
protected:
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
@@ -442,6 +386,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// references, possibly treating some entries weak.
static void MarkOptimizedCodeMap(Heap* heap, FixedArray* code_map);
+ // Mark non-optimized code for functions inlined into the given optimized
+ // code. This will prevent it from being flushed.
+ static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
+
// Code flushing support.
INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
@@ -450,8 +398,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// references to code objects either strongly or weakly.
static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
- static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
- static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+ static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
+ static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
class DataObjectVisitor {
public:
@@ -491,7 +439,7 @@ class WeakObjectRetainer;
// access the next-element pointers.
template <class T>
Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OBJECTS_VISITING_H_
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index c3804436fb..faf90face5 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -112,5 +112,5 @@ void ScavengeJob::ScheduleIdleTask(Heap* heap) {
}
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index c9e508ec52..56299a154b 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -74,7 +74,7 @@ class ScavengeJob {
bool idle_task_rescheduled_;
int bytes_allocated_since_the_last_task_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SCAVENGE_JOB_H_
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 74ed665c3f..31f1ee55b7 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -216,7 +216,7 @@ class ScavengingVisitor : public StaticVisitorBase {
template <ObjectContents object_contents, AllocationAlignment alignment>
static inline void EvacuateObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
- SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
@@ -236,7 +236,7 @@ class ScavengingVisitor : public StaticVisitorBase {
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
- UNREACHABLE();
+ FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 44da98c86c..b180879db2 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -48,8 +48,8 @@ class ScavengeVisitor : public ObjectVisitor {
public:
explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p);
- void VisitPointers(Object** start, Object** end);
+ void VisitPointer(Object** p) override;
+ void VisitPointers(Object** start, Object** end) override;
private:
inline void ScavengePointer(Object** p);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index a12ed6f296..76011768fa 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -133,7 +133,12 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
}
if (!obj->IsFiller()) {
- DCHECK_OBJECT_SIZE(obj_size);
+ if (obj->IsCode()) {
+ DCHECK_EQ(space_, space_->heap()->code_space());
+ DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+ } else {
+ DCHECK_OBJECT_SIZE(obj_size);
+ }
return obj;
}
}
@@ -188,7 +193,7 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
page->mutex_ = new base::Mutex();
- DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+ DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
@@ -446,7 +451,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
Address top = allocation_info_.top();
- if (allocation_info_.limit() - top < size_in_bytes) {
+ if (allocation_info_.limit() < top + size_in_bytes) {
// See if we can create room.
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
return AllocationResult::Retry();
@@ -487,7 +492,7 @@ intptr_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SPACES_INL_H_
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index cd8a72951c..a5e2760bb0 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -924,7 +924,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
- static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+ static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
}
chunk->IncrementLiveBytes(by);
}
@@ -954,7 +954,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
: Space(heap, space, executable),
free_list_(this),
- unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -983,6 +982,101 @@ void PagedSpace::TearDown() {
}
+void PagedSpace::AddMemory(Address start, intptr_t size) {
+ accounting_stats_.ExpandSpace(static_cast<int>(size));
+ Free(start, static_cast<int>(size));
+}
+
+
+FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
+ FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
+ if (free_space != nullptr) {
+ accounting_stats_.DecreaseCapacity(free_space->size());
+ }
+ return free_space;
+}
+
+
+void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
+ int num, intptr_t limit) {
+ DCHECK_GT(num, 0);
+ DCHECK(other != nullptr);
+
+ if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+ EmptyAllocationInfo();
+
+ bool memory_available = true;
+ bool spaces_need_memory = true;
+ FreeSpace* node = nullptr;
+ CompactionSpace* current_space = nullptr;
+ // Iterate over spaces and memory as long as we have memory and there are
+ // spaces in need of some.
+ while (memory_available && spaces_need_memory) {
+ spaces_need_memory = false;
+ // Round-robin over all spaces.
+ for (int i = 0; i < num; i++) {
+ current_space = other[i]->Get(identity());
+ if (current_space->free_list()->Available() < limit) {
+ // Space has not reached its limit. Try to get some memory.
+ spaces_need_memory = true;
+ node = TryRemoveMemory(limit - current_space->free_list()->Available());
+ if (node != nullptr) {
+ CHECK(current_space->identity() == identity());
+ current_space->AddMemory(node->address(), node->size());
+ } else {
+ memory_available = false;
+ break;
+ }
+ }
+ }
+ }
+}
+
+
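DivideUponCompactionSpaces hands memory to the per-task compaction spaces round-robin until each reaches its limit or the donor space runs dry. A simplified sketch with plain byte counters instead of free lists (fixed-size chunks and all names invented for illustration):

#include <algorithm>
#include <cstdio>

// Round-robin handout from a donor byte pool to n consumers.
void DivideRoundRobin(long* donor, long* consumers, int n, long limit,
                      long chunk) {
  bool memory_available = true;
  bool need_memory = true;
  while (memory_available && need_memory) {
    need_memory = false;
    for (int i = 0; i < n; i++) {
      if (consumers[i] < limit) {
        need_memory = true;
        long take = std::min(chunk, std::min(limit - consumers[i], *donor));
        if (take == 0) { memory_available = false; break; }
        *donor -= take;
        consumers[i] += take;
      }
    }
  }
}

int main() {
  long donor = 10;
  long consumers[3] = {0, 0, 0};
  DivideRoundRobin(&donor, consumers, 3, /*limit=*/4, /*chunk=*/2);
  for (int i = 0; i < 3; i++) printf("task %d: %ld\n", i, consumers[i]);
  printf("donor: %ld\n", donor);  // 4, 4, 2 handed out; donor empty
  return 0;
}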
+void PagedSpace::RefillFreeList() {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ FreeList* free_list = nullptr;
+ if (this == heap()->old_space()) {
+ free_list = collector->free_list_old_space().get();
+ } else if (this == heap()->code_space()) {
+ free_list = collector->free_list_code_space().get();
+ } else if (this == heap()->map_space()) {
+ free_list = collector->free_list_map_space().get();
+ } else {
+    // Any PagedSpace might invoke RefillFreeList; we filter out all but the
+    // old-generation spaces.
+ return;
+ }
+ DCHECK(free_list != nullptr);
+ intptr_t added = free_list_.Concatenate(free_list);
+ accounting_stats_.IncreaseCapacity(added);
+}
+
+
+void CompactionSpace::RefillFreeList() {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ FreeList* free_list = nullptr;
+ if (identity() == OLD_SPACE) {
+ free_list = collector->free_list_old_space().get();
+ } else if (identity() == CODE_SPACE) {
+ free_list = collector->free_list_code_space().get();
+ } else {
+ // Compaction spaces only represent old or code space.
+ UNREACHABLE();
+ }
+ DCHECK(free_list != nullptr);
+ intptr_t refilled = 0;
+ while (refilled < kCompactionMemoryWanted) {
+ FreeSpace* node =
+ free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
+ if (node == nullptr) return;
+ refilled += node->size();
+ AddMemory(node->address(), node->size());
+ }
+}
+
+
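CompactionSpace::RefillFreeList above is a pull loop: keep removing nodes from the sweeper-owned list until roughly kCompactionMemoryWanted bytes have arrived or the list is empty. A sketch with a vector of node sizes standing in for the free list:

#include <cstdio>
#include <vector>

// Pull node sizes from a shared pool until `wanted` bytes are collected
// or the pool runs dry; returns the number of bytes refilled.
long Refill(std::vector<long>* pool, long wanted) {
  long refilled = 0;
  while (refilled < wanted && !pool->empty()) {
    refilled += pool->back();  // stands in for TryRemoveMemory(...)
    pool->pop_back();
  }
  return refilled;
}

int main() {
  std::vector<long> pool = {512, 1024, 2048};
  printf("refilled %ld bytes\n", Refill(&pool, 2048));  // takes the 2048 node
  return 0;
}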
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
@@ -992,29 +1086,33 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
// Move over the free list. Concatenate makes sure that the source free list
// gets properly reset after moving over all nodes.
- intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+ intptr_t added = free_list_.Concatenate(other->free_list());
// Moved memory is not recorded as allocated memory, but rather increases and
- // decreases capacity of the corresponding spaces. Used size and waste size
- // are maintained by the receiving space upon allocating and freeing blocks.
- other->accounting_stats_.DecreaseCapacity(freed_bytes);
- accounting_stats_.IncreaseCapacity(freed_bytes);
+ // decreases capacity of the corresponding spaces.
+ other->accounting_stats_.DecreaseCapacity(added);
+ accounting_stats_.IncreaseCapacity(added);
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Unmerged fields:
// area_size_
- // allocation_info_
- // end_of_unswept_pages_
- // unswept_free_bytes_
// anchor_
MoveOverFreeMemory(other);
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
- other->accounting_stats_.Reset();
+ other->accounting_stats_.Clear();
+
+ // The linear allocation area of {other} should be destroyed now.
+ DCHECK(other->top() == nullptr);
+ DCHECK(other->limit() == nullptr);
+
+ DCHECK(other->end_of_unswept_pages_ == nullptr);
+
+ AccountCommitted(other->CommittedMemory());
// Move over pages.
PageIterator it(other);
@@ -1094,6 +1192,8 @@ bool PagedSpace::Expand() {
executable());
if (p == NULL) return false;
+ AccountCommitted(static_cast<intptr_t>(p->size()));
+
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
@@ -1138,8 +1238,6 @@ void PagedSpace::ReleasePage(Page* page) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
DCHECK_EQ(AreaSize(), static_cast<int>(size));
- } else {
- DecreaseUnsweptFreeBytes(page);
}
if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
@@ -1160,6 +1258,7 @@ void PagedSpace::ReleasePage(Page* page) {
page->Unlink();
}
+ AccountUncommitted(static_cast<intptr_t>(page->size()));
heap()->QueueMemoryChunkForFree(page);
DCHECK(Capacity() > 0);
@@ -1410,7 +1509,7 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_limit_step_ == 0) {
+ } else if (top_on_previous_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
@@ -1502,12 +1601,40 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
+void NewSpace::UpdateInlineAllocationLimitStep() {
+ intptr_t step = 0;
+ for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+ InlineAllocationObserver* observer = inline_allocation_observers_[i];
+ step = step ? Min(step, observer->step_size()) : observer->step_size();
+ }
+ inline_allocation_limit_step_ = step;
+ top_on_previous_step_ = step ? allocation_info_.top() : 0;
+ UpdateInlineAllocationLimit(0);
+}
+
+
+void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
+ inline_allocation_observers_.Add(observer);
+ UpdateInlineAllocationLimitStep();
+}
+
+
+void NewSpace::RemoveInlineAllocationObserver(
+ InlineAllocationObserver* observer) {
+ bool removed = inline_allocation_observers_.RemoveElement(observer);
+ // Only used in assertion. Suppress unused variable warning.
+ static_cast<void>(removed);
+ DCHECK(removed);
+ UpdateInlineAllocationLimitStep();
+}
+
+
void NewSpace::InlineAllocationStep(Address top, Address new_top) {
if (top_on_previous_step_) {
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
- heap()->ScheduleIdleScavengeIfNeeded(bytes_allocated);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
+ for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+ inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated);
+ }
top_on_previous_step_ = new_top;
}
}
@@ -1586,7 +1713,6 @@ void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
total_capacity_ = initial_capacity;
target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- maximum_committed_ = 0;
committed_ = false;
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
@@ -1609,6 +1735,7 @@ bool SemiSpace::Commit() {
start_, total_capacity_, executable())) {
return false;
}
+ AccountCommitted(total_capacity_);
NewSpacePage* current = anchor();
for (int i = 0; i < pages; i++) {
@@ -1632,6 +1759,8 @@ bool SemiSpace::Uncommit() {
total_capacity_)) {
return false;
}
+ AccountUncommitted(total_capacity_);
+
anchor()->set_next_page(anchor());
anchor()->set_prev_page(anchor());
@@ -1668,6 +1797,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
start_ + total_capacity_, delta, executable())) {
return false;
}
+ AccountCommitted(static_cast<intptr_t>(delta));
SetCapacity(new_capacity);
NewSpacePage* last_page = anchor()->prev_page();
DCHECK(last_page != anchor());
@@ -1698,6 +1828,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
+ AccountUncommitted(static_cast<intptr_t>(delta));
int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
@@ -1783,9 +1914,6 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
void SemiSpace::SetCapacity(int new_capacity) {
total_capacity_ = new_capacity;
- if (total_capacity_ > maximum_committed_) {
- maximum_committed_ = total_capacity_;
- }
}
@@ -2066,12 +2194,6 @@ size_t NewSpace::CommittedPhysicalMemory() {
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top() != NULL) {
- // This is safe (not going to deadlock) since Concatenate operations
- // are never performed on the same free lists at the same time in
- // reverse order. Furthermore, we only lock if the PagedSpace containing
- // the free list is know to be globally available, i.e., not local.
- if (!this->owner()->owner()->is_local()) mutex()->Lock();
- if (!category->owner()->owner()->is_local()) category->mutex()->Lock();
DCHECK(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
@@ -2080,40 +2202,46 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
category->end()->set_next(top());
}
set_top(category->top());
- base::NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
- if (!category->owner()->owner()->is_local()) category->mutex()->Unlock();
- if (!this->owner()->owner()->is_local()) mutex()->Unlock();
}
return free_bytes;
}
void FreeListCategory::Reset() {
- set_top(NULL);
- set_end(NULL);
- set_available(0);
+ set_top(nullptr);
+ set_end(nullptr);
+ available_ = 0;
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
- int sum = 0;
- FreeSpace* t = top();
- FreeSpace** n = &t;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = *n;
- sum += free_space->Size();
- *n = (*n)->next();
- } else {
- n = (*n)->next_address();
+ intptr_t sum = 0;
+ FreeSpace* prev_node = nullptr;
+ for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ cur_node = cur_node->next()) {
+ Page* page_for_node = Page::FromAddress(cur_node->address());
+ if (page_for_node == p) {
+ // FreeSpace node on eviction page found, unlink it.
+ int size = cur_node->size();
+ sum += size;
+ DCHECK((prev_node != nullptr) || (top() == cur_node));
+ if (cur_node == top()) {
+ set_top(cur_node->next());
+ }
+ if (cur_node == end()) {
+ set_end(prev_node);
+ }
+ if (prev_node != nullptr) {
+ prev_node->set_next(cur_node->next());
+ }
+ continue;
}
+ prev_node = cur_node;
}
- set_top(t);
- if (top() == NULL) {
- set_end(NULL);
- }
+ DCHECK_EQ(p->available_in_free_list(type_), sum);
+ p->add_available_in_free_list(type_, -sum);
available_ -= sum;
return sum;
}
@@ -2131,25 +2259,25 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
FreeSpace* node = top();
+ if (node == nullptr) return nullptr;
- if (node == NULL) return NULL;
-
- while (node != NULL &&
- Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= node->Size();
+ Page* page = Page::FromAddress(node->address());
+ while ((node != nullptr) && page->IsEvacuationCandidate()) {
+ available_ -= node->size();
+ page->add_available_in_free_list(type_, -(node->Size()));
node = node->next();
}
- if (node != NULL) {
+ if (node != nullptr) {
set_top(node->next());
*node_size = node->Size();
available_ -= *node_size;
} else {
- set_top(NULL);
+ set_top(nullptr);
}
- if (top() == NULL) {
- set_end(NULL);
+ if (top() == nullptr) {
+ set_end(nullptr);
}
return node;
@@ -2159,15 +2287,52 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
int* node_size) {
FreeSpace* node = PickNodeFromList(node_size);
- if (node != NULL && *node_size < size_in_bytes) {
+ if ((node != nullptr) && (*node_size < size_in_bytes)) {
Free(node, *node_size);
*node_size = 0;
- return NULL;
+ return nullptr;
}
return node;
}
+FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
+ int* node_size) {
+ FreeSpace* prev_non_evac_node = nullptr;
+ for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ cur_node = cur_node->next()) {
+ int size = cur_node->size();
+ Page* page_for_node = Page::FromAddress(cur_node->address());
+
+ if ((size >= size_in_bytes) || page_for_node->IsEvacuationCandidate()) {
+ // The node is either large enough or contained in an evacuation
+ // candidate. In both cases we need to unlink it from the list.
+ available_ -= size;
+ if (cur_node == top()) {
+ set_top(cur_node->next());
+ }
+ if (cur_node == end()) {
+ set_end(prev_non_evac_node);
+ }
+ if (prev_non_evac_node != nullptr) {
+ prev_non_evac_node->set_next(cur_node->next());
+ }
+ // For evacuation candidates we continue.
+ if (page_for_node->IsEvacuationCandidate()) {
+ page_for_node->add_available_in_free_list(type_, -size);
+ continue;
+ }
+ // Otherwise we have a large enough node and can return.
+ *node_size = size;
+ return cur_node;
+ }
+
+ prev_non_evac_node = cur_node;
+ }
+ return nullptr;
+}
+
+
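SearchForNodeInList and EvictFreeListItemsInList share the same unlink pattern: walk the singly linked list, remember the last kept node, and patch top/end when the removed node happens to be one of them. The pattern in isolation (illustrative Node type, not V8's FreeSpace):

#include <cstdio>

struct Node { int size; Node* next; };

// Remove every node matching `pred`, mirroring the prev_non_evac_node
// bookkeeping: prev only advances over nodes that stay in the list.
template <typename Pred>
void Unlink(Node** top, Node** end, Pred pred) {
  Node* prev = nullptr;
  for (Node* cur = *top; cur != nullptr; cur = cur->next) {
    if (pred(cur)) {
      if (cur == *top) *top = cur->next;
      if (cur == *end) *end = prev;
      if (prev != nullptr) prev->next = cur->next;
      continue;
    }
    prev = cur;
  }
}

int main() {
  Node c{3, nullptr}, b{2, &c}, a{1, &b};
  Node* top = &a;
  Node* end = &c;
  Unlink(&top, &end, [](Node* n) { return n->size == 2; });
  for (Node* n = top; n != nullptr; n = n->next) printf("%d ", n->size);
  printf("| end=%d\n", end->size);  // prints: 1 3 | end=3
  return 0;
}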
void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
free_space->set_next(top());
set_top(free_space);
@@ -2194,22 +2359,38 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
FreeList::FreeList(PagedSpace* owner)
: owner_(owner),
- heap_(owner->heap()),
- small_list_(this),
- medium_list_(this),
- large_list_(this),
- huge_list_(this) {
+ wasted_bytes_(0),
+ small_list_(this, kSmall),
+ medium_list_(this, kMedium),
+ large_list_(this, kLarge),
+ huge_list_(this, kHuge) {
Reset();
}
-intptr_t FreeList::Concatenate(FreeList* free_list) {
- intptr_t free_bytes = 0;
- free_bytes += small_list_.Concatenate(free_list->small_list());
- free_bytes += medium_list_.Concatenate(free_list->medium_list());
- free_bytes += large_list_.Concatenate(free_list->large_list());
- free_bytes += huge_list_.Concatenate(free_list->huge_list());
- return free_bytes;
+intptr_t FreeList::Concatenate(FreeList* other) {
+ intptr_t usable_bytes = 0;
+ intptr_t wasted_bytes = 0;
+
+ // This is safe (not going to deadlock) since Concatenate operations
+ // are never performed on the same free lists at the same time in
+ // reverse order. Furthermore, we only lock if the PagedSpace containing
+  // the free list is known to be globally available, i.e., not local.
+ if (!owner()->is_local()) mutex_.Lock();
+ if (!other->owner()->is_local()) other->mutex()->Lock();
+
+ wasted_bytes = other->wasted_bytes_;
+ wasted_bytes_ += wasted_bytes;
+ other->wasted_bytes_ = 0;
+
+ usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
+ usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
+ usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
+ usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+
+ if (!other->owner()->is_local()) other->mutex()->Unlock();
+ if (!owner()->is_local()) mutex_.Unlock();
+ return usable_bytes + wasted_bytes;
}
@@ -2218,19 +2399,21 @@ void FreeList::Reset() {
medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
+ ResetStats();
}
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
- heap_->CreateFillerObjectAt(start, size_in_bytes);
+ owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
if (size_in_bytes <= kSmallListMin) {
page->add_non_available_small_blocks(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
@@ -2251,89 +2434,46 @@ int FreeList::Free(Address start, int size_in_bytes) {
page->add_available_in_huge_free_list(size_in_bytes);
}
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
return 0;
}
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+ FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+ if (node != nullptr) {
+ Page::FromAddress(node->address())
+ ->add_available_in_free_list(category, -(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ return node;
+}
+
+
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeSpace* node = NULL;
- Page* page = NULL;
+ FreeSpace* node = nullptr;
+ Page* page = nullptr;
if (size_in_bytes <= kSmallAllocationMax) {
- node = small_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_small_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kSmall, node_size);
+ if (node != nullptr) return node;
}
if (size_in_bytes <= kMediumAllocationMax) {
- node = medium_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_medium_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kMedium, node_size);
+ if (node != nullptr) return node;
}
if (size_in_bytes <= kLargeAllocationMax) {
- node = large_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_large_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
- }
-
- int huge_list_available = huge_list_.available();
- FreeSpace* top_node = huge_list_.top();
- for (FreeSpace** cur = &top_node; *cur != NULL;
- cur = (*cur)->next_address()) {
- FreeSpace* cur_node = *cur;
- while (cur_node != NULL &&
- Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- int size = cur_node->Size();
- huge_list_available -= size;
- page = Page::FromAddress(cur_node->address());
- page->add_available_in_huge_free_list(-size);
- cur_node = cur_node->next();
- }
-
- *cur = cur_node;
- if (cur_node == NULL) {
- huge_list_.set_end(NULL);
- break;
- }
-
- int size = cur_node->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- node = *cur;
- *cur = node->next();
- *node_size = size;
- huge_list_available -= size;
- page = Page::FromAddress(node->address());
- page->add_available_in_huge_free_list(-size);
- break;
- }
+ node = FindNodeIn(kLarge, node_size);
+ if (node != nullptr) return node;
}
- huge_list_.set_top(top_node);
- if (huge_list_.top() == NULL) {
- huge_list_.set_end(NULL);
- }
- huge_list_.set_available(huge_list_available);
-
- if (node != NULL) {
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+ if (node != nullptr) {
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2360,7 +2500,38 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+
+FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
+ hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ FreeSpace* node = nullptr;
+ int node_size = 0;
+ // Try to find a node that fits exactly.
+ node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
+  // If no node could be found, get as much memory as possible.
+ if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
+ if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
+ if (node != nullptr) {
+    // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
+    // size larger than the minimum size required for FreeSpace, and (b) get
+    // a block that can actually be freed into some FreeList later on.
+ if (hint_size_in_bytes <= kSmallListMin) {
+ hint_size_in_bytes = kSmallListMin + kPointerSize;
+ }
+    // Give back leftovers that were not required by {hint_size_in_bytes}.
+ intptr_t left_over = node_size - hint_size_in_bytes;
+
+ // Do not bother to return anything below {kSmallListMin} as it would be
+    // immediately discarded anyway.
+ if (left_over > kSmallListMin) {
+ Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
+ node->set_size(static_cast<int>(hint_size_in_bytes));
+ }
+ }
return node;
}
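To make the leftover handling above concrete, a worked example with illustrative numbers, assuming a 32-bit build (kPointerSize = 4, so kSmallListMin = 0x1f * 4 = 124):

// Hypothetical walk-through of TryRemoveMemory(24):
//   hint_size_in_bytes = RoundDown(24, 4) = 24
//   suppose FindNodeFor() returns a 512-byte node (node_size = 512)
//   24 <= kSmallListMin, so the hint is bumped to 124 + 4 = 128, large enough
//   for a valid FreeSpace and re-freeable into a list later
//   left_over = 512 - 128 = 384 > kSmallListMin, so 384 bytes are freed back
//   and the node is resized to 128 bytes before being returned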
@@ -2440,17 +2611,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
- p->set_available_in_huge_free_list(0);
-
if (sum < p->area_size()) {
sum += small_list_.EvictFreeListItemsInList(p) +
medium_list_.EvictFreeListItemsInList(p) +
large_list_.EvictFreeListItemsInList(p);
- p->set_available_in_small_free_list(0);
- p->set_available_in_medium_free_list(0);
- p->set_available_in_large_free_list(0);
}
-
return sum;
}
@@ -2484,9 +2649,6 @@ intptr_t FreeListCategory::SumFreeList() {
}
-static const int kVeryLongFreeList = 500;
-
-
int FreeListCategory::FreeListLength() {
int length = 0;
FreeSpace* cur = top();
@@ -2499,12 +2661,14 @@ int FreeListCategory::FreeListLength() {
}
+bool FreeListCategory::IsVeryLong() {
+ return FreeListLength() == kVeryLongFreeList;
+}
+
+
bool FreeList::IsVeryLong() {
- if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
- return false;
+ return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
+ large_list_.IsVeryLong() || huge_list_.IsVeryLong();
}
@@ -2529,20 +2693,13 @@ void PagedSpace::PrepareForMarkCompact() {
// on the first allocation after the sweep.
EmptyAllocationInfo();
- // This counter will be increased for pages which will be swept by the
- // sweeper threads.
- unswept_free_bytes_ = 0;
-
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
}
intptr_t PagedSpace::SizeOfObjects() {
- DCHECK(!FLAG_concurrent_sweeping ||
- heap()->mark_compact_collector()->sweeping_in_progress() ||
- (unswept_free_bytes_ == 0));
- const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
+ const intptr_t size = Size() - (limit() - top());
DCHECK_GE(size, 0);
USE(size);
return size;
@@ -2568,7 +2725,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
if (Page::FromAllocationTop(allocation_info_.top())
@@ -2578,14 +2735,13 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.set_top(nullptr);
+ allocation_info_.set_limit(nullptr);
}
}
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
- int size_in_bytes) {
+HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -2595,7 +2751,17 @@ HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
// entries.
return free_list_.Allocate(size_in_bytes);
}
- return NULL;
+ return nullptr;
+}
+
+
+HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->SweepAndRefill(this);
+ return free_list_.Allocate(size_in_bytes);
+ }
+ return nullptr;
}
@@ -2607,22 +2773,17 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
- collector->RefillFreeList(this);
+ RefillFreeList();
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
- int free_chunk = collector->SweepInParallel(this, size_in_bytes);
- collector->RefillFreeList(this);
- if (free_chunk >= size_in_bytes) {
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- // We should be able to allocate an object here since we just freed that
- // much memory.
- DCHECK(object != NULL);
- if (object != NULL) return object;
- }
+ collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+ RefillFreeList();
+ object = free_list_.Allocate(size_in_bytes);
+ if (object != nullptr) return object;
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2632,21 +2793,21 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
  // elements from their free-lists.
- HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
return object;
}
// Try to expand the space and allocate in the new next page.
if (Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (size_in_bytes <= free_list_.available()));
+ (size_in_bytes <= free_list_.Available()));
return free_list_.Allocate(size_in_bytes);
}
// If sweeper threads are active, wait for them at that point and steal
  // elements from their free-lists. Allocation may still fail, which
// would indicate that there is not enough memory for the given allocation.
- return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ return SweepAndRetryAllocation(size_in_bytes);
}
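The rewritten slow path now follows a fixed retry ladder; a simplified summary (not verbatim control flow):

// PagedSpace::SlowAllocateRaw, simplified:
//   1. Sweeping in progress: RefillFreeList() and retry the free list; then
//      SweepInParallel() on the main thread, refill, and retry again.
//   2. Near the old-generation limit: SweepAndRetryAllocation(), which
//      completes sweeping and may still return nullptr.
//   3. Otherwise Expand() the space and allocate from the fresh page.
//   4. Last resort: SweepAndRetryAllocation() once more.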
@@ -2799,11 +2960,10 @@ void PagedSpace::ReportStatistics() {
// -----------------------------------------------------------------------------
// MapSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
+#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
+#endif
// -----------------------------------------------------------------------------
@@ -2843,7 +3003,6 @@ LargeObjectSpace::~LargeObjectSpace() {}
bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
- maximum_committed_ = 0;
page_count_ = 0;
objects_size_ = 0;
chunk_map_.Clear();
@@ -2881,15 +3040,12 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
DCHECK(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
+ AccountCommitted(static_cast<intptr_t>(page->size()));
objects_size_ += object_size;
page_count_++;
page->set_next_page(first_page_);
first_page_ = page;
- if (size_ > maximum_committed_) {
- maximum_committed_ = size_;
- }
-
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
@@ -2993,6 +3149,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
heap()->isolate());
size_ -= static_cast<int>(page->size());
+ AccountUncommitted(static_cast<intptr_t>(page->size()));
objects_size_ -= object->Size();
page_count_--;
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 95e3b7c602..df3adebe1d 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -19,6 +19,7 @@
namespace v8 {
namespace internal {
+class CompactionSpaceCollection;
class Isolate;
// -----------------------------------------------------------------------------
@@ -86,6 +87,9 @@ class Isolate;
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
+ DCHECK((0 < size) && (size <= code_space->AreaSize()))
+
#define DCHECK_PAGE_OFFSET(offset) \
DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
@@ -264,7 +268,7 @@ class Bitmap {
// Clears all bits starting from {cell_base_index} up to and excluding
// {index}. Note that {cell_base_index} is required to be cell aligned.
void ClearRange(uint32_t cell_base_index, uint32_t index) {
- DCHECK_EQ(IndexInCell(cell_base_index), 0);
+ DCHECK_EQ(IndexInCell(cell_base_index), 0u);
DCHECK_GE(index, cell_base_index);
uint32_t start_cell_index = IndexToCell(cell_base_index);
uint32_t end_cell_index = IndexToCell(index);
@@ -323,6 +327,10 @@ class MemoryChunk {
// still has to be performed.
PRE_FREED,
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -773,6 +781,9 @@ class MemoryChunk {
};
+enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
+
+
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 1MB. Large object pages may be larger.
//
@@ -832,11 +843,16 @@ class Page : public MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- // Maximum object size that fits in a page. Objects larger than that size
- // are allocated in large object space and are never moved in memory. This
- // also applies to new space allocation, since objects are never migrated
- // from new space to large object space. Takes double alignment into account.
- static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+ // Maximum object size that gets allocated into regular pages. Objects larger
+ // than that size are allocated in large object space and are never moved in
+ // memory. This also applies to new space allocation, since objects are never
+ // migrated from new space to large object space. Takes double alignment into
+ // account.
+ // TODO(hpayer): This limit should be way smaller but we currently have
+ // short living objects >256K.
+ static const int kMaxRegularHeapObjectSize = 600 * KB;
+
+ static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -874,6 +890,42 @@ class Page : public MemoryChunk {
#undef FRAGMENTATION_STATS_ACCESSORS
+ void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
+ switch (type) {
+ case kSmall:
+ add_available_in_small_free_list(bytes);
+ break;
+ case kMedium:
+ add_available_in_medium_free_list(bytes);
+ break;
+ case kLarge:
+ add_available_in_large_free_list(bytes);
+ break;
+ case kHuge:
+ add_available_in_huge_free_list(bytes);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ intptr_t available_in_free_list(FreeListCategoryType type) {
+ switch (type) {
+ case kSmall:
+ return available_in_small_free_list();
+ case kMedium:
+ return available_in_medium_free_list();
+ case kLarge:
+ return available_in_large_free_list();
+ case kHuge:
+ return available_in_huge_free_list();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return 0;
+ }
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -904,7 +956,11 @@ class LargePage : public MemoryChunk {
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap), id_(id), executable_(executable) {}
+ : heap_(heap),
+ id_(id),
+ executable_(executable),
+ committed_(0),
+ max_committed_(0) {}
virtual ~Space() {}
@@ -916,6 +972,12 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+  // Return the total amount of committed memory for this space, i.e.,
+  // allocatable memory and page headers.
+ virtual intptr_t CommittedMemory() { return committed_; }
+
+ virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
+
// Returns allocated size.
virtual intptr_t Size() = 0;
@@ -923,9 +985,6 @@ class Space : public Malloced {
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
- // Return the total amount of memory committed for new space.
- virtual intptr_t CommittedMemory() = 0;
-
// Approximate amount of physical memory committed for this space.
virtual size_t CommittedPhysicalMemory() = 0;
@@ -944,10 +1003,29 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
+ protected:
+ void AccountCommitted(intptr_t bytes) {
+ DCHECK_GE(bytes, 0);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(intptr_t bytes) {
+ DCHECK_GE(bytes, 0);
+ committed_ -= bytes;
+ DCHECK_GE(committed_, 0);
+ }
+
private:
Heap* heap_;
AllocationSpace id_;
Executability executable_;
+
+ // Keeps track of committed memory in a space.
+ intptr_t committed_;
+ intptr_t max_committed_;
};
@@ -1168,7 +1246,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
}
// Returns an indication of whether a pointer is in a space that has
@@ -1244,7 +1322,7 @@ class MemoryAllocator {
static int PageAreaSize(AllocationSpace space) {
DCHECK_NE(LO_SPACE, space);
return (space == CODE_SPACE) ? CodePageAreaSize()
- : Page::kMaxRegularHeapObjectSize;
+ : Page::kAllocatableMemory;
}
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
@@ -1344,7 +1422,7 @@ class HeapObjectIterator : public ObjectIterator {
// skipping the special garbage section of which there is one per space.
// Returns NULL when the iteration has ended.
inline HeapObject* Next();
- virtual inline HeapObject* next_object();
+ inline HeapObject* next_object() override;
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
@@ -1441,19 +1519,11 @@ class AllocationInfo {
// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
class AllocationStats BASE_EMBEDDED {
public:
AllocationStats() { Clear(); }
@@ -1463,26 +1533,20 @@ class AllocationStats BASE_EMBEDDED {
capacity_ = 0;
max_capacity_ = 0;
size_ = 0;
- waste_ = 0;
}
- void ClearSizeWaste() {
- size_ = capacity_;
- waste_ = 0;
- }
+ void ClearSize() { size_ = capacity_; }
- // Reset the allocation statistics (i.e., available = capacity with no
- // wasted or allocated bytes).
+ // Reset the allocation statistics (i.e., available = capacity with no wasted
+ // or allocated bytes).
void Reset() {
size_ = 0;
- waste_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
intptr_t MaxCapacity() { return max_capacity_; }
intptr_t Size() { return size_; }
- intptr_t Waste() { return waste_; }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
@@ -1514,20 +1578,13 @@ class AllocationStats BASE_EMBEDDED {
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
- }
-
- // Waste free bytes (available -> waste).
- void WasteBytes(int size_in_bytes) {
- DCHECK(size_in_bytes >= 0);
- waste_ += size_in_bytes;
+ DCHECK_GE(size_, 0);
}
// Merge {other} into {this}.
void Merge(const AllocationStats& other) {
capacity_ += other.capacity_;
size_ += other.size_;
- waste_ += other.waste_;
if (other.max_capacity_ > max_capacity_) {
max_capacity_ = other.max_capacity_;
}
@@ -1536,127 +1593,119 @@ class AllocationStats BASE_EMBEDDED {
void DecreaseCapacity(intptr_t size_in_bytes) {
capacity_ -= size_in_bytes;
DCHECK_GE(capacity_, 0);
+ DCHECK_GE(capacity_, size_);
}
void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
private:
+ // |capacity_|: The number of object-area bytes (i.e., not including page
+ // bookkeeping structures) currently in the space.
intptr_t capacity_;
+
+ // |max_capacity_|: The maximum capacity ever observed.
intptr_t max_capacity_;
+
+ // |size_|: The number of allocated bytes.
intptr_t size_;
- intptr_t waste_;
};
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-
-// The free list category holds a pointer to the top element and a pointer to
-// the end element of the linked list of free memory blocks.
+// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- explicit FreeListCategory(FreeList* owner)
- : top_(0), end_(NULL), available_(0), owner_(owner) {}
-
+ explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
+ : type_(type),
+ top_(nullptr),
+ end_(nullptr),
+ available_(0),
+ owner_(owner) {}
+
+ // Concatenates {category} into {this}.
+ //
+ // Note: Thread-safe.
intptr_t Concatenate(FreeListCategory* category);
void Reset();
void Free(FreeSpace* node, int size_in_bytes);
+ // Pick a node from the list.
FreeSpace* PickNodeFromList(int* node_size);
+
+ // Pick a node from the list and compare it against {size_in_bytes}. If the
+  // node's size is greater than or equal, return the node; otherwise null.
FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
+  // Searches the list for a node of at least {size_in_bytes}.
+ FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
+
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeSpace* top() const {
- return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
- }
+ bool IsEmpty() { return top() == nullptr; }
- void set_top(FreeSpace* top) {
- base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
- }
-
- FreeSpace* end() const { return end_; }
- void set_end(FreeSpace* end) { end_ = end; }
-
- int* GetAvailableAddress() { return &available_; }
+ FreeList* owner() { return owner_; }
int available() const { return available_; }
- void set_available(int available) { available_ = available; }
-
- base::Mutex* mutex() { return &mutex_; }
-
- bool IsEmpty() { return top() == 0; }
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
+ bool IsVeryLong();
#endif
- FreeList* owner() { return owner_; }
-
private:
- // top_ points to the top FreeSpace* in the free list category.
- base::AtomicWord top_;
+  // For debug builds we accurately compute free list lengths up until
+ // {kVeryLongFreeList} by manually walking the list.
+ static const int kVeryLongFreeList = 500;
+
+ FreeSpace* top() { return top_.Value(); }
+ void set_top(FreeSpace* top) { top_.SetValue(top); }
+
+ FreeSpace* end() const { return end_; }
+ void set_end(FreeSpace* end) { end_ = end; }
+
+ // |type_|: The type of this free list category.
+ FreeListCategoryType type_;
+
+ // |top_|: Points to the top FreeSpace* in the free list category.
+ AtomicValue<FreeSpace*> top_;
+
+ // |end_|: Points to the end FreeSpace* in the free list category.
FreeSpace* end_;
- base::Mutex mutex_;
- // Total available bytes in all blocks of this free list category.
+ // |available_|: Total available bytes in all blocks of this free list
+ // category.
int available_;
+ // |owner_|: The owning free list of this category.
FreeList* owner_;
};
-
-// The free list for the old space. The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
+// A free list maintaining free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which
-// is divided up into rough categories to cut down on waste. Having finer
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
-// The old space free list is organized in categories.
-// 1-31 words: Such small free areas are discarded for efficiency reasons.
-// They can be reclaimed by the compactor. However the distance between top
-// and limit may be this small.
-// 32-255 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 1-31 words in size. These
-// spaces are called small.
-// 256-2047 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 32-255 words in size. These
-// spaces are called medium.
-// 1048-16383 words: There is a list of spaces this large. It is used for top
-// and limit when the object we need to allocate is 256-2047 words in size.
-// These spaces are call large.
-// At least 16384 words. This list is for objects of 2048 words or larger.
-// Empty pages are added to this list. These spaces are called huge.
+// The free list is organized in categories as follows:
+// 1-31 words (too small): Such small free areas are discarded for efficiency
+// reasons. They can be reclaimed by the compactor. However the distance
+// between top and limit may be this small.
+// 32-255 words (small): Used for allocating free space between 1-31 words in
+// size.
+// 256-2047 words (medium): Used for allocating free space between 32-255 words
+// in size.
+// 2048-16383 words (large): Used for allocating free space between 256-2047
+// words in size.
+// At least 16384 words (huge): This list is for objects of 2048 words or
+// larger. Empty pages are also added to this list.
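A minimal sketch of the categorization described above, using a hypothetical helper (the patch itself dispatches on kSmallListMax and friends inside FreeList::Free):

// Illustrative only: maps a block size in words to its category. Callers are
// assumed to have already discarded blocks under 32 words as waste.
FreeListCategoryType SelectCategory(int size_in_words) {
  if (size_in_words <= 255) return kSmall;    // 32-255 words
  if (size_in_words <= 2047) return kMedium;  // 256-2047 words
  if (size_in_words <= 16383) return kLarge;  // 2048-16383 words
  return kHuge;                               // >= 16384 words
}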
class FreeList {
public:
- explicit FreeList(PagedSpace* owner);
-
- intptr_t Concatenate(FreeList* free_list);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() {
- return small_list_.available() + medium_list_.available() +
- large_list_.available() + huge_list_.available();
- }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // i.e., its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
static inline int GuaranteedAllocatable(int maximum_freed) {
@@ -1672,40 +1721,71 @@ class FreeList {
return maximum_freed;
}
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+ explicit FreeList(PagedSpace* owner);
+
+ // The method concatenates {other} into {this} and returns the added bytes,
+ // including waste.
+ //
+ // Note: Thread-safe.
+ intptr_t Concatenate(FreeList* other);
+
+ // Adds a node on the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+  // bytes that were not added to the free list because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ int Free(Address start, int size_in_bytes);
+
+ // Allocate a block of size {size_in_bytes} from the free list. The block is
+  // uninitialized. A failure is returned if no block is available. The size
+ // should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ // Clear the free list.
+ void Reset();
+
+ void ResetStats() { wasted_bytes_ = 0; }
+
+ // Return the number of bytes available on the free list.
+ intptr_t Available() {
+ return small_list_.available() + medium_list_.available() +
+ large_list_.available() + huge_list_.available();
+ }
+
+  // The method tries to find a {FreeSpace} node of at least {hint_size_in_bytes}
+ // size in the free list category exactly matching the size. If no suitable
+ // node could be found, the method falls back to retrieving a {FreeSpace}
+ // from the large or huge free list category.
+ //
+ // Can be used concurrently.
+ MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
+
bool IsEmpty() {
return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
large_list_.IsEmpty() && huge_list_.IsEmpty();
}
-#ifdef DEBUG
- void Zap();
- intptr_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
// Used after booting the VM.
void RepairLists(Heap* heap);
intptr_t EvictFreeListItems(Page* p);
bool ContainsPageFreeListItems(Page* p);
- FreeListCategory* small_list() { return &small_list_; }
- FreeListCategory* medium_list() { return &medium_list_; }
- FreeListCategory* large_list() { return &large_list_; }
- FreeListCategory* huge_list() { return &huge_list_; }
-
PagedSpace* owner() { return owner_; }
+ intptr_t wasted_bytes() { return wasted_bytes_; }
+ base::Mutex* mutex() { return &mutex_; }
+
+#ifdef DEBUG
+ void Zap();
+ intptr_t SumFreeLists();
+ bool IsVeryLong();
+#endif
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+ static const int kMaxBlockSize = Page::kAllocatableMemory;
static const int kSmallListMin = 0x1f * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
@@ -1716,9 +1796,28 @@ class FreeList {
static const int kLargeAllocationMax = kMediumListMax;
FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+ FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+ switch (category) {
+ case kSmall:
+ return &small_list_;
+ case kMedium:
+ return &medium_list_;
+ case kLarge:
+ return &large_list_;
+ case kHuge:
+ return &huge_list_;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
PagedSpace* owner_;
- Heap* heap_;
+ base::Mutex mutex_;
+ intptr_t wasted_bytes_;
FreeListCategory small_list_;
FreeListCategory medium_list_;
FreeListCategory large_list_;
@@ -1773,10 +1872,12 @@ STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
class PagedSpace : public Space {
public:
+ static const intptr_t kCompactionMemoryWanted = 500 * KB;
+
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
- virtual ~PagedSpace() { TearDown(); }
+ ~PagedSpace() override { TearDown(); }
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
@@ -1811,13 +1912,6 @@ class PagedSpace : public Space {
// Current capacity without growing (Size() + Available()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
- // Total amount of memory committed for this space. For paged
- // spaces this equals the capacity.
- intptr_t CommittedMemory() override { return Capacity(); }
-
- // The maximum amount of memory ever committed for this space.
- intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
-
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -1829,7 +1923,8 @@ class PagedSpace : public Space {
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals.
void ClearStats() {
- accounting_stats_.ClearSizeWaste();
+ accounting_stats_.ClearSize();
+ free_list_.ResetStats();
ResetFreeListStatistics();
}
@@ -1842,7 +1937,7 @@ class PagedSpace : public Space {
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- intptr_t Available() override { return free_list_.available(); }
+ intptr_t Available() override { return free_list_.Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
@@ -1855,9 +1950,8 @@ class PagedSpace : public Space {
intptr_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation. They do not include the
- // free bytes that were not found at all due to lazy sweeping.
- virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+ // due to being too small to use for allocation.
+ virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@@ -1896,7 +1990,6 @@ class PagedSpace : public Space {
int Free(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes);
accounting_stats_.DeallocateBytes(size_in_bytes);
- accounting_stats_.WasteBytes(wasted);
return size_in_bytes - wasted;
}
@@ -1959,22 +2052,6 @@ class PagedSpace : public Space {
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
}
- void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
-
- void IncreaseUnsweptFreeBytes(Page* p) {
- DCHECK(ShouldBeSweptBySweeperThreads(p));
- unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
- }
-
- void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
-
- void DecreaseUnsweptFreeBytes(Page* p) {
- DCHECK(ShouldBeSweptBySweeperThreads(p));
- unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
- }
-
- void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
-
// This function tries to steal size_in_bytes memory from the sweeper threads
// free-lists. If it does not succeed stealing enough memory, it will wait
// for the sweeper threads to finish sweeping.
@@ -1988,7 +2065,7 @@ class PagedSpace : public Space {
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void EvictEvacuationCandidatesFromFreeLists();
+ void EvictEvacuationCandidatesFromLinearAllocationArea();
bool CanExpand(size_t size);
@@ -1998,15 +2075,26 @@ class PagedSpace : public Space {
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
+ virtual bool is_local() { return false; }
+
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
- void MoveOverFreeMemory(PagedSpace* other);
+ void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
+ intptr_t limit = kCompactionMemoryWanted);
- virtual bool is_local() { return false; }
+ // Refills the free list from the corresponding free list filled by the
+ // sweeper.
+ virtual void RefillFreeList();
protected:
+ void AddMemory(Address start, intptr_t size);
+
+ FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
+
+ void MoveOverFreeMemory(PagedSpace* other);
+
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
@@ -2037,7 +2125,7 @@ class PagedSpace : public Space {
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
- MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+ MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
@@ -2057,10 +2145,6 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
- // The number of free bytes which could be reclaimed by advancing the
- // concurrent sweeper threads.
- intptr_t unswept_free_bytes_;
-
// The sweeper threads iterate over the list of pointer and data space pages
// and sweep these pages concurrently. They will stop sweeping after the
// end_of_unswept_pages_ page.
@@ -2071,6 +2155,9 @@ class PagedSpace : public Space {
friend class MarkCompactCollector;
friend class PageIterator;
+
+ // Used in cctest.
+ friend class HeapTester;
};
@@ -2124,7 +2211,7 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+ static const int kAreaSize = Page::kAllocatableMemory;
inline NewSpacePage* next_page() {
return static_cast<NewSpacePage*>(next_chunk());
@@ -2286,11 +2373,6 @@ class SemiSpace : public Space {
intptr_t SizeOfObjects() override { return Size(); }
- intptr_t CommittedMemory() override {
- UNREACHABLE();
- return 0;
- }
-
intptr_t Available() override {
UNREACHABLE();
return 0;
@@ -2335,9 +2417,6 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
- // Returns the maximum amount of memory ever committed by the semi space.
- size_t MaximumCommittedMemory() { return maximum_committed_; }
-
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -2357,8 +2436,6 @@ class SemiSpace : public Space {
int maximum_total_capacity_;
int initial_total_capacity_;
- intptr_t maximum_committed_;
-
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
@@ -2393,7 +2470,7 @@ class SemiSpaceIterator : public ObjectIterator {
inline HeapObject* Next();
// Implementation of the ObjectIterator functions.
- virtual inline HeapObject* next_object();
+ inline HeapObject* next_object() override;
private:
void Initialize(Address start, Address end);
@@ -2432,6 +2509,43 @@ class NewSpacePageIterator BASE_EMBEDDED {
NewSpacePage* last_page_;
};
+// -----------------------------------------------------------------------------
+// Allows observation of inline allocation in the new space.
+class InlineAllocationObserver {
+ public:
+ explicit InlineAllocationObserver(intptr_t step_size)
+ : step_size_(step_size), bytes_to_next_step_(step_size) {
+ DCHECK(step_size >= kPointerSize && (step_size & kHeapObjectTagMask) == 0);
+ }
+ virtual ~InlineAllocationObserver() {}
+
+ private:
+ intptr_t step_size() const { return step_size_; }
+
+ // Pure virtual method provided by the subclasses that gets called when more
+ // than step_size byte have been allocated.
+ virtual void Step(int bytes_allocated) = 0;
+
+  // Called each time the new space does an inline allocation step. This may
+  // happen more frequently than every step_size bytes (e.g. when there are
+  // multiple observers, or when a page or space boundary is encountered). The
+ // Step method is only called once more than step_size bytes have been
+ // allocated.
+ void InlineAllocationStep(int bytes_allocated) {
+ bytes_to_next_step_ -= bytes_allocated;
+ if (bytes_to_next_step_ <= 0) {
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_));
+ bytes_to_next_step_ = step_size_;
+ }
+ }
+
+ intptr_t step_size_;
+ intptr_t bytes_to_next_step_;
+
+ friend class NewSpace;
+
+ DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
+};
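A minimal usage sketch for the observer mechanism, assuming a hypothetical observer that logs roughly every 64 KB of inline allocation (NewSpace wiring shown as comments):

// Hypothetical subclass; Step() fires once more than step_size bytes have
// accumulated across InlineAllocationStep() notifications.
class LoggingObserver : public InlineAllocationObserver {
 public:
  LoggingObserver() : InlineAllocationObserver(64 * KB) {}

 private:
  void Step(int bytes_allocated) override {
    // bytes_allocated is at least step_size; the internal counter then resets.
    PrintF("new-space allocated ~%d bytes since last step\n", bytes_allocated);
  }
};

// Registration through the NewSpace API added below:
//   LoggingObserver observer;
//   new_space->AddInlineAllocationObserver(&observer);
//   ...
//   new_space->RemoveInlineAllocationObserver(&observer);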
// -----------------------------------------------------------------------------
// The young generation space.
@@ -2512,16 +2626,15 @@ class NewSpace : public Space {
return to_space_.TotalCapacity();
}
- // Return the total amount of memory committed for new space.
+ // Committed memory for NewSpace is the committed memory of both semi-spaces
+ // combined.
intptr_t CommittedMemory() override {
- if (from_space_.is_committed()) return 2 * Capacity();
- return TotalCapacity();
+ return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
- // Return the total amount of memory committed for new space.
- intptr_t MaximumCommittedMemory() {
- return to_space_.MaximumCommittedMemory() +
- from_space_.MaximumCommittedMemory();
+ intptr_t MaximumCommittedMemory() override {
+ return from_space_.MaximumCommittedMemory() +
+ to_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
@@ -2622,10 +2735,21 @@ class NewSpace : public Space {
void ResetAllocationInfo();
void UpdateInlineAllocationLimit(int size_in_bytes);
- void LowerInlineAllocationLimit(intptr_t step) {
- inline_allocation_limit_step_ = step;
+ void UpdateInlineAllocationLimitStep();
+
+ // Allows observation of inline allocation. The observer->Step() method gets
+ // called after every step_size bytes have been allocated (approximately).
+  // This works by lowering the allocation limit and raising it again after
+  // each step.
+ void AddInlineAllocationObserver(InlineAllocationObserver* observer);
+
+ // Removes a previously installed observer.
+ void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
+
+ void DisableInlineAllocationSteps() {
+ inline_allocation_limit_step_ = 0;
+ top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
- top_on_previous_step_ = step ? allocation_info_.top() : 0;
}
// Get the extent of the inactive semispace (for use as a marking stack,
@@ -2695,10 +2819,6 @@ class NewSpace : public Space {
bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
- inline intptr_t inline_allocation_limit_step() {
- return inline_allocation_limit_step_;
- }
-
SemiSpace* active_space() { return &to_space_; }
private:
@@ -2724,11 +2844,13 @@ class NewSpace : public Space {
// mark-compact collection.
AllocationInfo allocation_info_;
- // When incremental marking is active we will set allocation_info_.limit
- // to be lower than actual limit and then will gradually increase it
- // in steps to guarantee that we do incremental marking steps even
- // when all allocation is performed from inlined generated code.
+ // When inline allocation stepping is active, either because of incremental
+ // marking or because of idle scavenge, we 'interrupt' inline allocation every
+ // once in a while. This is done by setting allocation_info_.limit to be lower
+  // than the actual limit and increasing it in steps to guarantee that the
+ // observers are notified periodically.
intptr_t inline_allocation_limit_step_;
+ List<InlineAllocationObserver*> inline_allocation_observers_;
Address top_on_previous_step_;
@@ -2738,8 +2860,7 @@ class NewSpace : public Space {
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
// If we are doing inline allocation in steps, this method performs the 'step'
- // operation. Right now incremental marking is the only consumer of inline
- // allocation steps. top is the memory address of the bump pointer at the last
+ // operation. top is the memory address of the bump pointer at the last
  // inline allocation (i.e. it determines the number of bytes actually
  // allocated since the last step). new_top is the address of the bump pointer
// where the next byte is going to be allocated from. top and new_top may be
@@ -2763,11 +2884,16 @@ class CompactionSpace : public PagedSpace {
Free(start, size_in_bytes);
}
- virtual bool is_local() { return true; }
+ bool is_local() override { return true; }
+
+ void RefillFreeList() override;
protected:
// The space is temporary and not included in any snapshots.
- virtual bool snapshotable() { return false; }
+ bool snapshotable() override { return false; }
+
+ MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
+ int size_in_bytes) override;
};
@@ -2776,7 +2902,9 @@ class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
+ duration_(0.0),
+ bytes_compacted_(0) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@@ -2791,9 +2919,21 @@ class CompactionSpaceCollection : public Malloced {
return nullptr;
}
+ void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
+ duration_ += duration;
+ bytes_compacted_ += bytes_compacted;
+ }
+
+ double duration() const { return duration_; }
+ intptr_t bytes_compacted() const { return bytes_compacted_; }
+
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
+
+  // Bookkeeping.
+ double duration_;
+ intptr_t bytes_compacted_;
};
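The two counters are plain bookkeeping; a hedged sketch of how a caller might turn them into a compaction speed estimate:

// Illustrative only: bytes per millisecond, guarding against zero duration.
double CompactionSpeed(const CompactionSpaceCollection& spaces) {
  if (spaces.duration() <= 0.0) return 0.0;
  return static_cast<double>(spaces.bytes_compacted()) / spaces.duration();
}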
@@ -2831,7 +2971,7 @@ class MapSpace : public PagedSpace {
  // TODO(1600): this limit is artificial just to keep code compilable
static const int kMaxMapPageIndex = 1 << 16;
- virtual int RoundSizeDownToObjectAlignment(int size) {
+ int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo32(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
@@ -2839,11 +2979,12 @@ class MapSpace : public PagedSpace {
}
}
- protected:
- virtual void VerifyObject(HeapObject* obj);
+#ifdef VERIFY_HEAP
+ void VerifyObject(HeapObject* obj) override;
+#endif
private:
- static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+ static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
@@ -2889,10 +3030,6 @@ class LargeObjectSpace : public Space {
intptr_t SizeOfObjects() override { return objects_size_; }
- intptr_t MaximumCommittedMemory() { return maximum_committed_; }
-
- intptr_t CommittedMemory() override { return Size(); }
-
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -2935,7 +3072,6 @@ class LargeObjectSpace : public Space {
bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
- intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
intptr_t size_; // allocated bytes
@@ -2994,7 +3130,7 @@ struct CommentStatistic {
static const int kMaxComments = 64;
};
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SPACES_H_
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
index 1f3dda21d2..e11ad87087 100644
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -48,7 +48,7 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
}
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STORE_BUFFER_INL_H_
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 37a78eb075..08dcebfc08 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -227,7 +227,7 @@ class DontMoveStoreBufferEntriesScope {
StoreBuffer* store_buffer_;
bool stored_state_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STORE_BUFFER_H_
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index ea8380baa7..a8db4d18a6 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -128,6 +128,7 @@ class BreakIterator {
BreakIterator();
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_I18N_H_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 17ae01ad53..6c9c538cc9 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -559,6 +559,7 @@ Operand::Operand(Immediate imm) {
set_modrm(0, ebp);
set_dispr(imm.x_, imm.rmode_);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 9f64a6005f..59d0025939 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1307,6 +1307,14 @@ void Assembler::bsr(Register dst, const Operand& src) {
}
+void Assembler::bsf(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBC);
+ emit_operand(dst, src);
+}
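bsf (bit scan forward, opcode 0F BC) complements the existing bsr: it yields the index of the least significant set bit. A hedged usage sketch in MacroAssembler style (label handling is illustrative):

// If the source is zero, bsf sets ZF and leaves the destination undefined,
// so callers typically branch on the zero flag first.
__ bsf(ecx, eax);           // ecx = index of lowest 1-bit in eax
__ j(zero, &src_was_zero);  // hypothetical label for the eax == 0 case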
+
+
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 57987bc751..15092951d7 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -40,12 +40,49 @@
#include <deque>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/isolate.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+#define GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esp) \
+ V(ebp) \
+ V(esi) \
+ V(edi)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esi) \
+ V(edi)
+
+#define DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7)
+
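These V-style lists are X-macros: a single list of register names expanded with different one-argument macros to stamp out enums, constants, and tables. A small illustration of a second expansion (a sketch; not necessarily how ToString() is implemented in this patch):

// Generate a parallel string table from the same list of names.
#define DECLARE_NAME(R) #R,
const char* const kGeneralRegisterNames[] = {GENERAL_REGISTERS(DECLARE_NAME)};
#undef DECLARE_NAME
// == {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"}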
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,151 +105,86 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- static const int kMaxNumAllocatableRegisters = 6;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
DCHECK(code >= 0);
DCHECK(code < kNumRegisters);
- Register r = { code };
+ Register r = {code};
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
+ bool is_byte_register() const { return reg_code <= 3; }
+
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
-
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
-
-
-inline int Register::ToAllocationIndex(Register reg) {
- DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
-
-struct XMMRegister {
- static const int kMaxNumAllocatableRegisters = 7;
- static const int kMaxNumRegisters = 8;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
- // TODO(turbofan): Proper support for float32.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
- static int ToAllocationIndex(XMMRegister reg) {
- DCHECK(reg.code() != 0);
- return reg.code() - 1;
- }
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static XMMRegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index + 1);
- }
+ static const int kMaxNumRegisters = Code::kAfterLast;
- static XMMRegister from_code(int code) {
- XMMRegister result = { code };
+ static DoubleRegister from_code(int code) {
+ DoubleRegister result = {code};
return result;
}
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7"
- };
- return names[index];
- }
+ const char* ToString();
- int code_;
+ int reg_code;
};
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef XMMRegister DoubleRegister;
-
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister no_xmm_reg = { -1 };
-
+typedef DoubleRegister XMMRegister;
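
The per-register constants above are now generated by an X-macro over the architecture's register list. A minimal, self-contained sketch of the pattern, with an abbreviated register list standing in for the real GENERAL_REGISTERS:

    #include <cassert>

    // Abbreviated stand-in for the real ia32 GENERAL_REGISTERS list.
    #define GENERAL_REGISTERS(V) V(eax) V(ecx) V(edx) V(ebx)

    struct Register {
      enum Code {
    #define REGISTER_CODE(R) kCode_##R,
        GENERAL_REGISTERS(REGISTER_CODE)
    #undef REGISTER_CODE
        kAfterLast,
        kCode_no_reg = -1
      };
      int reg_code;
      bool is_valid() const { return 0 <= reg_code && reg_code < kAfterLast; }
    };

    #define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
    GENERAL_REGISTERS(DECLARE_REGISTER)
    #undef DECLARE_REGISTER

    int main() {
      assert(eax.reg_code == 0 && ebx.reg_code == 3);  // codes follow list order
    }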
enum Condition {
// any value < 0 is considered no_condition
@@ -807,6 +779,8 @@ class Assembler : public AssemblerBase {
void bts(const Operand& dst, Register src);
void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
void bsr(Register dst, const Operand& src);
+ void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
+ void bsf(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -1595,6 +1569,7 @@ class EnsureSpace BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index ccdd01c7a3..4da4cb1db2 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -22,12 +22,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
+ // (only guaranteed when the called function
+ // is not marked as DontAdaptArguments)
+ // -- edi : called function
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
+ // -- esp[4 * argc] : first argument
// -- esp[4 * (argc +1)] : receiver
// -----------------------------------
__ AssertFunction(edi);
@@ -52,8 +53,22 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But eax is only valid
+  // if the called function is marked as DontAdaptArguments; otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ Label argc, done_argc;
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(ebx);
+ __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ j(equal, &argc, Label::kNear);
+ __ lea(eax, Operand(ebx, num_extra_args + 1));
+ __ jmp(&done_argc, Label::kNear);
+ __ bind(&argc);
__ add(eax, Immediate(num_extra_args + 1));
+ __ bind(&done_argc);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
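
A hedged C++ model of the argc fix-up just added: when the callee adapts its arguments, the trusted count comes from the SharedFunctionInfo rather than eax. The sentinel value and names here are illustrative, not V8's actual constants.

    #include <iostream>

    const int kDontAdaptArgumentsSentinel = -1;  // assumed sentinel, for illustration

    int EffectiveArgc(int eax_argc, int formal_parameter_count, int num_extra_args) {
      if (formal_parameter_count == kDontAdaptArgumentsSentinel) {
        return eax_argc + num_extra_args + 1;              // +1 for the receiver
      }
      return formal_parameter_count + num_extra_args + 1;  // adapted frame size
    }

    int main() {
      std::cout << EffectiveArgc(2, kDontAdaptArgumentsSentinel, 1) << "\n";  // 4
      std::cout << EffectiveArgc(2, 3, 1) << "\n";                            // 5
    }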
@@ -136,14 +151,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(edx, edi);
+ // Verify that the original constructor is a JSFunction.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &rt_call);
- // Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // edx: original constructor
+ __ mov(eax, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
    // Will catch both a NULL and a Smi.
__ JumpIfSmi(eax, &rt_call);
// edi: constructor
@@ -151,6 +165,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(eax, MAP_TYPE, ebx);
__ j(not_equal, &rt_call);
+    // Fall back to runtime if the expected base constructor and the actual
+    // base constructor differ.
+ __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
// Check that the constructor is not constructing a JSFunction (see
    // comments in Runtime_NewObject in runtime.cc), in which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
@@ -179,7 +198,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(edx);
__ push(edi);
- __ push(edi); // constructor
+ __ push(eax); // initial map
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(edi);
@@ -265,8 +284,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// runtime.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ mov(edi, Operand(esp, offset));
- __ push(edi); // argument 2/1: constructor function
- __ push(edx); // argument 3/2: original constructor
+ __ push(edi); // constructor function
+ __ push(edx); // original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(ebx, eax); // store result in ebx
@@ -627,21 +646,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -650,7 +655,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
+ __ push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -665,24 +672,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
// load directly from the roots table.
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ add(kInterpreterDispatchTableRegister,
- Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
// Push context as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterContextSpillSlot);
- __ push(esi);
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ push(ebx);
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
- times_pointer_size, 0));
+ __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
+  // Restore undefined_value in the accumulator (eax).
+ // TODO(rmcilroy): Remove this once we move the dispatch table back into a
+ // register.
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
- __ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(esi);
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(ebx);
}
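
What the dispatch sequence above computes, as a sketch in plain C++ (handler-table layout and code-entry untagging omitted):

    #include <cstdint>

    typedef void (*BytecodeHandler)();

    void Dispatch(const uint8_t* bytecode_array, int offset,
                  BytecodeHandler* dispatch_table) {
      uint8_t opcode = bytecode_array[offset];            // movzx_b
      BytecodeHandler handler = dispatch_table[opcode];   // times_pointer_size index
      handler();                                          // call ebx
    }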
@@ -708,13 +716,99 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register array_limit) {
+ // ----------- S t a t e -------------
+ // -- ebx : Pointer to the last argument in the args array.
+ // -- array_limit : Pointer to one before the first argument in the
+ // args array.
+ // -----------------------------------
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(ebx, 0));
+ __ sub(ebx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmp(ebx, array_limit);
+ __ j(greater, &loop_header, Label::kNear);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- edi : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(edx);
+
+ // Find the address of the last argument.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+ __ shl(ecx, kPointerSizeLog2);
+ __ neg(ecx);
+ __ add(ecx, ebx);
+
+ Generate_InterpreterPushArgs(masm, ecx);
+
+ // Call the target.
+ __ Push(edx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
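
The pop/re-push of the return address is the standard trick for splicing arguments in under it. A sketch with an explicit stack, assuming the arguments are laid out contiguously from ebx upward:

    #include <cstdint>
    #include <vector>

    void PushArgsAndTailCall(std::vector<intptr_t>* stack,
                             const intptr_t* first_arg, int argc) {
      intptr_t ret = stack->back();      // Pop(edx): lift the return address
      stack->pop_back();
      for (int i = argc; i >= 0; --i) {  // argc arguments plus the receiver
        stack->push_back(first_arg[i]);
      }
      stack->push_back(ret);             // re-push so the callee returns normally
      // ...then jump (not call) to the Call builtin.
    }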
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor
+ // -- edi : the constructor
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Save number of arguments on the stack below where arguments are going
+ // to be pushed.
+ __ mov(ecx, eax);
+ __ neg(ecx);
+ __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
+ __ mov(eax, ecx);
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(ecx);
+
+ // Find the address of the last argument.
+ __ shl(eax, kPointerSizeLog2);
+ __ add(eax, ebx);
+
+ // Push padding for receiver.
+ __ Push(Immediate(0));
+
+ Generate_InterpreterPushArgs(masm, eax);
+
+ // Restore number of arguments from slot on stack.
+ __ mov(eax, Operand(esp, -kPointerSize));
+
+ // Re-push return address.
+ __ Push(ecx);
+
+  // Call the constructor with unmodified eax, edx, edi values.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
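
The argument count is parked in the stack slot that will end up just below the pushed arguments, then reloaded afterwards; eax is needed as a scratch register in between. A downward-growing array model of that dance (slot widths and layout are illustrative):

    #include <cassert>

    int stack[16];
    int sp = 16;  // last pushed item is stack[sp]; the stack grows downward

    void Push(int v) { stack[--sp] = v; }

    int main() {
      const int argc = 3;
      int args[argc] = {10, 20, 30};
      // Store argc below the (argc + 1) slots the pushes will occupy.
      stack[sp - (argc + 1) - 1] = argc;
      Push(0);                                    // receiver padding
      for (int i = argc - 1; i >= 0; --i) Push(args[i]);
      assert(stack[sp - 1] == argc);              // restore from below esp
    }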
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
-
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function.
@@ -1270,6 +1364,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- edx : original constructor
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
@@ -1295,16 +1390,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
Label convert, done_convert;
__ JumpIfSmi(ebx, &convert, Label::kNear);
- __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
__ j(below, &done_convert);
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
__ Push(edi);
+ __ Push(edx);
__ Move(eax, ebx);
__ CallStub(&stub);
__ Move(ebx, eax);
+ __ Pop(edx);
__ Pop(edi);
}
__ bind(&done_convert);
@@ -1315,9 +1412,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ebx : the first argument
// -- edi : constructor function
+ // -- edx : original constructor
// -----------------------------------
- Label allocate, done_allocate;
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and constructor differ.
+ __ cmp(edx, edi);
+ __ j(not_equal, &rt_call);
+
__ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -1344,6 +1447,21 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(ebx);
}
__ jmp(&done_allocate);
+
+ // Fallback to the runtime to create new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edi);
+ __ Push(edi); // constructor function
+ __ Push(edx); // original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(edi);
+ __ Pop(ebx);
+ }
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+ __ Ret();
}
}
@@ -1410,74 +1528,85 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(edi);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+ SharedFunctionInfo::kClassConstructorBitsWithinByte);
+ __ j(not_zero, &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
(1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_zero, &done_convert);
{
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
-
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- ecx : the receiver
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
- __ j(above_equal, &done_convert);
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(ecx);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ j(above_equal, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy, Label::kNear);
+ __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(ecx);
+ }
+ __ jmp(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ Push(edi);
+ __ mov(eax, ecx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ecx, eax);
+ __ Pop(edi);
+ __ Pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ jmp(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ Push(eax);
- __ Push(edi);
- __ mov(eax, ecx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ecx, eax);
- __ Pop(edi);
- __ Pop(eax);
- __ SmiUntag(eax);
- }
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
}
__ bind(&done_convert);
@@ -1496,11 +1625,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount expected(ebx);
__ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
actual, JUMP_FUNCTION, NullCallWrapper());
+
+  // The function is a "classConstructor"; raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
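
The three ConvertReceiverMode cases above collapse to a small decision table. A hedged sketch, with strings standing in for tagged values:

    #include <string>

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    std::string ConvertReceiver(ConvertReceiverMode mode, std::string receiver,
                                bool is_js_receiver) {
      if (mode == ConvertReceiverMode::kNullOrUndefined) {
        return "global proxy";                // patch without even loading it
      }
      if (is_js_receiver) return receiver;    // already an object
      if (mode != ConvertReceiverMode::kNotNullOrUndefined &&
          (receiver == "null" || receiver == "undefined")) {
        return "global proxy";
      }
      return "ToObject(" + receiver + ")";    // boxed via the ToObjectStub
    }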
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -1510,7 +1646,7 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
@@ -1531,7 +1667,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1626,41 +1764,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- edi : the target to call (can be any Object).
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
-
- // Find the address of the last argument.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ Push(Operand(ebx, 0));
- __ sub(ebx, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmp(ebx, ecx);
- __ j(greater, &loop_header, Label::kNear);
-
- // Call the target.
- __ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 37e1876f3d..215a194d04 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -893,7 +893,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -1097,7 +1097,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments map from the current native context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
__ mov(edi, Operand(edi, offset));
@@ -2029,97 +2029,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, cont);
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(Isolate* isolate, MacroAssembler* masm, int argc) {
- __ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(edi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(edi);
- }
- __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // edi : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm->isolate(), masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
@@ -2212,9 +2121,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2248,36 +2155,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(isolate, masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- __ j(equal, &slow_start);
+ __ j(equal, &call);
// Check if we have an allocation site.
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -2309,7 +2195,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We have to update statistics for runtime profiling.
__ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
__ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
- __ jmp(&slow_start);
+ __ jmp(&call);
__ bind(&uninitialized);
@@ -2346,23 +2232,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
}
- __ jmp(&have_js_function);
+ __ jmp(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
// Unreachable
__ int3();
@@ -2425,11 +2302,23 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// esp: stack pointer (restored after C call)
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
+ //
+ // If argv_in_register():
+ // ecx: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles());
+ if (argv_in_register()) {
+ DCHECK(!save_doubles());
+ __ EnterApiExitFrame(3);
+
+ // Move argc and argv into the correct registers.
+ __ mov(esi, ecx);
+ __ mov(edi, eax);
+ } else {
+ __ EnterExitFrame(save_doubles());
+ }
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2474,7 +2363,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles());
+ __ LeaveExitFrame(save_doubles(), !argv_in_register());
__ ret(0);
// Handling of exception.
@@ -2868,7 +2757,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -3180,6 +3069,25 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+  // The ToLength stub takes one argument in eax.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, eax);
+ __ j(greater_equal, &positive_smi, Label::kNear);
+ __ xor_(eax, eax);
+ __ bind(&positive_smi);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
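
The stub only handles the Smi fast path inline, clamping negatives to zero; full ES6 ToLength, performed by the runtime call, additionally clamps to 2^53 - 1. As a sketch:

    #include <algorithm>
    #include <cmath>

    double ToLength(double value) {
      if (std::isnan(value) || value <= 0) return 0;       // negatives clamp to 0
      const double kMaxSafeInteger = 9007199254740991.0;   // 2^53 - 1
      return std::min(std::floor(value), kMaxSafeInteger);
    }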
+
+
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in eax.
Label is_number;
@@ -4561,13 +4469,14 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register key, Register vector,
Register slot, Register feedback,
- Label* miss) {
+ bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next, next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
+ Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
__ push(receiver);
__ push(vector);
@@ -4599,16 +4508,18 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(Operand::StaticVariable(virtual_register));
// Polymorphic, we have to loop from 2 to N
-
- // TODO(mvstanton): I think there is a bug here, we are assuming the
- // array has more than one map/handler pair, but we call this function in the
- // keyed store with a string key case, where it might be just an array of two
- // elements.
-
__ bind(&start_polymorphic);
__ push(key);
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(2)));
+
+ if (!is_polymorphic) {
+    // If is_polymorphic is false, we may only have a two-element array.
+ // Check against length now in that case.
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(greater_equal, &pop_and_miss);
+ }
+
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4630,6 +4541,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ j(less, &next_loop);
// We exhausted our array of map handler pairs.
+ __ bind(&pop_and_miss);
__ pop(key);
__ pop(vector);
__ pop(receiver);
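
This addresses the TODO removed above: the emitted loop is do-while shaped (body first, bound check at the bottom), so a two-entry feedback array needs the extra up-front length check. A sketch of the shape of the fix:

    #include <utility>
    #include <vector>

    // feedback models [(map, handler), ...]; entry 0 was already tried by the
    // monomorphic fast path.
    const int* FindHandler(const std::vector<std::pair<int, int>>& feedback,
                           int receiver_map, bool is_polymorphic) {
      size_t i = 1;
      if (!is_polymorphic && i >= feedback.size()) return nullptr;  // added check
      do {
        if (feedback[i].first == receiver_map) return &feedback[i].second;
        ++i;
      } while (i < feedback.size());
      return nullptr;  // miss
    }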
@@ -4648,7 +4560,7 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
// The store ic value is on the stack.
DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
// feedback initially contains the feedback array
Label compare_smi_map;
@@ -4710,7 +4622,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&try_array);
__ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
+ &miss);
__ bind(&not_array);
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
@@ -4755,13 +4668,16 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Label transition_call;
Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(masm->isolate());
__ push(receiver);
__ push(vector);
Register receiver_map = receiver;
Register cached_map = vector;
+ Register value = StoreDescriptor::ValueRegister();
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
@@ -4770,11 +4686,17 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// Polymorphic, we have to loop from 0 to N - 1
__ push(key);
- // On the stack we have:
- // key (esp)
- // vector
- // receiver
- // value
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, vector, slot in registers.
+ // - handler in virtual register.
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(0)));
__ bind(&next_loop);
@@ -4793,32 +4715,39 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ pop(receiver);
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
__ mov(Operand::StaticVariable(virtual_register), feedback);
- __ pop(feedback); // Pop "value".
+ __ pop(value);
__ jmp(Operand::StaticVariable(virtual_register));
__ bind(&transition_call);
- // Oh holy hell this will be tough.
- // The map goes in vector register.
- __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(receiver, &pop_and_miss);
- // slot goes on the stack, and holds return address.
- __ xchg(slot, Operand(esp, 4 * kPointerSize));
- // Get the handler in value.
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, map, vector in registers.
+ // - handler and slot in virtual registers.
+ __ mov(Operand::StaticVariable(virtual_slot), slot);
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), feedback);
+
+ __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(cached_map, &pop_and_miss);
+ DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+
// Pop key into place.
__ pop(key);
- // Put the return address on top of stack, vector goes in slot.
- __ xchg(slot, Operand(esp, 0));
- // put the map on the stack, receiver holds receiver.
- __ xchg(receiver, Operand(esp, 1 * kPointerSize));
- // put the vector on the stack, slot holds value.
- __ xchg(slot, Operand(esp, 2 * kPointerSize));
- // feedback (value) = value, slot = handler.
- __ xchg(feedback, slot);
- __ jmp(slot);
+ __ pop(vector);
+ __ pop(receiver);
+ __ pop(value);
+ __ jmp(Operand::StaticVariable(virtual_register));
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(3)));
@@ -4885,7 +4814,8 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// at least one map/handler pair.
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
+ &miss);
__ bind(&miss);
__ pop(value);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index c09b27b773..121d12fe74 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -320,13 +320,15 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ Register candidate = Register::from_code(i);
+ if (candidate.IsAllocatable()) {
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
}
UNREACHABLE();
return no_reg;
@@ -385,6 +387,7 @@ class RecordWriteStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 2382388bea..03bb128dd6 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -42,6 +42,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index d804f630ea..c644ffa60f 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -8,6 +8,7 @@
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -181,7 +182,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -203,7 +204,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -233,12 +234,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
__ movsd(Operand(esp, offset), xmm_reg);
}
@@ -288,9 +291,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int double_regs_offset = FrameDescription::double_registers_offset();
// Fill in the double input registers.
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize;
__ movsd(xmm0, Operand(esp, src_offset));
__ movsd(Operand(ebx, dst_offset), xmm0);
}
@@ -371,9 +375,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(ebx, src_offset));
}
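
The switch from allocation indices to a RegisterConfiguration keys the save-slot offset by the architectural register code, which need not be a dense 0..N-1 range. A small stand-in model:

    #include <vector>

    struct Config {
      std::vector<int> allocatable_double_codes;  // e.g. {1, 2, 3, 4, 5, 6, 7}
    };

    void SaveDoubles(const Config& config, const double* regs, double* slots) {
      for (int code : config.allocatable_double_codes) {
        slots[code] = regs[code];  // offset = code * kDoubleSize, not loop index
      }
    }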
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 935b22d900..5a43280659 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1231,6 +1231,8 @@ static const char* F0Mnem(byte f0byte) {
case 0xAD: return "shrd";
case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
+ case 0xBC:
+ return "bsf";
case 0xBD: return "bsr";
default: return NULL;
}
@@ -1482,6 +1484,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xBC) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
} else if (f0byte == 0xBD) {
data += 2;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index a5ce6a5f02..609dfec7b6 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -80,6 +80,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 22d85d8cc3..2077dd76e6 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -35,12 +35,10 @@ const Register VectorStoreTransitionDescriptor::SlotRegister() {
}
-const Register VectorStoreTransitionDescriptor::VectorRegister() {
- return no_reg;
-}
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return no_reg; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
@@ -85,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
- // The other three parameters are on the stack in ia32.
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
@@ -116,6 +106,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return eax; }
@@ -237,6 +231,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -400,16 +401,39 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- eax, // argument count (including receiver)
+ eax, // argument count (not including receiver)
ebx, // address of first argument
      edi   // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (not including receiver)
+ edx, // original constructor
+ edi, // constructor
+ ebx, // address of first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (argc)
+ ecx, // address of first argument (argv)
+ ebx // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 0ad5d778ec..4a595783e2 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -24,9 +24,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
generating_stub_(false),
has_frame_(false) {
if (isolate() != NULL) {
- // TODO(titzer): should we just use a null handle here instead?
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -974,7 +973,7 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore all XMM registers.
if (save_doubles) {
const int offset = -2 * kPointerSize;
@@ -984,15 +983,20 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
}
}
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
+ if (pop_arguments) {
+ // Get the return address from the stack and restore the frame pointer.
+ mov(ecx, Operand(ebp, 1 * kPointerSize));
+ mov(ebp, Operand(ebp, 0 * kPointerSize));
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
+ // Pop the arguments and the receiver from the caller stack.
+ lea(esp, Operand(esi, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(ecx);
+ // Push the return address to get ready to return.
+ push(ecx);
+ } else {
+ // Otherwise just leave the exit frame.
+ leave();
+ }
LeaveExitFrameEpilogue(true);
}
@@ -1067,7 +1071,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
mov(scratch1, FieldOperand(scratch1, offset));
- mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+ mov(scratch1, FieldOperand(scratch1, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -2082,7 +2086,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
mov(target, GlobalObjectOperand());
- mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ mov(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
mov(target, ContextOperand(target, native_context_index));
}
@@ -2125,7 +2129,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, GlobalObjectOperand());
- mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+ mov(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -2137,7 +2141,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Label* no_map_match) {
// Load the global or builtins object from the current context.
mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ mov(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
mov(scratch, Operand(scratch,
@@ -2160,8 +2164,7 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
mov(function,
Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- mov(function,
- FieldOperand(function, GlobalObject::kNativeContextOffset));
+ mov(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
@@ -2425,6 +2428,30 @@ void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
}
+void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcnt(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsf(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Popcnt(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcnt(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
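
Both helpers follow the same pattern: use the fast instruction when the CPU supports it, otherwise fall back. BSF leaves the destination undefined for a zero input, so the Tzcnt fallback patches in 32 to match TZCNT's defined result; a scalar sketch:

    #include <cstdint>

    int Tzcnt32(uint32_t src) {
      if (src == 0) return 32;   // bsf is undefined here; tzcnt defines it as 32
      int n = 0;
      while ((src & 1u) == 0) {  // index of the lowest set bit
        src >>= 1;
        ++n;
      }
      return n;
    }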
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 508e2099ad..bff3c041a4 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -14,20 +14,20 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_eax_Code};
-const Register kReturnRegister1 = {kRegister_edx_Code};
-const Register kJSFunctionRegister = {kRegister_edi_Code};
-const Register kContextRegister = {kRegister_esi_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+const Register kReturnRegister0 = {Register::kCode_eax};
+const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kJSFunctionRegister = {Register::kCode_edi};
+const Register kContextRegister = {Register::kCode_esi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterContextSpillSlot = -1;
+const int kInterpreterDispatchTableSpillSlot = -1;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
@@ -278,8 +278,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
+ // argument in register esi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
@@ -854,6 +854,12 @@ class MacroAssembler: public Assembler {
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);
+ void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
+ void Tzcnt(Register dst, const Operand& src);
+
+ void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
+ void Popcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
@@ -1129,6 +1135,7 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 02a8e9c03a..af7ee3c71b 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -43,6 +43,7 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index a5beb714f8..223bde479a 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -81,7 +81,7 @@ class PropertyAccessCompiler BASE_EMBEDDED {
// Ensure that MacroAssembler has a reasonable size.
STATIC_ASSERT(sizeof(MacroAssembler) < 128 * kPointerSize);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_ACCESS_COMPILER_H_
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index e2585fe222..1b6b51538e 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -171,7 +171,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ ldr(result, MemOperand(cp, offset));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
__ ldr(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ ldr(result,
@@ -356,8 +356,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index de219ae72f..89b3cc38d4 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -26,8 +26,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
__ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, global_object);
}
@@ -432,7 +430,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
@@ -709,7 +707,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 10ea1d72ff..576d333428 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -79,7 +79,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
__ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
__ Ldr(result, ContextMemOperand(result, index));
// Load its initial map. The global functions all have initial maps.
__ Ldr(result,
@@ -404,8 +404,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index c4c856aab7..90b89018fe 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -22,7 +22,6 @@ namespace internal {
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
__ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
__ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
__ B(eq, global_object);
}
@@ -410,7 +409,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
@@ -700,7 +699,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
}
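The arm64 type check chains its comparisons with Ccmp: when the condition (ne) holds after the first Cmp, the second compare runs; otherwise the flags are forced to ZFlag, so a single B(eq) covers both tests. With JS_BUILTINS_OBJECT_TYPE gone, the chain reduces to a two-way disjunction, sketched in C++:

    // C++ equivalent of the Cmp/Ccmp/B(eq) sequence above.
    bool IsGlobalObjectType(int type, int js_global_object_type,
                            int js_global_proxy_type) {
      return type == js_global_object_type || type == js_global_proxy_type;
    }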
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index 01947d7fed..7963d1ce67 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -61,7 +61,7 @@ class CallOptimization BASE_EMBEDDED {
Handle<FunctionTemplateInfo> expected_receiver_type_;
Handle<CallHandlerInfo> api_call_info_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_CALL_OPTIMIZATION_H_
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 77e0fb5e43..2b6f88ac95 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -58,7 +58,7 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
cache_name = name;
JSReceiver* prototype = JSReceiver::cast(current_map->prototype());
if (!prototype->map()->is_hidden_prototype() &&
- !prototype->map()->IsGlobalObjectMap()) {
+ !prototype->map()->IsJSGlobalObjectMap()) {
break;
}
}
@@ -330,6 +330,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
PrototypeIterator iter(isolate(), last);
while (!iter.IsAtEnd()) {
lost_holder_register = true;
+ // Casting to JSObject is fine here. The LookupIterator makes sure to
+ // look behind non-masking interceptors during the original lookup, and
+ // we wouldn't try to compile a handler if there was a Proxy anywhere.
last = iter.GetCurrent<JSObject>();
iter.Advance();
}
@@ -456,17 +459,18 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
- Register transition_map_reg = StoreTransitionHelper::MapRegister();
- bool stack_args = StoreTransitionHelper::UsesStackArgs();
- Register map_reg = stack_args ? scratch1() : transition_map_reg;
+ bool virtual_args = StoreTransitionHelper::HasVirtualSlotArg();
+ Register map_reg = StoreTransitionHelper::MapRegister();
if (details.type() == DATA_CONSTANT) {
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
- GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
- GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
- if (stack_args) {
- // Also pushes vector and slot.
- GeneratePushMap(map_reg, scratch2());
+ Register tmp =
+ virtual_args ? VectorStoreICDescriptor::VectorRegister() : map_reg;
+ GenerateRestoreMap(transition, tmp, scratch2(), &miss);
+ GenerateConstantCheck(tmp, descriptor, value(), scratch2(), &miss);
+ if (virtual_args) {
+ // This will move the map from tmp into map_reg.
+ RearrangeVectorAndSlot(tmp, map_reg);
} else if (FLAG_vector_stores) {
PopVectorAndSlot();
}
@@ -484,10 +488,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
: StoreTransitionStub::StoreMapAndValue;
- GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
- if (stack_args) {
- // Also pushes vector and slot.
- GeneratePushMap(map_reg, scratch2());
+ Register tmp =
+ virtual_args ? VectorStoreICDescriptor::VectorRegister() : map_reg;
+ GenerateRestoreMap(transition, tmp, scratch2(), &miss);
+ if (virtual_args) {
+ RearrangeVectorAndSlot(tmp, map_reg);
} else if (FLAG_vector_stores) {
PopVectorAndSlot();
}
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index f5dafe9038..fe59210353 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -251,7 +251,10 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
virtual void FrontendFooter(Handle<Name> name, Label* miss);
void GenerateRestoreName(Label* label, Handle<Name> name);
- void GeneratePushMap(Register map_reg, Register scratch);
+
+ // Pop the vector and slot into appropriate registers, moving the map in
+ // the process. (This is an accommodation for register pressure on ia32).
+ void RearrangeVectorAndSlot(Register current_map, Register destination_map);
private:
void GenerateRestoreName(Handle<Name> name);
@@ -297,7 +300,7 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
static void GenerateStoreSlow(MacroAssembler* masm);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_HANDLER_COMPILER_H_
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 1d019092c7..d5011fb7e9 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -116,7 +116,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ mov(result, Operand(esi, offset));
- __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+ __ mov(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset));
__ mov(result, Operand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ mov(result,
@@ -362,18 +362,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
- //  current           after GeneratePushMap
- // -------------------------------------------------
- //    ret addr           slot
- //    vector             vector
- //  sp -> slot           map
- //                       sp -> ret addr
- //
- __ xchg(map_reg, Operand(esp, 0));
- __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
- __ push(map_reg);
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
+ DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
+ DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(isolate());
+ __ mov(destination_map, current_map);
+ __ pop(current_map);
+ __ mov(Operand::StaticVariable(virtual_slot), current_map);
+ __ pop(current_map); // put vector in place.
}
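Read as a sequence of stack effects, the new ia32 RearrangeVectorAndSlot trades scarce registers for isolate-level storage: the map moves into the map register, the slot is popped and parked in the virtual slot external reference, and the vector is popped into the vector register. A toy simulation, under the assumption that the slot sits above the vector on entry:

    #include <cstdint>
    #include <vector>

    // Illustrative names only; this mirrors the mov/pop/mov/pop sequence above.
    struct Frame {
      std::vector<uintptr_t> stack;  // back() is the top of stack
      uintptr_t current_map = 0, destination_map = 0;
      uintptr_t virtual_slot = 0;    // stand-in for the external reference
    };

    void RearrangeVectorAndSlot(Frame& f) {
      f.destination_map = f.current_map;  // map now lives in MapRegister
      f.virtual_slot = f.stack.back();    // pop slot, park it off-stack
      f.stack.pop_back();
      f.current_map = f.stack.back();     // pop vector into VectorRegister
      f.stack.pop_back();
    }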
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 7a6a41541c..1754d5a6fc 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -25,8 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object);
}
@@ -341,7 +339,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(isolate);
int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
@@ -569,7 +567,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
}
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index 7366ebe15f..fcfae4bc0c 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -24,7 +24,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
bool is_vector_store =
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index ee6597d59d..ff32404afa 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -125,7 +125,7 @@ class PropertyICCompiler : public PropertyAccessCompiler {
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_IC_COMPILER_H_
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 646b73d641..65a5a2ddec 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -127,19 +127,6 @@ Code* IC::raw_target() const {
void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
-JSFunction* IC::GetRootConstructor(Map* receiver_map, Context* native_context) {
- DisallowHeapAllocation no_alloc;
- if (receiver_map->IsPrimitiveMap()) {
- int constructor_function_index =
- receiver_map->GetConstructorFunctionIndex();
- if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
- return JSFunction::cast(native_context->get(constructor_function_index));
- }
- }
- return nullptr;
-}
-
-
Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder, Isolate* isolate,
CacheHolderFlag* flag) {
@@ -147,9 +134,9 @@ Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
*flag = kCacheOnReceiver;
return receiver_map;
}
- Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(*receiver_map, native_context);
- if (builtin_ctor != NULL) {
+ Handle<JSFunction> builtin_ctor;
+ if (Map::GetConstructorFunction(receiver_map, isolate->native_context())
+ .ToHandle(&builtin_ctor)) {
*flag = kCacheOnPrototypeReceiverIsPrimitive;
return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map());
}
@@ -163,9 +150,9 @@ Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
Handle<Map> IC::GetICCacheHolder(Handle<Map> map, Isolate* isolate,
CacheHolderFlag* flag) {
- Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(*map, native_context);
- if (builtin_ctor != NULL) {
+ Handle<JSFunction> builtin_ctor;
+ if (Map::GetConstructorFunction(map, isolate->native_context())
+ .ToHandle(&builtin_ctor)) {
*flag = kCacheOnPrototype;
return handle(builtin_ctor->initial_map());
}
@@ -193,7 +180,7 @@ bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
return (host->kind() == Code::OPTIMIZED_FUNCTION &&
host->marked_for_deoptimization());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_INL_H_
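The rewritten cache-holder lookups swap a raw, nullable JSFunction* for V8's MaybeHandle pattern, where ToHandle both tests for emptiness and extracts the value in one call. The idiom, re-created without V8's types:

    #include <optional>

    // Minimal stand-in for MaybeHandle<T>; ToHandle mirrors the V8 call shape.
    template <typename T>
    struct MaybeHandleLike {
      std::optional<T> value;
      bool ToHandle(T* out) const {
        if (!value) return false;
        *out = *value;
        return true;
      }
    };

    int Demo(const MaybeHandleLike<int>& maybe_ctor) {
      int ctor;
      if (maybe_ctor.ToHandle(&ctor)) return ctor;  // "found" path
      return -1;                                    // fall-through path
    }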
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index bc03d7d487..298eaa2707 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -16,22 +16,8 @@ void ICUtility::Clear(Isolate* isolate, Address address,
}
-CallICState::CallICState(ExtraICState extra_ic_state)
- : argc_(ArgcBits::decode(extra_ic_state)),
- call_type_(CallTypeBits::decode(extra_ic_state)) {}
-
-
-ExtraICState CallICState::GetExtraICState() const {
- ExtraICState extra_ic_state =
- ArgcBits::encode(argc_) | CallTypeBits::encode(call_type_);
- return extra_ic_state;
-}
-
-
std::ostream& operator<<(std::ostream& os, const CallICState& s) {
- return os << "(args(" << s.arg_count() << "), "
- << (s.call_type() == CallICState::METHOD ? "METHOD" : "FUNCTION")
- << ", ";
+ return os << "(args(" << s.argc() << "), " << s.convert_mode() << ", ";
}
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index b529b8c54d..ebc686b738 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -23,30 +23,29 @@ class ICUtility : public AllStatic {
class CallICState final BASE_EMBEDDED {
public:
- explicit CallICState(ExtraICState extra_ic_state);
+ explicit CallICState(ExtraICState extra_ic_state)
+ : bit_field_(extra_ic_state) {}
+ CallICState(int argc, ConvertReceiverMode convert_mode)
+ : bit_field_(ArgcBits::encode(argc) |
+ ConvertModeBits::encode(convert_mode)) {}
- enum CallType { METHOD, FUNCTION };
-
- CallICState(int argc, CallType call_type)
- : argc_(argc), call_type_(call_type) {}
-
- ExtraICState GetExtraICState() const;
+ ExtraICState GetExtraICState() const { return bit_field_; }
static void GenerateAheadOfTime(Isolate*,
void (*Generate)(Isolate*,
const CallICState&));
- int arg_count() const { return argc_; }
- CallType call_type() const { return call_type_; }
-
- bool CallAsMethod() const { return call_type_ == METHOD; }
+ int argc() const { return ArgcBits::decode(bit_field_); }
+ ConvertReceiverMode convert_mode() const {
+ return ConvertModeBits::decode(bit_field_);
+ }
private:
- class ArgcBits : public BitField<int, 0, Code::kArgumentsBits> {};
- class CallTypeBits : public BitField<CallType, Code::kArgumentsBits, 1> {};
+ typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
+ typedef BitField<ConvertReceiverMode, Code::kArgumentsBits, 2>
+ ConvertModeBits;
- const int argc_;
- const CallType call_type_;
+ int const bit_field_;
};
@@ -268,7 +267,8 @@ class StoreICState final BASE_EMBEDDED {
private:
const ExtraICState state_;
};
-}
-}
+
+} // namespace internal
+} // namespace v8
#endif // V8_IC_STATE_H_
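CallICState now packs the argument count and the receiver-conversion mode into one integer through BitField typedefs instead of storing two const members, so GetExtraICState becomes a plain field read. The underlying technique, hand-rolled (the field widths below are assumptions, not Code::kArgumentsBits):

    #include <cstdint>

    // Stand-in for V8's BitField<T, shift, size>.
    template <typename T, int kShift, int kSize>
    struct BitFieldLike {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> kShift);
      }
    };

    enum ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    using ArgcBits = BitFieldLike<int, 0, 8>;  // 8-bit argc assumed
    using ConvertModeBits = BitFieldLike<ConvertReceiverMode, 8, 2>;

    uint32_t Pack(int argc, ConvertReceiverMode mode) {
      return ArgcBits::encode(argc) | ConvertModeBits::encode(mode);
    }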
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index b2bcaeeb0f..3dc3029300 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -47,9 +47,6 @@ char IC::TransitionMarkFromState(IC::State state) {
// these cases fall through to the unreachable code below.
case DEBUG_STUB:
break;
- // Type-vector-based ICs resolve state to one of the above.
- case DEFAULT:
- break;
}
UNREACHABLE();
return 0;
@@ -200,6 +197,11 @@ SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// corresponding to the frame.
StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
+ if (FLAG_ignition && it.frame()->type() == StackFrame::STUB) {
+ // Advance over bytecode handler frame.
+ // TODO(rmcilroy): Remove this once bytecode handlers don't need a frame.
+ it.Advance();
+ }
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
@@ -293,8 +295,8 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
}
}
- if (receiver->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ if (receiver->IsJSGlobalObject()) {
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::ACCESS_CHECK) return false;
if (!it.IsFound()) return false;
@@ -332,14 +334,6 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
MarkPrototypeFailure(name);
return;
}
-
- // The builtins object is special. It only changes when JavaScript
- // builtins are loaded lazily. It is important to keep inline
- // caches for the builtins object monomorphic. Therefore, if we get
- // an inline cache miss for the builtins object after lazily loading
- // JavaScript builtins, we return uninitialized as the state to
- // force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) state_ = PREMONOMORPHIC;
}
@@ -388,7 +382,6 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
break;
case PROTOTYPE_FAILURE:
case DEBUG_STUB:
- case DEFAULT:
UNREACHABLE();
}
}
@@ -707,10 +700,10 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
- if (object->IsGlobalObject() && name->IsString()) {
+ if (object->IsJSGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -918,7 +911,6 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
break;
case DEBUG_STUB:
break;
- case DEFAULT:
case GENERIC:
UNREACHABLE();
break;
@@ -1241,7 +1233,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
// When debugging we need to go the slow path to flood the accessor.
if (GetSharedFunctionInfo()->HasDebugInfo()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (!receiver->IsJSObject() && !function->IsBuiltin() &&
+ if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
is_sloppy(function->shared()->language_mode())) {
// Calling sloppy non-builtins with a value as the receiver
// requires boxing.
@@ -1269,7 +1261,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
if (kind() != Code::LOAD_IC) break;
- if (holder->IsGlobalObject()) {
+ if (holder->IsJSGlobalObject()) {
NamedLoadHandlerCompiler compiler(isolate(), map, holder,
cache_holder);
Handle<PropertyCell> cell = lookup->GetPropertyCell();
@@ -1507,6 +1499,8 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
PrototypeIterator::GetCurrent(iter));
}
+ if (it->HolderIsReceiverOrHiddenPrototype()) return false;
+
it->PrepareTransitionToDataProperty(value, NONE, store_mode);
return it->IsCacheableTransition();
}
@@ -1542,10 +1536,10 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
return result;
}
- if (object->IsGlobalObject() && name->IsString()) {
+ if (object->IsJSGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -1607,26 +1601,23 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
LookupIterator it(object, name);
if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
- // Set the property.
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(&it, value, language_mode(), store_mode), Object);
- return result;
+ MAYBE_RETURN_NULL(
+ Object::SetProperty(&it, value, language_mode(), store_mode));
+ return value;
}
Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
- CallICState::CallType call_type) {
- CallICTrampolineStub stub(isolate, CallICState(argc, call_type));
+ ConvertReceiverMode mode) {
+ CallICTrampolineStub stub(isolate, CallICState(argc, mode));
Handle<Code> code = stub.GetCode();
return code;
}
Handle<Code> CallIC::initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, CallICState::CallType call_type) {
- CallICStub stub(isolate, CallICState(argc, call_type));
+ Isolate* isolate, int argc, ConvertReceiverMode mode) {
+ CallICStub stub(isolate, CallICState(argc, mode));
Handle<Code> code = stub.GetCode();
return code;
}
@@ -1730,7 +1721,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
static Handle<Code> PropertyCellStoreHandler(
- Isolate* isolate, Handle<JSObject> receiver, Handle<GlobalObject> holder,
+ Isolate* isolate, Handle<JSObject> receiver, Handle<JSGlobalObject> holder,
Handle<Name> name, Handle<PropertyCell> cell, PropertyCellType type) {
auto constant_type = Nothing<PropertyCellConstantType>();
if (type == PropertyCellType::kConstantType) {
@@ -1759,12 +1750,12 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
auto store_target = lookup->GetStoreTarget();
- if (store_target->IsGlobalObject()) {
+ if (store_target->IsJSGlobalObject()) {
// TODO(dcarney): this currently just deopts. Use the transition cell.
auto cell = isolate()->factory()->NewPropertyCell();
cell->set_value(*value);
auto code = PropertyCellStoreHandler(
- isolate(), store_target, Handle<GlobalObject>::cast(store_target),
+ isolate(), store_target, Handle<JSGlobalObject>::cast(store_target),
lookup->name(), cell, PropertyCellType::kConstant);
cell->set_value(isolate()->heap()->the_hole_value());
return code;
@@ -1842,14 +1833,14 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
- if (holder->IsGlobalObject()) {
+ if (holder->IsJSGlobalObject()) {
DCHECK(holder.is_identical_to(receiver) ||
receiver->map()->prototype() == *holder);
auto cell = lookup->GetPropertyCell();
auto updated_type = PropertyCell::UpdatedType(
cell, value, lookup->property_details());
auto code = PropertyCellStoreHandler(
- isolate(), receiver, Handle<GlobalObject>::cast(holder),
+ isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
lookup->name(), cell, updated_type);
return code;
}
@@ -2276,7 +2267,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// from peeking in the code bits of the handlers.
if (!FLAG_vector_stores) ValidateStoreMode(stub);
} else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "dictionary or proxy prototype");
}
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
@@ -2367,7 +2359,7 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
Handle<Object> function = args.at<Object>(0);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
Handle<Smi> slot = args.at<Smi>(2);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
CallICNexus nexus(vector, vector_slot);
CallIC ic(isolate, &nexus);
ic.HandleMiss(function);
@@ -2386,7 +2378,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
@@ -2418,7 +2410,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2437,7 +2429,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2460,7 +2452,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
DCHECK(args.length() == 5 || args.length() == 6);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2496,10 +2488,30 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
Handle<Object> result;
if (FLAG_vector_stores) {
- DCHECK(args.length() == 5 || args.length() == 6);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ int length = args.length();
+ DCHECK(length == 5 || length == 6);
+ // We might have slot and vector, for a normal miss (slot(3), vector(4)).
+ // Or, map and vector for a transitioning store miss (map(3), vector(4)).
+ // In this case, we need to recover the slot from a virtual register.
+ // If length == 6, then a map is included (map(3), slot(4), vector(5)).
+ Handle<Smi> slot;
+ Handle<TypeFeedbackVector> vector;
+ if (length == 5) {
+ if (args.at<Object>(3)->IsMap()) {
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = handle(
+ *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
+ isolate);
+ } else {
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = args.at<Smi>(3);
+ }
+ } else {
+ vector = args.at<TypeFeedbackVector>(5);
+ slot = args.at<Smi>(4);
+ }
+
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
@@ -2539,7 +2551,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
DCHECK(args.length() == 5);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2568,7 +2580,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
DCHECK(args.length() == 5);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2634,11 +2646,14 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == (FLAG_vector_stores ? 6 : 4));
+ // Without vector stores, length == 4.
+ // With vector stores, length == 5 or 6, depending on whether the vector slot
+ // is passed in a virtual register or not.
+ DCHECK(!FLAG_vector_stores || args.length() == 5 || args.length() == 6);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Map> map = args.at<Map>(FLAG_vector_stores ? 5 : 3);
+ Handle<Map> map = args.at<Map>(3);
LanguageMode language_mode;
if (FLAG_vector_stores) {
KeyedStoreICNexus nexus(isolate);
@@ -2910,9 +2925,9 @@ void CompareNilIC::Clear(Address address, Code* target, Address constant_pool) {
Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
Handle<Object> object) {
if (object->IsNull() || object->IsUndefined()) {
- return handle(Smi::FromInt(true), isolate);
+ return isolate->factory()->true_value();
}
- return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
+ return isolate->factory()->ToBoolean(object->IsUndetectableObject());
}
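DoCompareNilSlow now returns real boolean heap values from the factory instead of Smi-encoded 0/1; the predicate it computes is unchanged: a comparison against null or undefined is true for null, undefined, or an undetectable object (the document.all case). As a plain function:

    // The value DoCompareNilSlow produces, expressed as a predicate.
    bool CompareNilResult(bool is_null, bool is_undefined, bool is_undetectable) {
      if (is_null || is_undefined) return true;  // x == null, x == undefined
      return is_undetectable;                    // e.g. document.all in browsers
    }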
@@ -3127,7 +3142,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index d65d7a8c1b..47883b46af 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -57,8 +57,6 @@ class IC {
bool IsCallStub() const { return target()->is_call_stub(); }
#endif
- static inline JSFunction* GetRootConstructor(Map* receiver_map,
- Context* native_context);
static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder,
Isolate* isolate,
@@ -210,7 +208,7 @@ class IC {
inline void UpdateTarget();
Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
- FeedbackVectorICSlot slot() const { return nexus()->slot(); }
+ FeedbackVectorSlot slot() const { return nexus()->slot(); }
State saved_state() const {
return state() == PROTOTYPE_FAILURE ? old_state_ : state();
}
@@ -289,9 +287,9 @@ class CallIC : public IC {
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
- CallICState::CallType call_type);
+ ConvertReceiverMode mode);
static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, CallICState::CallType call_type);
+ Isolate* isolate, int argc, ConvertReceiverMode mode);
static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
@@ -319,7 +317,7 @@ class LoadIC : public IC {
}
bool ShouldThrowReferenceError(Handle<Object> receiver) {
- return receiver->IsGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
+ return receiver->IsJSGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
}
// Code generator routines.
@@ -362,9 +360,8 @@ class LoadIC : public IC {
// lookup result.
void UpdateCaches(LookupIterator* lookup);
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder) override;
+ Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
+ CacheHolderFlag cache_holder) override;
private:
Handle<Code> SimpleFieldLoad(FieldIndex index);
@@ -498,9 +495,8 @@ class StoreIC : public IC {
// lookup result.
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) override;
+ Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
+ CacheHolderFlag cache_holder) override;
private:
inline void set_target(Code* code);
@@ -685,7 +681,7 @@ class ToBooleanIC : public IC {
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_H_
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 8c135e4088..200d1f6ebe 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -167,7 +167,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ lw(result, MemOperand(cp, offset));
- __ lw(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
__ lw(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ lw(result,
@@ -346,8 +346,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index a1a118135b..60c06a3eb4 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -25,7 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// Register usage:
// type: holds the receiver instance type on entry.
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
@@ -436,7 +435,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
@@ -685,7 +684,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
}
@@ -880,8 +879,6 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
// andi at, rx, 0
@@ -901,13 +898,44 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
DCHECK(Assembler::IsBranch(branch_instr));
- if (Assembler::IsBeq(branch_instr)) {
- patcher.ChangeBranchCondition(ne);
- } else {
- DCHECK(Assembler::IsBne(branch_instr));
- patcher.ChangeBranchCondition(eq);
+
+ uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
+ // Currently only the 'eq' and 'ne' cond values are supported and the simple
+ // branch instructions and their r6 variants (with opcode being the branch
+ // type). There are some special cases (see Assembler::IsBranch()) so
+ // extending this would be tricky.
+ DCHECK(opcode == BEQ || // BEQ
+ opcode == BNE || // BNE
+ opcode == POP10 || // BEQC
+ opcode == POP30 || // BNEC
+ opcode == POP66 || // BEQZC
+ opcode == POP76); // BNEZC
+ switch (opcode) {
+ case BEQ:
+ opcode = BNE; // change BEQ to BNE.
+ break;
+ case POP10:
+ opcode = POP30; // change BEQC to BNEC.
+ break;
+ case POP66:
+ opcode = POP76; // change BEQZC to BNEZC.
+ break;
+ case BNE:
+ opcode = BEQ; // change BNE to BEQ.
+ break;
+ case POP30:
+ opcode = POP10; // change BNEC to BEQC.
+ break;
+ case POP76:
+ opcode = POP66; // change BNEZC to BEQZC.
+ break;
+ default:
+ UNIMPLEMENTED();
}
+ patcher.ChangeBranchCondition(branch_instr, opcode);
}
} // namespace internal
} // namespace v8
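The MIPS patcher flips the polarity of an inlined smi check by rewriting the branch opcode itself (BEQ/BNE and the r6 compact variants) rather than assuming only two possible instructions. The check it toggles rests on V8's pointer tagging: a smi carries a zero low bit, so `andi at, rx, 1` yields zero exactly for smis. Sketch:

    #include <cstdint>

    // V8 tags small integers with a 0 low bit; heap pointers carry a 1.
    constexpr uintptr_t kSmiTagMask = 1;
    constexpr uintptr_t kSmiTag = 0;

    bool IsSmi(uintptr_t tagged_value) {
      return (tagged_value & kSmiTagMask) == kSmiTag;  // what the andi/branch pair tests
    }

    // Polarity flip, as the opcode switch above does for each branch family.
    enum Opcode { BEQ_OP, BNE_OP };
    Opcode Flip(Opcode op) { return op == BEQ_OP ? BNE_OP : BEQ_OP; }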
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 9c3a5b3e70..942c42c221 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -168,7 +168,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
// Check we're still in the same context.
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ ld(result, MemOperand(cp, offset));
- __ ld(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
__ ld(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ ld(result,
@@ -347,8 +347,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 0d7af56071..e73921a317 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -25,7 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// Register usage:
// type: holds the receiver instance type on entry.
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
@@ -433,7 +432,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
@@ -684,7 +683,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 52efcf91a4..0335362fbb 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -169,7 +169,8 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ LoadP(result, MemOperand(cp, offset));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ LoadP(result,
+ FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
__ LoadP(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ LoadP(result,
@@ -355,8 +356,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 09117179ea..ea8239a3e2 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -26,8 +26,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ beq(global_object);
- __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ beq(global_object);
__ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ beq(global_object);
}
@@ -442,7 +440,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
@@ -715,7 +713,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
}
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index cb1b62848e..4b27e6e396 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -165,7 +165,7 @@ class StubCache {
DISALLOW_COPY_AND_ASSIGN(StubCache);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STUB_CACHE_H_
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 1490c921fc..6bc3aafa89 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -80,7 +80,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ movp(result, Operand(rsi, offset));
- __ movp(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+ __ movp(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset));
__ movp(result, Operand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ movp(result,
@@ -364,8 +364,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index ff74a965e4..3fc8747c66 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -25,8 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
__ j(equal, global_object);
- __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, global_object);
__ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
__ j(equal, global_object);
}
@@ -346,7 +344,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
@@ -574,7 +572,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index d9f7e8012d..bb3b25a47f 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -116,7 +116,7 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
__ mov(result, Operand(esi, offset));
- __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+ __ mov(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset));
__ mov(result, Operand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
__ mov(result,
@@ -362,18 +362,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
- //  current           after GeneratePushMap
- // -------------------------------------------------
- //    ret addr           slot
- //    vector             vector
- //  sp -> slot           map
- //                       sp -> ret addr
- //
- __ xchg(map_reg, Operand(esp, 0));
- __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
- __ push(map_reg);
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
+ DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
+ DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(isolate());
+ __ mov(destination_map, current_map);
+ __ pop(current_map);
+ __ mov(Operand::StaticVariable(virtual_slot), current_map);
+ __ pop(current_map); // put vector in place.
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 53e7a5ca0c..6ef5b635c7 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -25,8 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object);
}
@@ -341,7 +339,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(isolate);
int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
@@ -569,7 +567,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
}
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index 2522223ead..dfc0ef6c66 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -24,7 +24,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
bool is_vector_store =
diff --git a/deps/v8/src/icu_util.h b/deps/v8/src/icu_util.h
index cd98ff0dfc..c308decfe5 100644
--- a/deps/v8/src/icu_util.h
+++ b/deps/v8/src/icu_util.h
@@ -14,6 +14,7 @@ namespace internal {
// function should be called before ICU is used.
bool InitializeICU(const char* icu_data_file);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ICU_UTIL_H_
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index 1d23af95e8..723cdfa2a6 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -5,6 +5,7 @@
#include "src/identity-map.h"
#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -14,31 +15,26 @@ static const int kInitialIdentityMapSize = 4;
static const int kResizeFactor = 4;
IdentityMapBase::~IdentityMapBase() {
- if (keys_) {
- Heap::OptionalRelocationLock lock(heap_, concurrent_);
- heap_->UnregisterStrongRoots(keys_);
- }
+ if (keys_) heap_->UnregisterStrongRoots(keys_);
}
-IdentityMapBase::RawEntry IdentityMapBase::Lookup(Handle<Object> key) {
- AllowHandleDereference for_lookup;
- int index = LookupIndex(*key);
+IdentityMapBase::RawEntry IdentityMapBase::Lookup(Object* key) {
+ int index = LookupIndex(key);
return index >= 0 ? &values_[index] : nullptr;
}
-IdentityMapBase::RawEntry IdentityMapBase::Insert(Handle<Object> key) {
- AllowHandleDereference for_lookup;
- int index = InsertIndex(*key);
+IdentityMapBase::RawEntry IdentityMapBase::Insert(Object* key) {
+ int index = InsertIndex(key);
DCHECK_GE(index, 0);
return &values_[index];
}
int IdentityMapBase::Hash(Object* address) {
+ CHECK_NE(address, heap_->not_mapped_symbol());
uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
- CHECK_NE(0U, raw_address); // Cannot store Smi 0 as a key here, sorry.
// Xor some of the upper bits, since the lower 2 or 3 are usually aligned.
return static_cast<int>((raw_address >> 11) ^ raw_address);
}
@@ -46,26 +42,28 @@ int IdentityMapBase::Hash(Object* address) {
int IdentityMapBase::LookupIndex(Object* address) {
int start = Hash(address) & mask_;
+ Object* not_mapped = heap_->not_mapped_symbol();
for (int index = start; index < size_; index++) {
if (keys_[index] == address) return index; // Found.
- if (keys_[index] == nullptr) return -1; // Not found.
+ if (keys_[index] == not_mapped) return -1; // Not found.
}
for (int index = 0; index < start; index++) {
if (keys_[index] == address) return index; // Found.
- if (keys_[index] == nullptr) return -1; // Not found.
+ if (keys_[index] == not_mapped) return -1; // Not found.
}
return -1;
}
int IdentityMapBase::InsertIndex(Object* address) {
+ Object* not_mapped = heap_->not_mapped_symbol();
while (true) {
int start = Hash(address) & mask_;
int limit = size_ / 2;
// Search up to {limit} entries.
for (int index = start; --limit > 0; index = (index + 1) & mask_) {
if (keys_[index] == address) return index; // Found.
- if (keys_[index] == nullptr) { // Free entry.
+ if (keys_[index] == not_mapped) { // Free entry.
keys_[index] = address;
return index;
}
@@ -81,8 +79,7 @@ int IdentityMapBase::InsertIndex(Object* address) {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
-IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Handle<Object> key) {
- Heap::OptionalRelocationLock lock(heap_, concurrent_);
+IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
RawEntry result;
if (size_ == 0) {
// Allocate the initial storage for keys and values.
@@ -91,7 +88,8 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Handle<Object> key) {
gc_counter_ = heap_->gc_count();
keys_ = zone_->NewArray<Object*>(size_);
- memset(keys_, 0, sizeof(Object*) * size_);
+ Object* not_mapped = heap_->not_mapped_symbol();
+ for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
values_ = zone_->NewArray<void*>(size_);
memset(values_, 0, sizeof(void*) * size_);
@@ -114,10 +112,9 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Handle<Object> key) {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Handle<Object> key) {
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) {
if (size_ == 0) return nullptr;
- Heap::OptionalRelocationLock lock(heap_, concurrent_);
RawEntry result = Lookup(key);
if (result == nullptr && gc_counter_ != heap_->gc_count()) {
Rehash(); // Rehash is expensive, so only do it in case of a miss.
@@ -135,15 +132,16 @@ void IdentityMapBase::Rehash() {
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
+ Object* not_mapped = heap_->not_mapped_symbol();
for (int i = 0; i < size_; i++) {
- if (keys_[i] == nullptr) {
+ if (keys_[i] == not_mapped) {
last_empty = i;
} else {
int pos = Hash(keys_[i]) & mask_;
if (pos <= last_empty || pos > i) {
// Evacuate an entry that is in the wrong place.
reinsert.push_back(std::pair<Object*, void*>(keys_[i], values_[i]));
- keys_[i] = nullptr;
+ keys_[i] = not_mapped;
values_[i] = nullptr;
last_empty = i;
}
@@ -153,7 +151,7 @@ void IdentityMapBase::Rehash() {
for (auto pair : reinsert) {
int index = InsertIndex(pair.first);
DCHECK_GE(index, 0);
- DCHECK_NULL(values_[index]);
+ DCHECK_NE(heap_->not_mapped_symbol(), values_[index]);
values_[index] = pair.second;
}
}
@@ -172,12 +170,13 @@ void IdentityMapBase::Resize() {
CHECK_LE(size_, (1024 * 1024 * 16)); // that would be extreme...
keys_ = zone_->NewArray<Object*>(size_);
- memset(keys_, 0, sizeof(Object*) * size_);
+ Object* not_mapped = heap_->not_mapped_symbol();
+ for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
values_ = zone_->NewArray<void*>(size_);
memset(values_, 0, sizeof(void*) * size_);
for (int i = 0; i < old_size; i++) {
- if (old_keys[i] == nullptr) continue;
+ if (old_keys[i] == not_mapped) continue;
int index = InsertIndex(old_keys[i]);
DCHECK_GE(index, 0);
values_[index] = old_values[i];
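identity-map.cc replaces nullptr with a dedicated not_mapped sentinel as the empty-slot marker, which removes the old restriction that Smi 0 could not be used as a key. The table itself is ordinary open addressing: hash the pointer, then scan forward until the key or the sentinel appears. Condensed:

    #include <cstdint>

    // Condensed mirror of Hash/LookupIndex above (illustrative types).
    struct Table {
      void** keys;
      int size;          // power of two
      int mask;          // size - 1
      void* not_mapped;  // unique sentinel, never a valid key
    };

    int Hash(void* key) {
      uintptr_t raw = reinterpret_cast<uintptr_t>(key);
      // Fold in upper bits; the low 2-3 bits of aligned pointers are zero.
      return static_cast<int>((raw >> 11) ^ raw);
    }

    int LookupIndex(const Table& t, void* key) {
      int start = Hash(key) & t.mask;
      for (int i = start; i < t.size; i++) {
        if (t.keys[i] == key) return i;            // found
        if (t.keys[i] == t.not_mapped) return -1;  // empty slot: not present
      }
      for (int i = 0; i < start; i++) {            // wrap around once
        if (t.keys[i] == key) return i;
        if (t.keys[i] == t.not_mapped) return -1;
      }
      return -1;
    }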
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index 2143e24e37..2c4a0f3399 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -17,11 +17,6 @@ class Zone;
// Base class of identity maps contains shared code for all template
// instantiations.
class IdentityMapBase {
- public:
- // Enable or disable concurrent mode for this map. Concurrent mode implies
- // taking the heap's relocation lock during most operations.
- void SetConcurrent(bool concurrent) { concurrent_ = concurrent; }
-
protected:
// Allow Tester to access internals, including changing the address of objects
// within the {keys_} array in order to simulate a moving GC.
@@ -32,7 +27,6 @@ class IdentityMapBase {
IdentityMapBase(Heap* heap, Zone* zone)
: heap_(heap),
zone_(zone),
- concurrent_(false),
gc_counter_(-1),
size_(0),
mask_(0),
@@ -40,8 +34,8 @@ class IdentityMapBase {
values_(nullptr) {}
~IdentityMapBase();
- RawEntry GetEntry(Handle<Object> key);
- RawEntry FindEntry(Handle<Object> key);
+ RawEntry GetEntry(Object* key);
+ RawEntry FindEntry(Object* key);
private:
// Internal implementation should not be called directly by subclasses.
@@ -49,13 +43,12 @@ class IdentityMapBase {
int InsertIndex(Object* address);
void Rehash();
void Resize();
- RawEntry Lookup(Handle<Object> key);
- RawEntry Insert(Handle<Object> key);
+ RawEntry Lookup(Object* key);
+ RawEntry Insert(Object* key);
int Hash(Object* address);
Heap* heap_;
Zone* zone_;
- bool concurrent_;
int gc_counter_;
int size_;
int mask_;
@@ -79,20 +72,21 @@ class IdentityMap : public IdentityMapBase {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
- V* Get(Handle<Object> key) { return reinterpret_cast<V*>(GetEntry(key)); }
+ V* Get(Handle<Object> key) { return Get(*key); }
+ V* Get(Object* key) { return reinterpret_cast<V*>(GetEntry(key)); }
// Searches this map for the given key using the object's address
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
- V* Find(Handle<Object> key) { return reinterpret_cast<V*>(FindEntry(key)); }
+ V* Find(Handle<Object> key) { return Find(*key); }
+ V* Find(Object* key) { return reinterpret_cast<V*>(FindEntry(key)); }
// Set the value for the given key.
- void Set(Handle<Object> key, V value) {
- *(reinterpret_cast<V*>(GetEntry(key))) = value;
- }
+ void Set(Handle<Object> key, V v) { Set(*key, v); }
+ void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IDENTITY_MAP_H_
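
The header now pairs each Handle<Object> method with a raw Object* overload and forwards the former to the latter, so callers that already hold a raw pointer skip the handle indirection (the relocation lock that made handles necessary is gone). A compilable sketch of the forwarding pattern with stand-in types, not the real V8 Handle:

#include <cassert>

struct Object {};

// Stand-in for V8's Handle<T>: one extra indirection through a slot.
template <typename T>
class Handle {
 public:
  explicit Handle(T** slot) : slot_(slot) {}
  T* operator*() const { return *slot_; }

 private:
  T** slot_;
};

template <typename V>
class IdentityMap {
 public:
  // The handle overload simply dereferences and forwards.
  V* Get(Handle<Object> key) { return Get(*key); }
  V* Get(Object* /* key */) { return &storage_; }  // Real lookup elided.

 private:
  V storage_{};
};

int main() {
  Object obj;
  Object* raw = &obj;
  Handle<Object> handle(&raw);
  IdentityMap<int> map;
  assert(map.Get(handle) == map.Get(raw));  // Both paths share one slot.
  return 0;
}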
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index b71f973120..297722c255 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -14,8 +14,8 @@ Type* SmiType(Zone* zone) {
}
-Type* UntaggedSigned32(Zone* zone) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone);
+Type* UntaggedIntegral32(Zone* zone) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone);
}
@@ -29,7 +29,7 @@ Type* AnyTagged(Zone* zone) {
Type* ExternalPointer(Zone* zone) {
return Type::Intersect(Type::Internal(), Type::UntaggedPointer(), zone);
}
-}
+} // namespace
Type::FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
@@ -75,6 +75,12 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
}
+void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
+}
+
+
Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
    Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
@@ -109,6 +115,21 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
}
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ if (SlotRegister().is(no_reg)) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister(), VectorRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+ } else {
+ Register registers[] = {ReceiverRegister(), NameRegister(),
+ ValueRegister(), MapRegister(),
+ SlotRegister(), VectorRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+ }
+}
+
+
Type::FunctionType*
StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
    Isolate* isolate, int parameter_count) {
@@ -129,7 +150,7 @@ LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
- function->InitParameter(0, UntaggedSigned32(zone));
+ function->InitParameter(0, UntaggedIntegral32(zone));
return function;
}
@@ -147,7 +168,7 @@ StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
- function->InitParameter(0, UntaggedSigned32(zone));
+ function->InitParameter(0, UntaggedIntegral32(zone));
function->InitParameter(1, AnyTagged(zone));
return function;
}
@@ -174,6 +195,13 @@ void StringCompareDescriptor::InitializePlatformSpecific(
}
+void ToLengthDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister()};
@@ -228,14 +256,19 @@ Type::FunctionType*
VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
    Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 6, zone);
- function->InitParameter(0, AnyTagged(zone)); // receiver
- function->InitParameter(1, AnyTagged(zone)); // name
- function->InitParameter(2, AnyTagged(zone)); // value
- function->InitParameter(3, SmiType(zone)); // slot
- function->InitParameter(4, AnyTagged(zone)); // vector
- function->InitParameter(5, AnyTagged(zone)); // map
+ bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
+ int arg_count = has_slot ? 6 : 5;
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(zone), Type::Undefined(), arg_count, zone);
+ int index = 0;
+ function->InitParameter(index++, AnyTagged(zone)); // receiver
+ function->InitParameter(index++, AnyTagged(zone)); // name
+ function->InitParameter(index++, AnyTagged(zone)); // value
+ function->InitParameter(index++, AnyTagged(zone)); // map
+ if (has_slot) {
+ function->InitParameter(index++, SmiType(zone)); // slot
+ }
+ function->InitParameter(index++, AnyTagged(zone)); // vector
return function;
}
@@ -389,7 +422,7 @@ CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
function->InitParameter(0, AnyTagged(zone)); // target
function->InitParameter(
- 1, UntaggedSigned32(zone)); // actual number of arguments
+ 1, UntaggedIntegral32(zone)); // actual number of arguments
return function;
}
@@ -427,7 +460,7 @@ ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, UntaggedSigned32(zone));
+ function->InitParameter(2, UntaggedIntegral32(zone));
return function;
}
@@ -439,7 +472,7 @@ InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, UntaggedSigned32(zone));
+ function->InitParameter(1, UntaggedIntegral32(zone));
return function;
}
@@ -452,10 +485,10 @@ ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(
- 1, UntaggedSigned32(zone)); // actual number of arguments
+ 1, UntaggedIntegral32(zone)); // actual number of arguments
function->InitParameter(
2,
- UntaggedSigned32(zone)); // expected number of arguments
+ UntaggedIntegral32(zone)); // expected number of arguments
return function;
}
@@ -471,7 +504,7 @@ ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
function->InitParameter(2, AnyTagged(zone)); // holder
function->InitParameter(3, ExternalPointer(zone)); // api_function_address
function->InitParameter(
- 4, UntaggedSigned32(zone)); // actual number of arguments
+ 4, UntaggedIntegral32(zone)); // actual number of arguments
return function;
}
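
One structural point in the hunks above: VectorStoreTransition's function type is now built with a variable parameter count, because ia32 has no spare register for the slot, so the descriptor shrinks to five register parameters and the slot travels on the stack. A toy restatement of that conditional layout (Register and no_reg are stand-ins, not the V8 types):

#include <cstdio>
#include <initializer_list>
#include <string>
#include <vector>

struct Register { int code; };
constexpr Register no_reg{-1};
inline bool IsValid(Register r) { return r.code >= 0; }

// Builds the ordered parameter list; the slot parameter only exists when
// the platform provides a slot register.
std::vector<std::string> VectorStoreTransitionParams(Register slot) {
  std::vector<std::string> params = {"receiver", "name", "value", "map"};
  if (IsValid(slot)) params.push_back("slot");
  params.push_back("vector");
  return params;
}

int main() {
  for (Register slot : {Register{5}, no_reg}) {
    std::printf("%s:", IsValid(slot) ? "with slot register" : "ia32-style");
    for (const std::string& p : VectorStoreTransitionParams(slot))
      std::printf(" %s", p.c_str());
    std::printf("\n");
  }
  return 0;
}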
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 534313f7d3..2c5ac4b052 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -25,6 +25,7 @@ class PlatformInterfaceDescriptor;
V(FastNewClosure) \
V(FastNewContext) \
V(ToNumber) \
+ V(ToLength) \
V(ToString) \
V(ToObject) \
V(NumberToString) \
@@ -38,10 +39,11 @@ class PlatformInterfaceDescriptor;
V(CallFunctionWithFeedbackAndVector) \
V(CallConstruct) \
V(CallTrampoline) \
- V(PushArgsAndCall) \
V(RegExpConstructResult) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
+ V(AllocateMutableHeapNumber) \
+ V(AllocateInNewSpace) \
V(ArrayConstructorConstantArgCount) \
V(ArrayConstructor) \
V(InternalArrayConstructorConstantArgCount) \
@@ -70,7 +72,10 @@ class PlatformInterfaceDescriptor;
V(ContextOnly) \
V(GrowArrayElements) \
V(MathRoundVariantCallFromUnoptimizedCode) \
- V(MathRoundVariantCallFromOptimizedCode)
+ V(MathRoundVariantCallFromOptimizedCode) \
+ V(InterpreterPushArgsAndCall) \
+ V(InterpreterPushArgsAndConstruct) \
+ V(InterpreterCEntry)
class CallInterfaceDescriptorData {
@@ -219,12 +224,12 @@ class CallInterfaceDescriptor {
static inline CallDescriptors::Key key();
-#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
- DECLARE_DESCRIPTOR(name, base) \
- protected: \
- virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType( \
- Isolate* isolate, int register_param_count) override; \
- \
+#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
+ DECLARE_DESCRIPTOR(name, base) \
+ protected: \
+ Type::FunctionType* BuildCallInterfaceDescriptorFunctionType( \
+ Isolate* isolate, int register_param_count) override; \
+ \
public:
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
@@ -280,19 +285,21 @@ class VectorStoreTransitionDescriptor : public StoreDescriptor {
// Extends StoreDescriptor with Map parameter.
enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kSlotIndex,
- kVectorIndex,
- kMapIndex,
- kParameterCount
+ kReceiverIndex = 0,
+ kNameIndex = 1,
+ kValueIndex = 2,
+
+ kMapIndex = 3,
+
+ kSlotIndex = 4, // not present on ia32.
+ kVirtualSlotVectorIndex = 4,
+
+ kVectorIndex = 5
};
- // These registers are no_reg for ia32, using the stack instead.
+ static const Register MapRegister();
static const Register SlotRegister();
static const Register VectorRegister();
- static const Register MapRegister();
};
@@ -368,6 +375,16 @@ class ToNumberDescriptor : public CallInterfaceDescriptor {
};
+class ToLengthDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices { kReceiverIndex };
+
+ DECLARE_DESCRIPTOR(ToLengthDescriptor, CallInterfaceDescriptor)
+
+ static const Register ReceiverRegister();
+};
+
+
class ToStringDescriptor : public CallInterfaceDescriptor {
public:
enum ParameterIndices { kReceiverIndex };
@@ -505,6 +522,19 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
};
+class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(AllocateMutableHeapNumberDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class AllocateInNewSpaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(AllocateInNewSpaceDescriptor, CallInterfaceDescriptor)
+};
+
+
class ArrayConstructorConstantArgCountDescriptor
: public CallInterfaceDescriptor {
public:
@@ -706,11 +736,27 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
};
-class PushArgsAndCallDescriptor : public CallInterfaceDescriptor {
+class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InterpreterPushArgsAndCallDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class InterpreterPushArgsAndConstructDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InterpreterPushArgsAndConstructDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(PushArgsAndCallDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
};
+
#undef DECLARE_DESCRIPTOR
@@ -720,8 +766,8 @@ class PushArgsAndCallDescriptor : public CallInterfaceDescriptor {
CallDescriptors::Key name##Descriptor::key() { return CallDescriptors::name; }
INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
#undef DEF_KEY
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#if V8_TARGET_ARCH_ARM64
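
A side note on the macro change above: the old expansion wrote both virtual and override on BuildCallInterfaceDescriptorFunctionType, and the new one keeps only override, since an overriding function is necessarily virtual. A minimal illustration:

struct Base {
  virtual ~Base() = default;
  virtual int F() { return 0; }
};

struct Derived : Base {
  // `override` alone documents and enforces the override at compile time;
  // repeating `virtual` would be legal but redundant.
  int F() override { return 1; }
};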
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 906a5ce641..5ad730c8a4 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1 +1,6 @@
+set noparent
+
+bmeurer@chromium.org
+mstarzinger@chromium.org
+oth@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 9c6b5905cc..f2f5c07251 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -10,52 +10,86 @@ namespace interpreter {
BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
+ zone_(zone),
bytecodes_(zone),
bytecode_generated_(false),
last_block_end_(0),
last_bytecode_start_(~0),
- return_seen_in_block_(false),
+ exit_seen_in_block_(false),
constants_map_(isolate->heap(), zone),
constants_(zone),
parameter_count_(-1),
local_register_count_(-1),
+ context_register_count_(-1),
temporary_register_count_(0),
- temporary_register_next_(0) {}
+ free_temporaries_(zone) {}
void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
local_register_count_ = number_of_locals;
- temporary_register_next_ = local_register_count_;
+ DCHECK_LE(context_register_count_, 0);
}
-int BytecodeArrayBuilder::locals_count() const { return local_register_count_; }
-
-
void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
parameter_count_ = number_of_parameters;
}
-int BytecodeArrayBuilder::parameter_count() const { return parameter_count_; }
+void BytecodeArrayBuilder::set_context_count(int number_of_contexts) {
+ context_register_count_ = number_of_contexts;
+ DCHECK_GE(local_register_count_, 0);
+}
+
+Register BytecodeArrayBuilder::first_context_register() const {
+ DCHECK_GT(context_register_count_, 0);
+ return Register(local_register_count_);
+}
-Register BytecodeArrayBuilder::Parameter(int parameter_index) {
+
+Register BytecodeArrayBuilder::last_context_register() const {
+ DCHECK_GT(context_register_count_, 0);
+ return Register(local_register_count_ + context_register_count_ - 1);
+}
+
+
+Register BytecodeArrayBuilder::first_temporary_register() const {
+ DCHECK_GT(temporary_register_count_, 0);
+ return Register(fixed_register_count());
+}
+
+
+Register BytecodeArrayBuilder::last_temporary_register() const {
+ DCHECK_GT(temporary_register_count_, 0);
+ return Register(fixed_register_count() + temporary_register_count_ - 1);
+}
+
+
+Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
- DCHECK_LT(parameter_index, parameter_count_);
- return Register::FromParameterIndex(parameter_index, parameter_count_);
+ return Register::FromParameterIndex(parameter_index, parameter_count());
+}
+
+
+bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
+ return reg.is_parameter() || reg.index() < locals_count();
+}
+
+
+bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
+ return temporary_register_count_ > 0 && first_temporary_register() <= reg &&
+ reg <= last_temporary_register();
}
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK_EQ(bytecode_generated_, false);
- DCHECK_GE(parameter_count_, 0);
- DCHECK_GE(local_register_count_, 0);
EnsureReturn();
int bytecode_size = static_cast<int>(bytecodes_.size());
- int register_count = local_register_count_ + temporary_register_count_;
+ int register_count = fixed_register_count() + temporary_register_count_;
int frame_size = register_count * kPointerSize;
Factory* factory = isolate_->factory();
@@ -68,60 +102,108 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
Handle<BytecodeArray> output =
factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
- parameter_count_, constant_pool);
+ parameter_count(), constant_pool);
bytecode_generated_ = true;
return output;
}
template <size_t N>
-void BytecodeArrayBuilder::Output(uint8_t(&bytes)[N]) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(Bytecodes::FromByte(bytes[0])),
- static_cast<int>(N) - 1);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), static_cast<int>(N));
last_bytecode_start_ = bytecodes()->size();
- for (int i = 1; i < static_cast<int>(N); i++) {
- DCHECK(OperandIsValid(Bytecodes::FromByte(bytes[0]), i - 1, bytes[i]));
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+ for (int i = 0; i < static_cast<int>(N); i++) {
+ DCHECK(OperandIsValid(bytecode, i, operands[i]));
+ switch (Bytecodes::GetOperandSize(bytecode, i)) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
+ break;
+ case OperandSize::kShort: {
+ uint8_t operand_bytes[2];
+ WriteUnalignedUInt16(operand_bytes, operands[i]);
+ bytecodes()->insert(bytecodes()->end(), operand_bytes,
+ operand_bytes + 2);
+ break;
+ }
+ }
}
- bytecodes()->insert(bytecodes()->end(), bytes, bytes + N);
}
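
The rewritten Output above moves from fixed one-byte operands to uint32_t operands serialized at whatever width the bytecode declares for each position. A self-contained sketch of the two-width emission, with a portable stand-in for WriteUnalignedUInt16 (assumed here to be a byte-wise unaligned 16-bit store, as the name suggests):

#include <cstdint>
#include <cstring>
#include <vector>

enum class OperandSize { kByte, kShort };

// Appends |value| to |bytecodes| using the declared operand width.
void EmitOperand(std::vector<uint8_t>* bytecodes, OperandSize size,
                 uint32_t value) {
  switch (size) {
    case OperandSize::kByte:
      bytecodes->push_back(static_cast<uint8_t>(value));
      break;
    case OperandSize::kShort: {
      uint16_t narrow = static_cast<uint16_t>(value);
      uint8_t operand_bytes[2];
      std::memcpy(operand_bytes, &narrow, 2);  // Unaligned 16-bit store.
      bytecodes->insert(bytecodes->end(), operand_bytes, operand_bytes + 2);
      break;
    }
  }
}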
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
- uint8_t operand1, uint8_t operand2) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0, operand1, operand2};
- Output(bytes);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2) {
+ uint32_t operands[] = {operand0, operand1, operand2};
+ Output(bytecode, operands);
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
- uint8_t operand1) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0, operand1};
- Output(bytes);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1) {
+ uint32_t operands[] = {operand0, operand1};
+ Output(bytecode, operands);
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0};
- Output(bytes);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+ uint32_t operands[] = {operand0};
+ Output(bytecode, operands);
}
void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode)};
- Output(bytes);
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ last_bytecode_start_ = bytecodes()->size();
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
- Register reg) {
+ Register reg,
+ Strength strength) {
+ if (is_strong(strength)) {
+ UNIMPLEMENTED();
+ }
+
Output(BytecodeForBinaryOperation(op), reg.ToOperand());
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
+ Strength strength) {
+ if (is_strong(strength)) {
+ UNIMPLEMENTED();
+ }
+
+ Output(BytecodeForCountOperation(op));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
+ Output(Bytecode::kLogicalNot);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
+ Output(Bytecode::kTypeOf);
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
- Token::Value op, Register reg, LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+ Token::Value op, Register reg, Strength strength) {
+ if (is_strong(strength)) {
UNIMPLEMENTED();
}
@@ -146,8 +228,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
- if (FitsInIdxOperand(entry)) {
+ if (FitsInIdx8Operand(entry)) {
Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(Bytecode::kLdaConstantWide, static_cast<uint16_t>(entry));
} else {
UNIMPLEMENTED();
}
@@ -187,6 +271,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
+ // TODO(oth): Avoid loading the accumulator with the register if the
+ // previous bytecode stored the accumulator with the same register.
Output(Bytecode::kLdar, reg.ToOperand());
return *this;
}
@@ -194,30 +280,88 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
+ // TODO(oth): Avoid storing the accumulator in the register if the
+ // previous bytecode loaded the accumulator with the same register.
Output(Bytecode::kStar, reg.ToOperand());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int slot_index) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
+ size_t name_index, int feedback_slot, LanguageMode language_mode,
+ TypeofMode typeof_mode) {
+ // TODO(rmcilroy): Potentially store language and typeof information in an
+ // operand rather than having extra bytecodes.
+ Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, static_cast<uint8_t>(name_index),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
+ size_t name_index, int feedback_slot, LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, static_cast<uint8_t>(name_index),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
+ int slot_index) {
DCHECK(slot_index >= 0);
- if (FitsInIdxOperand(slot_index)) {
- Output(Bytecode::kLdaGlobal, static_cast<uint8_t>(slot_index));
+ if (FitsInIdx8Operand(slot_index)) {
+ Output(Bytecode::kLdaContextSlot, context.ToOperand(),
+ static_cast<uint8_t>(slot_index));
} else {
UNIMPLEMENTED();
}
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
- Register object, int feedback_slot, LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
+ int slot_index) {
+ DCHECK(slot_index >= 0);
+ if (FitsInIdx8Operand(slot_index)) {
+ Output(Bytecode::kStaContextSlot, context.ToOperand(),
+ static_cast<uint8_t>(slot_index));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
+
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kLoadIC, object.ToOperand(),
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
+ Register object, size_t name_index, int feedback_slot,
+ LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForLoadIC(language_mode);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -227,13 +371,31 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot, LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+ Bytecode bytecode = BytecodeForKeyedLoadIC(language_mode);
+ if (FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ static_cast<uint16_t>(feedback_slot));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
+
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kKeyedLoadIC, object.ToOperand(),
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+ Register object, size_t name_index, int feedback_slot,
+ LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreIC(language_mode);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -241,16 +403,47 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
- Register object, Register name, int feedback_slot,
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
+ Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+ Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
+ if (FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), key.ToOperand(),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ key.ToOperand(), static_cast<uint16_t>(feedback_slot));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kStoreIC, object.ToOperand(), name.ToOperand(),
- static_cast<uint8_t>(feedback_slot));
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
+ PretenureFlag tenured) {
+ DCHECK(FitsInImm8Operand(tenured));
+ Output(Bytecode::kCreateClosure, static_cast<uint8_t>(tenured));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
+ CreateArgumentsType type) {
+ // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
+ // than having two different bytecodes once we have better support for
+ // branches in the InterpreterAssembler.
+ Bytecode bytecode = BytecodeForCreateArguments(type);
+ Output(bytecode);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
+ int literal_index, Register flags) {
+ if (FitsInIdx8Operand(literal_index)) {
+ Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(literal_index),
+ flags.ToOperand());
} else {
UNIMPLEMENTED();
}
@@ -258,16 +451,25 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
- Register object, Register key, int feedback_slot,
- LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
+ int literal_index, int flags) {
+  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
+ if (FitsInIdx8Operand(literal_index)) {
+ Output(Bytecode::kCreateArrayLiteral, static_cast<uint8_t>(literal_index),
+ static_cast<uint8_t>(flags));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kKeyedStoreIC, object.ToOperand(), key.ToOperand(),
- static_cast<uint8_t>(feedback_slot));
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
+ int literal_index, int flags) {
+  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
+ if (FitsInIdx8Operand(literal_index)) {
+ Output(Bytecode::kCreateObjectLiteral, static_cast<uint8_t>(literal_index),
+ static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -275,34 +477,79 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
+ Output(Bytecode::kPushContext, context.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
+ Output(Bytecode::kPopContext, context.ToOperand());
+ return *this;
+}
+
+
+bool BytecodeArrayBuilder::NeedToBooleanCast() {
+ if (!LastBytecodeInSameBlock()) {
+    // The previous bytecode is from a different block, so assume a cast is
+    // needed.
+ return true;
+ }
+
+  // If the previous bytecode already put a boolean in the accumulator, no
+  // cast is needed.
+ switch (Bytecodes::FromByte(bytecodes()->at(last_bytecode_start_))) {
+ case Bytecode::kToBoolean:
+ UNREACHABLE();
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kLogicalNot:
+ case Bytecode::kTestEqual:
+ case Bytecode::kTestNotEqual:
+ case Bytecode::kTestEqualStrict:
+ case Bytecode::kTestNotEqualStrict:
+ case Bytecode::kTestLessThan:
+ case Bytecode::kTestLessThanOrEqual:
+ case Bytecode::kTestGreaterThan:
+ case Bytecode::kTestGreaterThanOrEqual:
+ case Bytecode::kTestInstanceOf:
+ case Bytecode::kTestIn:
+ case Bytecode::kForInDone:
+ return false;
+ default:
+ return true;
+ }
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToBoolean() {
- if (LastBytecodeInSameBlock()) {
- // If the previous bytecode puts a boolean in the accumulator
- // there is no need to emit an instruction.
- switch (Bytecodes::FromByte(bytecodes()->at(last_bytecode_start_))) {
- case Bytecode::kToBoolean:
- UNREACHABLE();
- case Bytecode::kLdaTrue:
- case Bytecode::kLdaFalse:
- case Bytecode::kTestEqual:
- case Bytecode::kTestNotEqual:
- case Bytecode::kTestEqualStrict:
- case Bytecode::kTestNotEqualStrict:
- case Bytecode::kTestLessThan:
- case Bytecode::kTestLessThanOrEqual:
- case Bytecode::kTestGreaterThan:
- case Bytecode::kTestGreaterThanOrEqual:
- case Bytecode::kTestInstanceOf:
- case Bytecode::kTestIn:
- break;
- default:
- Output(Bytecode::kToBoolean);
- }
+ // If the previous bytecode puts a boolean in the accumulator
+ // there is no need to emit an instruction.
+ if (NeedToBooleanCast()) {
+ Output(Bytecode::kToBoolean);
}
return *this;
}
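
CastAccumulatorToBoolean now delegates to NeedToBooleanCast, a peephole that inspects the last opcode emitted in the current basic block. The same elision in miniature, with placeholder opcodes rather than the real bytecode set:

#include <cstdint>
#include <vector>

enum class Op : uint8_t { kLdaTrue, kLdaFalse, kTestEqual, kAdd, kToBoolean };

// Opcodes known to leave a boolean in the accumulator.
bool ProducesBoolean(Op op) {
  switch (op) {
    case Op::kLdaTrue:
    case Op::kLdaFalse:
    case Op::kTestEqual:
      return true;
    default:
      return false;
  }
}

// Emits kToBoolean only when the previous opcode in the same basic block
// might leave a non-boolean in the accumulator.
void CastToBoolean(std::vector<Op>* code, bool last_in_same_block) {
  if (last_in_same_block && !code->empty() && ProducesBoolean(code->back()))
    return;  // Already a boolean; elide the cast.
  code->push_back(Op::kToBoolean);
}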
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
+ Output(Bytecode::kToObject);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
+ Output(Bytecode::kToName);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
+  // TODO(rmcilroy): consider omitting if the preceding bytecode always returns
+ // a number.
+ Output(Bytecode::kToNumber);
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
if (label->is_forward_target()) {
    // An earlier jump instruction refers to this label. Update its location.
@@ -310,15 +557,20 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
// Now treat as if the label will only be back referred to.
}
label->bind_to(bytecodes()->size());
+ LeaveBasicBlock();
return *this;
}
-// static
-bool BytecodeArrayBuilder::IsJumpWithImm8Operand(Bytecode jump_bytecode) {
- return jump_bytecode == Bytecode::kJump ||
- jump_bytecode == Bytecode::kJumpIfTrue ||
- jump_bytecode == Bytecode::kJumpIfFalse;
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
+ BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
+ DCHECK(target.is_bound());
+ PatchJump(bytecodes()->begin() + target.offset(),
+ bytecodes()->begin() + label->offset());
+ label->bind_to(target.offset());
+ LeaveBasicBlock();
+ return *this;
}
@@ -332,6 +584,14 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
return Bytecode::kJumpIfTrueConstant;
case Bytecode::kJumpIfFalse:
return Bytecode::kJumpIfFalseConstant;
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfToBooleanTrueConstant;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfToBooleanFalseConstant;
+ case Bytecode::kJumpIfNull:
+ return Bytecode::kJumpIfNullConstant;
+ case Bytecode::kJumpIfUndefined:
+ return Bytecode::kJumpIfUndefinedConstant;
default:
UNREACHABLE();
return Bytecode::kJumpConstant;
@@ -345,9 +605,9 @@ void BytecodeArrayBuilder::PatchJump(
Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
int delta = static_cast<int>(jump_target - jump_location);
- DCHECK(IsJumpWithImm8Operand(jump_bytecode));
+ DCHECK(Bytecodes::IsJump(jump_bytecode));
DCHECK_EQ(Bytecodes::Size(jump_bytecode), 2);
- DCHECK_GE(delta, 0);
+ DCHECK_NE(delta, 0);
if (FitsInImm8Operand(delta)) {
// Just update the operand
@@ -356,9 +616,9 @@ void BytecodeArrayBuilder::PatchJump(
} else {
// Update the jump type and operand
size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdxOperand(entry)) {
- *jump_location++ =
- Bytecodes::ToByte(GetJumpWithConstantOperand(jump_bytecode));
+ if (FitsInIdx8Operand(entry)) {
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ *jump_location++ = Bytecodes::ToByte(jump_bytecode);
*jump_location = static_cast<uint8_t>(entry);
} else {
// TODO(oth): OutputJump should reserve a constant pool entry
@@ -374,8 +634,35 @@ void BytecodeArrayBuilder::PatchJump(
}
+// static
+Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ case Bytecode::kJumpIfNull:
+ case Bytecode::kJumpIfUndefined:
+ return jump_bytecode;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfToBooleanTrue;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfToBooleanFalse;
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
+ // Don't emit dead code.
+ if (exit_seen_in_block_) return *this;
+
+  // Check whether the value in the accumulator is already a boolean; if not,
+  // choose an appropriate JumpIfToBoolean bytecode.
+ if (NeedToBooleanCast()) {
+ jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
+ }
+
int delta;
if (label->is_bound()) {
// Label has been bound already so this is a backwards jump.
@@ -394,13 +681,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
Output(jump_bytecode, static_cast<uint8_t>(delta));
} else {
size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdxOperand(entry)) {
+ if (FitsInIdx8Operand(entry)) {
Output(GetJumpWithConstantOperand(jump_bytecode),
static_cast<uint8_t>(entry));
} else {
UNIMPLEMENTED();
}
}
+ LeaveBasicBlock();
return *this;
}
@@ -420,34 +708,68 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfNull, label);
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
+ BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfUndefined, label);
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
+ Output(Bytecode::kThrow);
+ exit_seen_in_block_ = true;
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
Output(Bytecode::kReturn);
- return_seen_in_block_ = true;
+ exit_seen_in_block_ = true;
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::EnterBlock() { return *this; }
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(Register receiver) {
+ Output(Bytecode::kForInPrepare, receiver.ToOperand());
+ return *this;
+}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LeaveBlock() {
- last_block_end_ = bytecodes()->size();
- return_seen_in_block_ = false;
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register for_in_state,
+ Register index) {
+ Output(Bytecode::kForInNext, for_in_state.ToOperand(), index.ToOperand());
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register for_in_state) {
+ Output(Bytecode::kForInDone, for_in_state.ToOperand());
+ return *this;
+}
+
+
+void BytecodeArrayBuilder::LeaveBasicBlock() {
+ last_block_end_ = bytecodes()->size();
+ exit_seen_in_block_ = false;
+}
+
+
void BytecodeArrayBuilder::EnsureReturn() {
- if (!return_seen_in_block_) {
+ if (!exit_seen_in_block_) {
LoadUndefined();
Return();
}
}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
Register receiver,
size_t arg_count) {
- if (FitsInIdxOperand(arg_count)) {
+ if (FitsInIdx8Operand(arg_count)) {
Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
static_cast<uint8_t>(arg_count));
} else {
@@ -457,6 +779,52 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
+ Register first_arg,
+ size_t arg_count) {
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
+ }
+ DCHECK(FitsInIdx8Operand(arg_count));
+ Output(Bytecode::kNew, constructor.ToOperand(), first_arg.ToOperand(),
+ static_cast<uint8_t>(arg_count));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+ Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+ DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
+ }
+ Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
+ first_arg.ToOperand(), static_cast<uint8_t>(arg_count));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
+ Register receiver,
+ size_t arg_count) {
+ DCHECK(FitsInIdx16Operand(context_index));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
+ receiver.ToOperand(), static_cast<uint8_t>(arg_count));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
+ LanguageMode language_mode) {
+ Output(BytecodeForDelete(language_mode), object.ToOperand());
+ return *this;
+}
+
+
size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
  // These constants shouldn't be added to the constant pool; they should use
  // specialized bytecodes instead.
@@ -478,39 +846,105 @@ size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
int BytecodeArrayBuilder::BorrowTemporaryRegister() {
- DCHECK_GE(local_register_count_, 0);
- int temporary_reg_index = temporary_register_next_++;
- int count = temporary_register_next_ - local_register_count_;
- if (count > temporary_register_count_) {
- temporary_register_count_ = count;
+ if (free_temporaries_.empty()) {
+ temporary_register_count_ += 1;
+ return last_temporary_register().index();
+ } else {
+ auto pos = free_temporaries_.begin();
+ int retval = *pos;
+ free_temporaries_.erase(pos);
+ return retval;
}
- return temporary_reg_index;
+}
+
+
+void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
+ DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
+ free_temporaries_.erase(reg_index);
}
void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
- DCHECK_EQ(reg_index, temporary_register_next_ - 1);
- temporary_register_next_ = reg_index;
+ DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
+ free_temporaries_.insert(reg_index);
+}
+
+
+int BytecodeArrayBuilder::PrepareForConsecutiveTemporaryRegisters(
+ size_t count) {
+ if (count == 0) {
+ return -1;
+ }
+
+ // Search within existing temporaries for a run.
+ auto start = free_temporaries_.begin();
+ size_t run_length = 0;
+ for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
+ if (*run_end != *start + static_cast<int>(run_length)) {
+ start = run_end;
+ run_length = 0;
+ }
+ if (++run_length == count) {
+ return *start;
+ }
+ }
+
+ // Continue run if possible across existing last temporary.
+ if (temporary_register_count_ > 0 &&
+ (start == free_temporaries_.end() ||
+ *start + static_cast<int>(run_length) !=
+ last_temporary_register().index() + 1)) {
+ run_length = 0;
+ }
+
+ // Ensure enough registers for run.
+ while (run_length++ < count) {
+ temporary_register_count_++;
+ free_temporaries_.insert(last_temporary_register().index());
+ }
+ return last_temporary_register().index() - static_cast<int>(count) + 1;
+}
+
+
+bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
+ if (temporary_register_count_ > 0) {
+ DCHECK(reg.index() >= first_temporary_register().index() &&
+ reg.index() <= last_temporary_register().index());
+ return free_temporaries_.find(reg.index()) == free_temporaries_.end();
+ } else {
+ return false;
+ }
}
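
PrepareForConsecutiveTemporaryRegisters above scans the sorted free set for a run of adjacent indices before growing the register file. The core run search, restated standalone (this version just returns -1 on failure instead of allocating new registers):

#include <set>

// Finds |count| consecutive free register indices in a sorted set and
// returns the first index of the run, or -1 if no such run exists.
int FindConsecutiveRun(const std::set<int>& free_regs, int count) {
  int run_start = -1;
  int run_length = 0;
  for (int index : free_regs) {
    if (run_length == 0 || index != run_start + run_length) {
      run_start = index;  // Start a new candidate run here.
      run_length = 0;
    }
    if (++run_length == count) return run_start;
  }
  return -1;
}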
bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
- uint8_t operand_value) const {
+ uint32_t operand_value) const {
OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
switch (operand_type) {
case OperandType::kNone:
return false;
- case OperandType::kCount:
+ case OperandType::kIdx16:
+ return static_cast<uint16_t>(operand_value) == operand_value;
+ case OperandType::kCount8:
case OperandType::kImm8:
- case OperandType::kIdx:
- return true;
- case OperandType::kReg: {
- Register reg = Register::FromOperand(operand_value);
- if (reg.is_parameter()) {
+ case OperandType::kIdx8:
+ return static_cast<uint8_t>(operand_value) == operand_value;
+ case OperandType::kMaybeReg8:
+ if (operand_value == 0) {
+ return true;
+ }
+ // Fall-through to kReg8 case.
+ case OperandType::kReg8: {
+ Register reg = Register::FromOperand(static_cast<uint8_t>(operand_value));
+ if (reg.is_function_context() || reg.is_function_closure()) {
+ return true;
+ } else if (reg.is_parameter()) {
int parameter_index = reg.ToParameterIndex(parameter_count_);
return parameter_index >= 0 && parameter_index < parameter_count_;
+ } else if (reg.index() < fixed_register_count()) {
+ return true;
} else {
- return (reg.index() >= 0 && reg.index() < temporary_register_next_);
+ return TemporaryRegisterIsLive(reg);
}
}
}
@@ -537,6 +971,32 @@ Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
return Bytecode::kDiv;
case Token::Value::MOD:
return Bytecode::kMod;
+ case Token::Value::BIT_OR:
+ return Bytecode::kBitwiseOr;
+ case Token::Value::BIT_XOR:
+ return Bytecode::kBitwiseXor;
+ case Token::Value::BIT_AND:
+ return Bytecode::kBitwiseAnd;
+ case Token::Value::SHL:
+ return Bytecode::kShiftLeft;
+ case Token::Value::SAR:
+ return Bytecode::kShiftRight;
+ case Token::Value::SHR:
+ return Bytecode::kShiftRightLogical;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
+ switch (op) {
+ case Token::Value::ADD:
+ return Bytecode::kInc;
+ case Token::Value::SUB:
+ return Bytecode::kDec;
default:
UNREACHABLE();
return static_cast<Bytecode>(-1);
@@ -575,13 +1035,186 @@ Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
// static
-bool BytecodeArrayBuilder::FitsInIdxOperand(int value) {
+Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLoadICSloppy:
+ return Bytecode::kLoadICSloppyWide;
+ case Bytecode::kLoadICStrict:
+ return Bytecode::kLoadICStrictWide;
+ case Bytecode::kKeyedLoadICSloppy:
+ return Bytecode::kKeyedLoadICSloppyWide;
+ case Bytecode::kKeyedLoadICStrict:
+ return Bytecode::kKeyedLoadICStrictWide;
+ case Bytecode::kStoreICSloppy:
+ return Bytecode::kStoreICSloppyWide;
+ case Bytecode::kStoreICStrict:
+ return Bytecode::kStoreICStrictWide;
+ case Bytecode::kKeyedStoreICSloppy:
+ return Bytecode::kKeyedStoreICSloppyWide;
+ case Bytecode::kKeyedStoreICStrict:
+ return Bytecode::kKeyedStoreICStrictWide;
+ case Bytecode::kLdaGlobalSloppy:
+ return Bytecode::kLdaGlobalSloppyWide;
+ case Bytecode::kLdaGlobalStrict:
+ return Bytecode::kLdaGlobalStrictWide;
+ case Bytecode::kLdaGlobalInsideTypeofSloppy:
+ return Bytecode::kLdaGlobalInsideTypeofSloppyWide;
+ case Bytecode::kLdaGlobalInsideTypeofStrict:
+ return Bytecode::kLdaGlobalInsideTypeofStrictWide;
+ case Bytecode::kStaGlobalSloppy:
+ return Bytecode::kStaGlobalSloppyWide;
+ case Bytecode::kStaGlobalStrict:
+ return Bytecode::kStaGlobalStrictWide;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForLoadIC(LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kLoadICSloppy;
+ case STRICT:
+ return Bytecode::kLoadICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForKeyedLoadIC(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kKeyedLoadICSloppy;
+ case STRICT:
+ return Bytecode::kKeyedLoadICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStoreICSloppy;
+ case STRICT:
+ return Bytecode::kStoreICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kKeyedStoreICSloppy;
+ case STRICT:
+ return Bytecode::kKeyedStoreICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(LanguageMode language_mode,
+ TypeofMode typeof_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return typeof_mode == INSIDE_TYPEOF
+ ? Bytecode::kLdaGlobalInsideTypeofSloppy
+ : Bytecode::kLdaGlobalSloppy;
+ case STRICT:
+ return typeof_mode == INSIDE_TYPEOF
+ ? Bytecode::kLdaGlobalInsideTypeofStrict
+ : Bytecode::kLdaGlobalStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStaGlobalSloppy;
+ case STRICT:
+ return Bytecode::kStaGlobalStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
+ CreateArgumentsType type) {
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments:
+ return Bytecode::kCreateMappedArguments;
+ case CreateArgumentsType::kUnmappedArguments:
+ return Bytecode::kCreateUnmappedArguments;
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kDeletePropertySloppy;
+ case STRICT:
+ return Bytecode::kDeletePropertyStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
return kMinUInt8 <= value && value <= kMaxUInt8;
}
// static
-bool BytecodeArrayBuilder::FitsInIdxOperand(size_t value) {
+bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
return value <= static_cast<size_t>(kMaxUInt8);
}
@@ -592,21 +1225,65 @@ bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
}
+// static
+bool BytecodeArrayBuilder::FitsInIdx16Operand(int value) {
+ return kMinUInt16 <= value && value <= kMaxUInt16;
+}
+
+
+// static
+bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
+ return value <= static_cast<size_t>(kMaxUInt16);
+}
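
These predicates drive the narrow/wide encoding choice used throughout the builder: emit the one-byte form when an index fits in eight bits, switch to the *Wide bytecode for sixteen, and hit UNIMPLEMENTED beyond that. Distilled into one function (the constants mirror kMaxUInt8 and kMaxUInt16):

#include <cstddef>

constexpr size_t kMaxUInt8 = 0xFF;
constexpr size_t kMaxUInt16 = 0xFFFF;

enum class Width { kNarrow, kWide, kUnsupported };

// Narrow first, wide as the fallback, unsupported beyond 16 bits.
Width SelectOperandWidth(size_t index) {
  if (index <= kMaxUInt8) return Width::kNarrow;    // e.g. kLdaConstant
  if (index <= kMaxUInt16) return Width::kWide;     // e.g. kLdaConstantWide
  return Width::kUnsupported;                       // UNIMPLEMENTED() today
}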
+
+
TemporaryRegisterScope::TemporaryRegisterScope(BytecodeArrayBuilder* builder)
- : builder_(builder), count_(0), last_register_index_(-1) {}
+ : builder_(builder),
+ allocated_(builder->zone()),
+ next_consecutive_register_(-1),
+ next_consecutive_count_(-1) {}
TemporaryRegisterScope::~TemporaryRegisterScope() {
- while (count_-- != 0) {
- builder_->ReturnTemporaryRegister(last_register_index_--);
+ for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
+ builder_->ReturnTemporaryRegister(*i);
}
+ allocated_.clear();
}
Register TemporaryRegisterScope::NewRegister() {
- count_++;
- last_register_index_ = builder_->BorrowTemporaryRegister();
- return Register(last_register_index_);
+ int allocated = builder_->BorrowTemporaryRegister();
+ allocated_.push_back(allocated);
+ return Register(allocated);
+}
+
+
+bool TemporaryRegisterScope::RegisterIsAllocatedInThisScope(
+ Register reg) const {
+ for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
+ if (*i == reg.index()) return true;
+ }
+ return false;
+}
+
+
+void TemporaryRegisterScope::PrepareForConsecutiveAllocations(size_t count) {
+ if (static_cast<int>(count) > next_consecutive_count_) {
+ next_consecutive_register_ =
+ builder_->PrepareForConsecutiveTemporaryRegisters(count);
+ next_consecutive_count_ = static_cast<int>(count);
+ }
+}
+
+
+Register TemporaryRegisterScope::NextConsecutiveRegister() {
+ DCHECK_GE(next_consecutive_register_, 0);
+ DCHECK_GT(next_consecutive_count_, 0);
+ builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+ allocated_.push_back(next_consecutive_register_);
+ next_consecutive_count_--;
+ return Register(next_consecutive_register_++);
}
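
TemporaryRegisterScope gives register borrowing RAII semantics: everything handed out inside the scope is returned, in reverse allocation order, when the scope is destroyed, and returned indices go back into the builder's free set for reuse. A toy model of that contract (Builder stands in for BytecodeArrayBuilder; the consecutive-allocation machinery is omitted):

#include <set>
#include <vector>

class Builder {
 public:
  int Borrow() {
    if (free_.empty()) return next_++;  // Grow the register file.
    int reg = *free_.begin();           // Reuse the lowest free index.
    free_.erase(free_.begin());
    return reg;
  }
  void Return(int reg) { free_.insert(reg); }

 private:
  std::set<int> free_;
  int next_ = 0;
};

class TemporaryScope {
 public:
  explicit TemporaryScope(Builder* builder) : builder_(builder) {}
  ~TemporaryScope() {
    // Hand registers back in reverse allocation order, as above.
    for (auto it = allocated_.rbegin(); it != allocated_.rend(); ++it)
      builder_->Return(*it);
  }
  int NewRegister() {
    allocated_.push_back(builder_->Borrow());
    return allocated_.back();
  }

 private:
  Builder* builder_;
  std::vector<int> allocated_;
};

int main() {
  Builder builder;
  {
    TemporaryScope scope(&builder);
    scope.NewRegister();  // r0
    scope.NewRegister();  // r1
  }  // r1 then r0 are returned here.
  TemporaryScope reuse(&builder);
  return reuse.NewRegister();  // Reuses r0, so the program exits with 0.
}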
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index d68d5e7ffb..b766ccd4a6 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -23,20 +23,53 @@ namespace interpreter {
class BytecodeLabel;
class Register;
+// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
+// when the rest parameters implementation has settled down.
+enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
+
class BytecodeArrayBuilder {
public:
BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
Handle<BytecodeArray> ToBytecodeArray();
- // Set number of parameters expected by function.
+ // Set the number of parameters expected by function.
void set_parameter_count(int number_of_params);
- int parameter_count() const;
+ int parameter_count() const {
+ DCHECK_GE(parameter_count_, 0);
+ return parameter_count_;
+ }
- // Set number of locals required for bytecode array.
+ // Set the number of locals required for bytecode array.
void set_locals_count(int number_of_locals);
- int locals_count() const;
+ int locals_count() const {
+ DCHECK_GE(local_register_count_, 0);
+ return local_register_count_;
+ }
+
+ // Set number of contexts required for bytecode array.
+ void set_context_count(int number_of_contexts);
+ int context_count() const {
+ DCHECK_GE(context_register_count_, 0);
+ return context_register_count_;
+ }
+
+ Register first_context_register() const;
+ Register last_context_register() const;
- Register Parameter(int parameter_index);
+ // Returns the number of fixed (non-temporary) registers.
+ int fixed_register_count() const { return context_count() + locals_count(); }
+
+ Register Parameter(int parameter_index) const;
+
+ // Return true if the register |reg| represents a parameter or a
+ // local.
+ bool RegisterIsParameterOrLocal(Register reg) const;
+
+ // Return true if the register |reg| represents a temporary register.
+ bool RegisterIsTemporary(Register reg) const;
+
+ // Gets a constant pool entry for the |object|.
+ size_t GetConstantPoolEntry(Handle<Object> object);
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -47,27 +80,57 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& LoadTrue();
BytecodeArrayBuilder& LoadFalse();
- // Global loads to accumulator.
- BytecodeArrayBuilder& LoadGlobal(int slot_index);
+ // Global loads to the accumulator and stores from the accumulator.
+ BytecodeArrayBuilder& LoadGlobal(size_t name_index, int feedback_slot,
+ LanguageMode language_mode,
+ TypeofMode typeof_mode);
+ BytecodeArrayBuilder& StoreGlobal(size_t name_index, int feedback_slot,
+ LanguageMode language_mode);
+
+ // Load the object at |slot_index| in |context| into the accumulator.
+ BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index);
+
+ // Stores the object in the accumulator into |slot_index| of |context|.
+ BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index);
// Register-accumulator transfers.
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
- // Load properties. The property name should be in the accumulator.
- BytecodeArrayBuilder& LoadNamedProperty(Register object, int feedback_slot,
+ // Named load property.
+ BytecodeArrayBuilder& LoadNamedProperty(Register object, size_t name_index,
+ int feedback_slot,
LanguageMode language_mode);
+ // Keyed load property. The key should be in the accumulator.
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
LanguageMode language_mode);
// Store properties. The value to be stored should be in the accumulator.
- BytecodeArrayBuilder& StoreNamedProperty(Register object, Register name,
+ BytecodeArrayBuilder& StoreNamedProperty(Register object, size_t name_index,
int feedback_slot,
LanguageMode language_mode);
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
int feedback_slot,
LanguageMode language_mode);
+ // Create a new closure for the SharedFunctionInfo in the accumulator.
+ BytecodeArrayBuilder& CreateClosure(PretenureFlag tenured);
+
+ // Create a new arguments object in the accumulator.
+ BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
+
+ // Literals creation. Constant elements should be in the accumulator.
+ BytecodeArrayBuilder& CreateRegExpLiteral(int literal_index, Register flags);
+ BytecodeArrayBuilder& CreateArrayLiteral(int literal_index, int flags);
+ BytecodeArrayBuilder& CreateObjectLiteral(int literal_index, int flags);
+
+  // Push the context in the accumulator as the new context, storing it in
+  // register |context|.
+ BytecodeArrayBuilder& PushContext(Register context);
+
+ // Pop the current context and replace with |context|.
+ BytecodeArrayBuilder& PopContext(Register context);
+
// Call a JS function. The JSFunction or Callable to be called should be in
// |callable|, the receiver should be in |receiver| and all subsequent
// arguments should be in registers <receiver + 1> to
@@ -75,25 +138,69 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& Call(Register callable, Register receiver,
size_t arg_count);
- // Operators (register == lhs, accumulator = rhs).
- BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
+  // Call the new operator. The |arg_count| arguments to be applied to the
+  // constructor occupy consecutive registers starting at |first_arg|.
+ BytecodeArrayBuilder& New(Register constructor, Register first_arg,
+ size_t arg_count);
+
+  // Call the runtime function with |function_id|. The first argument should be
+  // in |first_arg| and all subsequent arguments should be in registers
+  // <first_arg + 1> to <first_arg + arg_count - 1>.
+ BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
+ Register first_arg, size_t arg_count);
+
+  // Call the JS runtime function with |context_index|. The receiver should
+  // be in |receiver| and all subsequent arguments should be in registers
+  // <receiver + 1> to <receiver + arg_count>.
+ BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver,
+ size_t arg_count);
+
+ // Operators (register holds the lhs value, accumulator holds the rhs value).
+ BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
+ Strength strength);
+
+ // Count Operators (value stored in accumulator).
+ BytecodeArrayBuilder& CountOperation(Token::Value op, Strength strength);
+
+ // Unary Operators.
+ BytecodeArrayBuilder& LogicalNot();
+ BytecodeArrayBuilder& TypeOf();
+
+  // Deletes a property from an object. This expects that the accumulator
+  // contains the key to be deleted and that the register contains a
+  // reference to the object.
+ BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
- LanguageMode language_mode);
+ Strength strength);
- // Casts
+ // Casts.
BytecodeArrayBuilder& CastAccumulatorToBoolean();
+ BytecodeArrayBuilder& CastAccumulatorToJSObject();
+ BytecodeArrayBuilder& CastAccumulatorToName();
+ BytecodeArrayBuilder& CastAccumulatorToNumber();
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
+ BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label);
+
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+
+ BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& Return();
- BytecodeArrayBuilder& EnterBlock();
- BytecodeArrayBuilder& LeaveBlock();
+ // Complex flow control.
+ BytecodeArrayBuilder& ForInPrepare(Register receiver);
+ BytecodeArrayBuilder& ForInNext(Register for_in_state, Register index);
+ BytecodeArrayBuilder& ForInDone(Register for_in_state);
+
+  // Accessors.
+ Zone* zone() const { return zone_; }
private:
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
@@ -101,54 +208,79 @@ class BytecodeArrayBuilder {
Isolate* isolate() const { return isolate_; }
static Bytecode BytecodeForBinaryOperation(Token::Value op);
+ static Bytecode BytecodeForCountOperation(Token::Value op);
static Bytecode BytecodeForCompareOperation(Token::Value op);
- static bool FitsInIdxOperand(int value);
- static bool FitsInIdxOperand(size_t value);
+ static Bytecode BytecodeForWideOperands(Bytecode bytecode);
+ static Bytecode BytecodeForLoadIC(LanguageMode language_mode);
+ static Bytecode BytecodeForKeyedLoadIC(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
+ static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
+ static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
+ TypeofMode typeof_mode);
+ static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
+ static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
+ static Bytecode BytecodeForDelete(LanguageMode language_mode);
+
+ static bool FitsInIdx8Operand(int value);
+ static bool FitsInIdx8Operand(size_t value);
static bool FitsInImm8Operand(int value);
- static bool IsJumpWithImm8Operand(Bytecode jump_bytecode);
+ static bool FitsInIdx16Operand(int value);
+ static bool FitsInIdx16Operand(size_t value);
+
static Bytecode GetJumpWithConstantOperand(Bytecode jump_with_smi8_operand);
+ static Bytecode GetJumpWithToBoolean(Bytecode jump);
template <size_t N>
- INLINE(void Output(uint8_t(&bytes)[N]));
- void Output(Bytecode bytecode, uint8_t operand0, uint8_t operand1,
- uint8_t operand2);
- void Output(Bytecode bytecode, uint8_t operand0, uint8_t operand1);
- void Output(Bytecode bytecode, uint8_t operand0);
+  INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2);
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+ void Output(Bytecode bytecode, uint32_t operand0);
void Output(Bytecode bytecode);
- void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location);
+
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
+ void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
+ ZoneVector<uint8_t>::iterator jump_location);
+ void LeaveBasicBlock();
void EnsureReturn();
bool OperandIsValid(Bytecode bytecode, int operand_index,
- uint8_t operand_value) const;
+ uint32_t operand_value) const;
bool LastBytecodeInSameBlock() const;
- size_t GetConstantPoolEntry(Handle<Object> object);
+ bool NeedToBooleanCast();
- // Scope helpers used by TemporaryRegisterScope
int BorrowTemporaryRegister();
void ReturnTemporaryRegister(int reg_index);
+ int PrepareForConsecutiveTemporaryRegisters(size_t count);
+ void BorrowConsecutiveTemporaryRegister(int reg_index);
+ bool TemporaryRegisterIsLive(Register reg) const;
+
+ Register first_temporary_register() const;
+ Register last_temporary_register() const;
Isolate* isolate_;
+ Zone* zone_;
ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
size_t last_block_end_;
size_t last_bytecode_start_;
- bool return_seen_in_block_;
+ bool exit_seen_in_block_;
IdentityMap<size_t> constants_map_;
ZoneVector<Handle<Object>> constants_;
int parameter_count_;
int local_register_count_;
+ int context_register_count_;
int temporary_register_count_;
- int temporary_register_next_;
+
+ ZoneSet<int> free_temporaries_;
friend class TemporaryRegisterScope;
- DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArrayBuilder);
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
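+// Illustrative usage of the fluent interface above (a sketch; |reg| is
+// assumed to come from a TemporaryRegisterScope, and Strength::WEAK is
+// assumed to be the non-strong-mode value):
+//   builder.LoadLiteral(Smi::FromInt(1))
+//       .StoreAccumulatorInRegister(reg)
+//       .LoadLiteral(Smi::FromInt(2))
+//       .BinaryOperation(Token::ADD, reg, Strength::WEAK)
+//       .Return();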
@@ -159,7 +291,8 @@ class BytecodeArrayBuilder {
class BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
- ~BytecodeLabel() { DCHECK(bound_ && offset_ != kInvalidOffset); }
+
+ INLINE(bool is_bound() const) { return bound_; }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
@@ -170,11 +303,10 @@ class BytecodeLabel final {
bound_ = true;
}
INLINE(void set_referrer(size_t offset)) {
- DCHECK(!bound_ && offset != kInvalidOffset);
+ DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
offset_ = offset;
}
INLINE(size_t offset() const) { return offset_; }
- INLINE(bool is_bound() const) { return bound_; }
INLINE(bool is_forward_target() const) {
return offset() != kInvalidOffset && !is_bound();
}
@@ -188,25 +320,33 @@ class BytecodeLabel final {
size_t offset_;
friend class BytecodeArrayBuilder;
- DISALLOW_COPY_AND_ASSIGN(BytecodeLabel);
};
// A stack-allocated class that allows the instantiator to allocate
// temporary registers that are cleaned up when the scope is closed.
+// TODO(oth): Deprecate TemporaryRegisterScope use. Code should be
+// using result scopes as far as possible.
class TemporaryRegisterScope {
public:
explicit TemporaryRegisterScope(BytecodeArrayBuilder* builder);
~TemporaryRegisterScope();
Register NewRegister();
+ void PrepareForConsecutiveAllocations(size_t count);
+ Register NextConsecutiveRegister();
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const;
+
private:
void* operator new(size_t size);
void operator delete(void* p);
BytecodeArrayBuilder* builder_;
- int count_;
- int last_register_index_;
+ const TemporaryRegisterScope* outer_;
+ ZoneVector<int> allocated_;
+ int next_consecutive_register_;
+ int next_consecutive_count_;
DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterScope);
};
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index dc49308fbe..b84215660e 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -32,31 +32,55 @@ Bytecode BytecodeArrayIterator::current_bytecode() const {
}
-uint8_t BytecodeArrayIterator::GetRawOperand(int operand_index,
- OperandType operand_type) const {
+uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
+ OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
DCHECK_EQ(operand_type,
Bytecodes::GetOperandType(current_bytecode(), operand_index));
- int operands_start = bytecode_offset_ + 1;
- return bytecode_array()->get(operands_start + operand_index);
+ uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
+ switch (Bytecodes::SizeOfOperand(operand_type)) {
+ default:
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ return static_cast<uint32_t>(*operand_start);
+ case OperandSize::kShort:
+ return ReadUnalignedUInt16(operand_start);
+ }
}
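+// Illustration of the decoding above: a kShort (16-bit) operand is read
+// unaligned starting at the operand's byte offset, so for a wide bytecode
+// whose kIdx16 operand sits at operand offset 1, bytes 1 and 2 of the
+// bytecode form the returned value.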
-int8_t BytecodeArrayIterator::GetSmi8Operand(int operand_index) const {
- uint8_t operand = GetRawOperand(operand_index, OperandType::kImm8);
+int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+ uint32_t operand = GetRawOperand(operand_index, OperandType::kImm8);
return static_cast<int8_t>(operand);
}
+int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
+ uint32_t operand = GetRawOperand(operand_index, OperandType::kCount8);
+ return static_cast<int>(operand);
+}
+
+
int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
- uint8_t operand = GetRawOperand(operand_index, OperandType::kIdx);
+ OperandSize size =
+ Bytecodes::GetOperandSize(current_bytecode(), operand_index);
+ OperandType type =
+ (size == OperandSize::kByte) ? OperandType::kIdx8 : OperandType::kIdx16;
+ uint32_t operand = GetRawOperand(operand_index, type);
return static_cast<int>(operand);
}
Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
- uint8_t operand = GetRawOperand(operand_index, OperandType::kReg);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kReg8 ||
+ operand_type == OperandType::kMaybeReg8);
+ uint32_t operand = GetRawOperand(operand_index, operand_type);
return Register::FromOperand(operand);
}
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 0d9011f242..31e237f098 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -25,14 +25,15 @@ class BytecodeArrayIterator {
return bytecode_array_;
}
- int8_t GetSmi8Operand(int operand_index) const;
+ int8_t GetImmediateOperand(int operand_index) const;
int GetIndexOperand(int operand_index) const;
+ int GetCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
// Get the raw operand value. Note: you should prefer using the
// typed versions above which cast the return to an appropriate type.
- uint8_t GetRawOperand(int operand_index, OperandType operand_type) const;
+ uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
private:
Handle<BytecodeArray> bytecode_array_;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 7257fd4134..02061a7514 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,10 +4,10 @@
#include "src/interpreter/bytecode-generator.h"
-#include <stack>
-
#include "src/compiler.h"
+#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
+#include "src/parser.h"
#include "src/scopes.h"
#include "src/token.h"
@@ -15,9 +15,301 @@ namespace v8 {
namespace internal {
namespace interpreter {
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body, allowing pushing and
+// popping of the current {context_register} during visitation.
+class BytecodeGenerator::ContextScope BASE_EMBEDDED {
+ public:
+ ContextScope(BytecodeGenerator* generator, Scope* scope,
+ bool should_pop_context = true)
+ : generator_(generator),
+ scope_(scope),
+ outer_(generator_->execution_context()),
+ register_(generator_->NextContextRegister()),
+ depth_(0),
+ should_pop_context_(should_pop_context) {
+ if (outer_) {
+ depth_ = outer_->depth_ + 1;
+ generator_->builder()->PushContext(register_);
+ }
+ generator_->set_execution_context(this);
+ }
+
+ ~ContextScope() {
+ if (outer_ && should_pop_context_) {
+ generator_->builder()->PopContext(outer_->reg());
+ }
+ generator_->set_execution_context(outer_);
+ }
+
+ // Returns the depth of the given |scope| for the current execution context.
+ int ContextChainDepth(Scope* scope) {
+ return scope_->ContextChainLength(scope);
+ }
+
+ // Returns the execution context at |depth| in the current context chain if it
+ // is a function local execution context, otherwise returns nullptr.
+ ContextScope* Previous(int depth) {
+ if (depth > depth_) {
+ return nullptr;
+ }
+
+ ContextScope* previous = this;
+ for (int i = depth; i > 0; --i) {
+ previous = previous->outer_;
+ }
+ return previous;
+ }
+
+ Scope* scope() const { return scope_; }
+ Register reg() const { return register_; }
+
+ private:
+ BytecodeGenerator* generator_;
+ Scope* scope_;
+ ContextScope* outer_;
+ Register register_;
+ int depth_;
+ bool should_pop_context_;
+};
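+// Illustration of the depth walk above: with nested contexts A -> B -> C
+// (C innermost, so C's depth_ == 2), C->Previous(0) == C, C->Previous(1)
+// == B, and C->Previous(3) returns nullptr because the chain would extend
+// beyond the current function.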
+
+
+// Scoped class for tracking control statements entered by the
+// visitor. The pattern derives from AstGraphBuilder::ControlScope.
+class BytecodeGenerator::ControlScope BASE_EMBEDDED {
+ public:
+ explicit ControlScope(BytecodeGenerator* generator)
+ : generator_(generator), outer_(generator->execution_control()) {
+ generator_->set_execution_control(this);
+ }
+ virtual ~ControlScope() { generator_->set_execution_control(outer()); }
+
+ void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
+ void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
+
+ protected:
+ enum Command { CMD_BREAK, CMD_CONTINUE };
+ void PerformCommand(Command command, Statement* statement);
+ virtual bool Execute(Command command, Statement* statement) = 0;
+
+ BytecodeGenerator* generator() const { return generator_; }
+ ControlScope* outer() const { return outer_; }
+
+ private:
+ BytecodeGenerator* generator_;
+ ControlScope* outer_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlScope);
+};
+
+
+// Scoped class for enabling 'break' and 'continue' in iteration
+// constructs, e.g. do...while, while..., for...
+class BytecodeGenerator::ControlScopeForIteration
+ : public BytecodeGenerator::ControlScope {
+ public:
+ ControlScopeForIteration(BytecodeGenerator* generator,
+ IterationStatement* statement,
+ LoopBuilder* loop_builder)
+ : ControlScope(generator),
+ statement_(statement),
+ loop_builder_(loop_builder) {}
+
+ protected:
+ virtual bool Execute(Command command, Statement* statement) {
+ if (statement != statement_) return false;
+ switch (command) {
+ case CMD_BREAK:
+ loop_builder_->Break();
+ return true;
+ case CMD_CONTINUE:
+ loop_builder_->Continue();
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ Statement* statement_;
+ LoopBuilder* loop_builder_;
+};
+
+
+// Scoped class for enabling 'break' in switch statements.
+class BytecodeGenerator::ControlScopeForSwitch
+ : public BytecodeGenerator::ControlScope {
+ public:
+ ControlScopeForSwitch(BytecodeGenerator* generator,
+ SwitchStatement* statement,
+ SwitchBuilder* switch_builder)
+ : ControlScope(generator),
+ statement_(statement),
+ switch_builder_(switch_builder) {}
+
+ protected:
+ virtual bool Execute(Command command, Statement* statement) {
+ if (statement != statement_) return false;
+ switch (command) {
+ case CMD_BREAK:
+ switch_builder_->Break();
+ return true;
+ case CMD_CONTINUE:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ Statement* statement_;
+ SwitchBuilder* switch_builder_;
+};
+
+
+void BytecodeGenerator::ControlScope::PerformCommand(Command command,
+ Statement* statement) {
+ ControlScope* current = this;
+ do {
+ if (current->Execute(command, statement)) return;
+ current = current->outer();
+ } while (current != nullptr);
+ UNREACHABLE();
+}
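+// Illustration: a 'break' targeting an outer loop from inside a nested
+// switch first offers CMD_BREAK to the switch's scope; its Execute()
+// returns false (statement mismatch), so the command propagates outward to
+// the loop's scope, which emits the break via its LoopBuilder.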
+
+
+// Scoped base class for determining where the result of an expression
+// is stored.
+class BytecodeGenerator::ExpressionResultScope {
+ public:
+ ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
+ : generator_(generator),
+ kind_(kind),
+ outer_(generator->execution_result()),
+ allocator_(builder()),
+ result_identified_(false) {
+ generator_->set_execution_result(this);
+ }
+
+ virtual ~ExpressionResultScope() {
+ generator_->set_execution_result(outer_);
+ DCHECK(result_identified());
+ }
+
+ bool IsEffect() const { return kind_ == Expression::kEffect; }
+ bool IsValue() const { return kind_ == Expression::kValue; }
+
+ virtual void SetResultInAccumulator() = 0;
+ virtual void SetResultInRegister(Register reg) = 0;
+
+ BytecodeGenerator* generator() const { return generator_; }
+ BytecodeArrayBuilder* builder() const { return generator()->builder(); }
+ ExpressionResultScope* outer() const { return outer_; }
+
+ Register NewRegister() { return allocator_.NewRegister(); }
+
+ void PrepareForConsecutiveAllocations(size_t count) {
+ allocator_.PrepareForConsecutiveAllocations(count);
+ }
+
+ Register NextConsecutiveRegister() {
+ return allocator_.NextConsecutiveRegister();
+ }
+
+ protected:
+ void set_result_identified() {
+ DCHECK(!result_identified());
+ result_identified_ = true;
+ }
+
+ bool result_identified() const { return result_identified_; }
+
+ const TemporaryRegisterScope* allocator() const { return &allocator_; }
+
+ private:
+ BytecodeGenerator* generator_;
+ Expression::Context kind_;
+ ExpressionResultScope* outer_;
+ TemporaryRegisterScope allocator_;
+ bool result_identified_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
+};
+
+
+// Scoped class used when the current expression is not expected to
+// produce a result.
+class BytecodeGenerator::EffectResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit EffectResultScope(BytecodeGenerator* generator)
+ : ExpressionResultScope(generator, Expression::kEffect) {
+ set_result_identified();
+ }
+
+ virtual void SetResultInAccumulator() {}
+ virtual void SetResultInRegister(Register reg) {}
+};
+
+
+// Scoped class used when the result of the expression being evaluated
+// should go into the interpreter's accumulator register.
+class BytecodeGenerator::AccumulatorResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit AccumulatorResultScope(BytecodeGenerator* generator)
+ : ExpressionResultScope(generator, Expression::kValue) {}
+
+ virtual void SetResultInAccumulator() { set_result_identified(); }
+
+ virtual void SetResultInRegister(Register reg) {
+ builder()->LoadAccumulatorWithRegister(reg);
+ set_result_identified();
+ }
+};
+
+
+// Scoped class used when the result of the expression being evaluated
+// should go into an interpreter register.
+class BytecodeGenerator::RegisterResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit RegisterResultScope(BytecodeGenerator* generator)
+ : ExpressionResultScope(generator, Expression::kValue) {}
+
+ virtual void SetResultInAccumulator() {
+ result_register_ = outer()->NewRegister();
+ builder()->StoreAccumulatorInRegister(result_register_);
+ set_result_identified();
+ }
+
+ virtual void SetResultInRegister(Register reg) {
+ DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
+ (builder()->RegisterIsTemporary(reg) &&
+ !allocator()->RegisterIsAllocatedInThisScope(reg)));
+ result_register_ = reg;
+ set_result_identified();
+ }
+
+ Register ResultRegister() const { return result_register_; }
+
+ private:
+ Register result_register_;
+};
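+// Sketch of how these scopes are used (illustrative): to evaluate an
+// expression into a register, a visitor wraps the visit roughly as
+//   RegisterResultScope register_scope(this);
+//   Visit(expr);
+//   Register result = register_scope.ResultRegister();
+// which is what helpers such as VisitForRegisterValue below are assumed
+// to do.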
+
+
BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
- : builder_(isolate, zone) {
- InitializeAstVisitor(isolate, zone);
+ : isolate_(isolate),
+ zone_(zone),
+ builder_(isolate, zone),
+ info_(nullptr),
+ scope_(nullptr),
+ globals_(0, zone),
+ execution_control_(nullptr),
+ execution_context_(nullptr),
+ execution_result_(nullptr),
+ binary_expression_depth_(0),
+ binary_expression_hazard_set_(zone) {
+ InitializeAstVisitor(isolate);
}
@@ -28,59 +320,114 @@ Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
set_info(info);
set_scope(info->scope());
- // This a temporary guard (oth).
- DCHECK(scope()->is_function_scope());
+ // Initialize the incoming context.
+ ContextScope incoming_context(this, scope(), false);
+
+ builder()->set_parameter_count(info->num_parameters_including_this());
+ builder()->set_locals_count(scope()->num_stack_slots());
+ builder()->set_context_count(scope()->MaxNestedContextChainLength());
+
+ // Build function context only if there are context allocated variables.
+ if (scope()->NeedsContext()) {
+ // Push a new inner context scope for the function.
+ VisitNewLocalFunctionContext();
+ ContextScope local_function_context(this, scope(), false);
+ VisitBuildLocalActivationContext();
+ MakeBytecodeBody();
+ } else {
+ MakeBytecodeBody();
+ }
+
+ set_scope(nullptr);
+ set_info(nullptr);
+ return builder_.ToBytecodeArray();
+}
+
+
+void BytecodeGenerator::MakeBytecodeBody() {
+ // Build the arguments object if it is used.
+ VisitArgumentsObject(scope()->arguments());
+
+ // Build assignment to {.this_function} variable if it is used.
+ VisitThisFunctionVariable(scope()->this_function_var());
- builder().set_parameter_count(info->num_parameters_including_this());
- builder().set_locals_count(scope()->num_stack_slots());
+ // Build assignment to {new.target} variable if it is used.
+ VisitNewTargetVariable(scope()->new_target_var());
- // Visit implicit declaration of the function name.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VisitVariableDeclaration(scope()->function());
+ // TODO(rmcilroy): Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ UNIMPLEMENTED();
+ }
+
+ // Visit illegal re-declaration and bail out if it exists.
+ if (scope()->HasIllegalRedeclaration()) {
+ Visit(scope()->GetIllegalRedeclaration());
+ return;
}
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
// Visit statements in the function body.
- VisitStatements(info->literal()->body());
-
- set_scope(nullptr);
- set_info(nullptr);
- return builder_.ToBytecodeArray();
+ VisitStatements(info()->literal()->body());
}
-void BytecodeGenerator::VisitBlock(Block* node) {
- builder().EnterBlock();
- if (node->scope() == NULL) {
+void BytecodeGenerator::VisitBlock(Block* stmt) {
+ if (stmt->scope() == NULL) {
// Visit statements in the same scope, no declarations.
- VisitStatements(node->statements());
+ VisitStatements(stmt->statements());
} else {
// Visit declarations and statements in a block scope.
- if (node->scope()->ContextLocalCount() > 0) {
- UNIMPLEMENTED();
+ if (stmt->scope()->NeedsContext()) {
+ VisitNewLocalBlockContext(stmt->scope());
+ ContextScope scope(this, stmt->scope());
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
} else {
- VisitDeclarations(node->scope()->declarations());
- VisitStatements(node->statements());
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
}
}
- builder().LeaveBlock();
}
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
+ VariableMode mode = decl->mode();
+ // Const and let variables are initialized with the hole so that we can
+ // check that they are only assigned once.
+ bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- UNIMPLEMENTED();
+ case VariableLocation::UNALLOCATED: {
+ Handle<Oddball> value = variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value();
+ globals()->push_back(variable->name());
+ globals()->push_back(value);
break;
- case VariableLocation::PARAMETER:
+ }
case VariableLocation::LOCAL:
- // Details stored in scope, i.e. variable index.
+ if (hole_init) {
+ Register destination(variable->index());
+ builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
+ }
+ break;
+ case VariableLocation::PARAMETER:
+ if (hole_init) {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register destination(builder()->Parameter(variable->index() + 1));
+ builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
+ }
break;
case VariableLocation::CONTEXT:
+ if (hole_init) {
+ builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
+ variable->index());
+ }
+ break;
case VariableLocation::LOOKUP:
UNIMPLEMENTED();
break;
@@ -89,7 +436,34 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
- UNIMPLEMENTED();
+ Variable* variable = decl->proxy()->var();
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
+ decl->fun(), info()->script(), info());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals()->push_back(variable->name());
+ globals()->push_back(function);
+ break;
+ }
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
+ VisitForAccumulatorValue(decl->fun());
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
+ VisitForAccumulatorValue(decl->fun());
+ builder()->StoreContextSlot(execution_context()->reg(),
+ variable->index());
+ break;
+ }
+ case VariableLocation::LOOKUP:
+ UNIMPLEMENTED();
+ }
}
@@ -103,31 +477,68 @@ void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
}
+void BytecodeGenerator::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ DCHECK(globals()->empty());
+ AstVisitor::VisitDeclarations(declarations);
+ if (globals()->empty()) return;
+ int array_index = 0;
+ Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
+ static_cast<int>(globals()->size()), TENURED);
+ for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
+ int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(language_mode());
+
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register pairs = temporary_register_scope.NewRegister();
+ builder()->LoadLiteral(data);
+ builder()->StoreAccumulatorInRegister(pairs);
+
+ Register flags = temporary_register_scope.NewRegister();
+ builder()->LoadLiteral(Smi::FromInt(encoded_flags));
+ builder()->StoreAccumulatorInRegister(flags);
+ DCHECK(flags.index() == pairs.index() + 1);
+
+ builder()->CallRuntime(Runtime::kDeclareGlobals, pairs, 2);
+ globals()->clear();
+}
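+// Illustration of the pairs layout built above: for `var x; function f(){}`
+// the FixedArray would hold {name "x", undefined-or-hole, name "f",
+// SharedFunctionInfo for f}, consumed pairwise by Runtime::kDeclareGlobals.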
+
+
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
+ // TODO(rmcilroy): Replace this with a StatementResultScope when it exists.
+ EffectResultScope effect_scope(this);
+ VisitForEffect(stmt->expression());
}
void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- // TODO(oth): For control-flow it could be useful to signal empty paths here.
}
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
- BytecodeLabel else_start, else_end;
- // TODO(oth): Spot easy cases where there code would not need to
- // emit the then block or the else block, e.g. condition is
- // obviously true/1/false/0.
- Visit(stmt->condition());
- builder().CastAccumulatorToBoolean();
- builder().JumpIfFalse(&else_start);
-
- Visit(stmt->then_statement());
- builder().Jump(&else_end);
- builder().Bind(&else_start);
-
- Visit(stmt->else_statement());
- builder().Bind(&else_end);
+ BytecodeLabel else_label, end_label;
+ if (stmt->condition()->ToBooleanIsTrue()) {
+ // Generate only then block.
+ Visit(stmt->then_statement());
+ } else if (stmt->condition()->ToBooleanIsFalse()) {
+ // Generate only else block if it exists.
+ if (stmt->HasElseStatement()) {
+ Visit(stmt->else_statement());
+ }
+ } else {
+ VisitForAccumulatorValue(stmt->condition());
+ builder()->JumpIfFalse(&else_label);
+ Visit(stmt->then_statement());
+ if (stmt->HasElseStatement()) {
+ builder()->Jump(&end_label);
+ builder()->Bind(&else_label);
+ Visit(stmt->else_statement());
+ } else {
+ builder()->Bind(&else_label);
+ }
+ builder()->Bind(&end_label);
+ }
}
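+// Sketch of the emitted shape for `if (c) S1; else S2;` when the condition
+// is not statically known (illustrative):
+//   <evaluate c into the accumulator>
+//   JumpIfFalse else_label
+//   <S1>
+//   Jump end_label
+//  else_label:
+//   <S2>
+//  end_label: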
@@ -138,18 +549,19 @@ void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- UNIMPLEMENTED();
+ execution_control()->Continue(stmt->target());
}
void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- UNIMPLEMENTED();
+ execution_control()->Break(stmt->target());
}
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
- builder().Return();
+ EffectResultScope effect_scope(this);
+ VisitForAccumulatorValue(stmt->expression());
+ builder()->Return();
}
@@ -159,30 +571,265 @@ void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNIMPLEMENTED();
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ SwitchBuilder switch_builder(builder(), clauses->length());
+ ControlScopeForSwitch scope(this, stmt, &switch_builder);
+ int default_index = -1;
+
+ // Keep the switch value in a register until a case matches.
+ Register tag = VisitForRegisterValue(stmt->tag());
+
+ // Iterate over all cases and create nodes for label comparison.
+ BytecodeLabel done_label;
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+
+    // The default is not a test; remember its index.
+ if (clause->is_default()) {
+ default_index = i;
+ continue;
+ }
+
+ // Perform label comparison as if via '===' with tag.
+ VisitForAccumulatorValue(clause->label());
+ builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
+ language_mode_strength());
+ switch_builder.Case(i);
+ }
+
+ if (default_index >= 0) {
+ // Emit default jump if there is a default case.
+ switch_builder.DefaultAt(default_index);
+ } else {
+    // Otherwise, none of the cases matched, so jump to done.
+ builder()->Jump(&done_label);
+ }
+
+ // Iterate over all cases and create the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ switch_builder.SetCaseTarget(i);
+ VisitStatements(clause->statements());
+ }
+ builder()->Bind(&done_label);
+
+ switch_builder.SetBreakTarget(done_label);
}
-void BytecodeGenerator::VisitCaseClause(CaseClause* clause) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
+ // Handled entirely in VisitSwitchStatement.
+ UNREACHABLE();
+}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNIMPLEMENTED();
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+ BytecodeLabel body_label, condition_label, done_label;
+
+ if (stmt->cond()->ToBooleanIsFalse()) {
+ Visit(stmt->body());
+ // Bind condition_label and done_label for processing continue and break.
+ builder()->Bind(&condition_label);
+ builder()->Bind(&done_label);
+ } else {
+ builder()->Bind(&body_label);
+ Visit(stmt->body());
+
+ builder()->Bind(&condition_label);
+ if (stmt->cond()->ToBooleanIsTrue()) {
+ builder()->Jump(&body_label);
+ } else {
+ VisitForAccumulatorValue(stmt->cond());
+ builder()->JumpIfTrue(&body_label);
+ }
+ builder()->Bind(&done_label);
+ }
+ loop_builder.SetBreakTarget(done_label);
+ loop_builder.SetContinueTarget(condition_label);
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- UNIMPLEMENTED();
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+
+ BytecodeLabel body_label, condition_label, done_label;
+ if (stmt->cond()->ToBooleanIsFalse()) {
+    // If the condition is false, there is no need to generate the loop.
+ return;
+ }
+
+ if (!stmt->cond()->ToBooleanIsTrue()) {
+ builder()->Jump(&condition_label);
+ }
+ builder()->Bind(&body_label);
+ Visit(stmt->body());
+
+ builder()->Bind(&condition_label);
+ if (stmt->cond()->ToBooleanIsTrue()) {
+ builder()->Jump(&body_label);
+ } else {
+ VisitForAccumulatorValue(stmt->cond());
+ builder()->JumpIfTrue(&body_label);
+ }
+ builder()->Bind(&done_label);
+
+ loop_builder.SetBreakTarget(done_label);
+ loop_builder.SetContinueTarget(condition_label);
}
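+// Sketch of the emitted shape for `while (c) S;` with a non-constant
+// condition (illustrative):
+//   Jump condition_label
+//  body_label:
+//   <S>
+//  condition_label:
+//   <evaluate c into the accumulator>
+//   JumpIfTrue body_label
+//  done_label: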
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNIMPLEMENTED();
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+
+ if (stmt->init() != nullptr) {
+ Visit(stmt->init());
+ }
+
+ if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
+    // If the condition is known to be false there is no need to generate the
+    // body, next, or condition blocks. The init block should still be
+    // generated.
+ return;
+ }
+
+ BytecodeLabel body_label, condition_label, next_label, done_label;
+ if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
+ builder()->Jump(&condition_label);
+ }
+ builder()->Bind(&body_label);
+ Visit(stmt->body());
+ builder()->Bind(&next_label);
+ if (stmt->next() != nullptr) {
+ Visit(stmt->next());
+ }
+ if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
+ builder()->Bind(&condition_label);
+ VisitForAccumulatorValue(stmt->cond());
+ builder()->JumpIfTrue(&body_label);
+ } else {
+ builder()->Jump(&body_label);
+ }
+ builder()->Bind(&done_label);
+
+ loop_builder.SetBreakTarget(done_label);
+ loop_builder.SetContinueTarget(next_label);
+}
+
+
+void BytecodeGenerator::VisitForInAssignment(Expression* expr,
+ FeedbackVectorSlot slot) {
+ DCHECK(expr->IsValidReferenceExpression());
+
+  // Evaluate the assignment starting with the value to be stored in the
+  // accumulator.
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->AsVariableProxy()->var();
+ VisitVariableAssignment(variable, slot);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register value = temporary_register_scope.NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ Register object = VisitForRegisterValue(property->obj());
+ size_t name_index = builder()->GetConstantPoolEntry(
+ property->key()->AsLiteral()->AsPropertyName());
+ builder()->StoreNamedProperty(object, name_index, feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register value = temporary_register_scope.NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ Register object = VisitForRegisterValue(property->obj());
+ Register key = VisitForRegisterValue(property->key());
+ builder()->LoadAccumulatorWithRegister(value);
+ builder()->StoreKeyedProperty(object, key, feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
}
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNIMPLEMENTED();
+ // TODO(oth): For now we need a parent scope for paths that end up
+ // in VisitLiteral which can allocate in the parent scope. A future
+ // CL in preparation will add a StatementResultScope that will
+ // remove the need for this EffectResultScope.
+ EffectResultScope result_scope(this);
+
+ if (stmt->subject()->IsNullLiteral() ||
+ stmt->subject()->IsUndefinedLiteral(isolate())) {
+    // ForIn generates lots of code; skip it if it wouldn't produce any effects.
+ return;
+ }
+
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+
+ // Prepare the state for executing ForIn.
+ VisitForAccumulatorValue(stmt->subject());
+ loop_builder.BreakIfUndefined();
+ loop_builder.BreakIfNull();
+
+ Register receiver = execution_result()->NewRegister();
+ builder()->CastAccumulatorToJSObject();
+ builder()->StoreAccumulatorInRegister(receiver);
+ builder()->CallRuntime(Runtime::kGetPropertyNamesFast, receiver, 1);
+ builder()->ForInPrepare(receiver);
+ loop_builder.BreakIfUndefined();
+
+ Register for_in_state = execution_result()->NewRegister();
+ builder()->StoreAccumulatorInRegister(for_in_state);
+
+ // The loop.
+ BytecodeLabel condition_label, break_label, continue_label;
+  Register index = receiver;  // Reuse the register; the receiver is no
+                              // longer needed.
+ builder()->LoadLiteral(Smi::FromInt(0));
+
+ // Check loop termination (accumulator holds index).
+ builder()
+ ->Bind(&condition_label)
+ .StoreAccumulatorInRegister(index)
+ .ForInDone(for_in_state);
+ loop_builder.BreakIfTrue();
+
+ // Get the next item.
+ builder()->ForInNext(for_in_state, index);
+
+ // Start again if the item, currently in the accumulator, is undefined.
+ loop_builder.ContinueIfUndefined();
+
+  // Store the value in the 'each' variable.
+  VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
+  // NB: the user's loop variable is assigned the value of 'each', so even an
+  // empty body will have this assignment.
+ Visit(stmt->body());
+
+ // Increment the index and start loop again.
+ builder()
+ ->Bind(&continue_label)
+ .LoadAccumulatorWithRegister(index)
+ .CountOperation(Token::Value::ADD, language_mode_strength())
+ .Jump(&condition_label);
+
+ // End of the loop.
+ builder()->Bind(&break_label);
+
+ loop_builder.SetBreakTarget(break_label);
+ loop_builder.SetContinueTarget(continue_label);
}
@@ -192,11 +839,20 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ if (FLAG_ignition_fake_try_catch) {
+ Visit(stmt->try_block());
+ return;
+ }
UNIMPLEMENTED();
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ if (FLAG_ignition_fake_try_catch) {
+ Visit(stmt->try_block());
+ Visit(stmt->finally_block());
+ return;
+ }
UNIMPLEMENTED();
}
@@ -207,7 +863,15 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNIMPLEMENTED();
+ // Find or build a shared function info.
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+ CHECK(!shared_info.is_null()); // TODO(rmcilroy): Set stack overflow?
+
+ builder()
+ ->LoadLiteral(shared_info)
+ .CreateClosure(expr->pretenure() ? TENURED : NOT_TENURED);
+ execution_result()->SetResultInAccumulator();
}
@@ -222,74 +886,434 @@ void BytecodeGenerator::VisitNativeFunctionLiteral(
}
-void BytecodeGenerator::VisitConditional(Conditional* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitConditional(Conditional* expr) {
+  // TODO(rmcilroy): Spot easy cases where the code would not need to
+  // emit the then block or the else block, e.g. the condition is
+  // obviously true/1/false/0.
+
+ BytecodeLabel else_label, end_label;
+
+ VisitForAccumulatorValue(expr->condition());
+ builder()->JumpIfFalse(&else_label);
+
+ VisitForAccumulatorValue(expr->then_expression());
+ builder()->Jump(&end_label);
+
+ builder()->Bind(&else_label);
+ VisitForAccumulatorValue(expr->else_expression());
+ builder()->Bind(&end_label);
+
+ execution_result()->SetResultInAccumulator();
+}
void BytecodeGenerator::VisitLiteral(Literal* expr) {
- Handle<Object> value = expr->value();
- if (value->IsSmi()) {
- builder().LoadLiteral(Smi::cast(*value));
- } else if (value->IsUndefined()) {
- builder().LoadUndefined();
- } else if (value->IsTrue()) {
- builder().LoadTrue();
- } else if (value->IsFalse()) {
- builder().LoadFalse();
- } else if (value->IsNull()) {
- builder().LoadNull();
- } else if (value->IsTheHole()) {
- builder().LoadTheHole();
- } else {
- builder().LoadLiteral(value);
+ if (!execution_result()->IsEffect()) {
+ Handle<Object> value = expr->value();
+ if (value->IsSmi()) {
+ builder()->LoadLiteral(Smi::cast(*value));
+ } else if (value->IsUndefined()) {
+ builder()->LoadUndefined();
+ } else if (value->IsTrue()) {
+ builder()->LoadTrue();
+ } else if (value->IsFalse()) {
+ builder()->LoadFalse();
+ } else if (value->IsNull()) {
+ builder()->LoadNull();
+ } else if (value->IsTheHole()) {
+ builder()->LoadTheHole();
+ } else {
+ builder()->LoadLiteral(value);
+ }
+ execution_result()->SetResultInAccumulator();
}
}
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNIMPLEMENTED();
+ // Materialize a regular expression literal.
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register flags = temporary_register_scope.NewRegister();
+ builder()
+ ->LoadLiteral(expr->flags())
+ .StoreAccumulatorInRegister(flags)
+ .LoadLiteral(expr->pattern())
+ .CreateRegExpLiteral(expr->literal_index(), flags);
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNIMPLEMENTED();
+ // Deep-copy the literal boilerplate.
+ builder()
+ ->LoadLiteral(expr->constant_properties())
+ .CreateObjectLiteral(expr->literal_index(), expr->ComputeFlags(true));
+
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register literal;
+
+ // Store computed values into the literal.
+ bool literal_in_accumulator = true;
+ int property_index = 0;
+ AccessorTable accessor_table(zone());
+ for (; property_index < expr->properties()->length(); property_index++) {
+ TemporaryRegisterScope inner_temporary_register_scope(builder());
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
+ if (property->IsCompileTimeValue()) continue;
+
+ if (literal_in_accumulator) {
+ literal = temporary_register_scope.NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
+ literal_in_accumulator = false;
+ }
+
+ Literal* literal_key = property->key()->AsLiteral();
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED: {
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (literal_key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ size_t name_index =
+ builder()->GetConstantPoolEntry(literal_key->AsPropertyName());
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreNamedProperty(literal, name_index,
+ feedback_index(property->GetSlot(0)),
+ language_mode());
+ } else {
+ VisitForEffect(property->value());
+ }
+ } else {
+ inner_temporary_register_scope.PrepareForConsecutiveAllocations(3);
+ Register key =
+ inner_temporary_register_scope.NextConsecutiveRegister();
+ Register value =
+ inner_temporary_register_scope.NextConsecutiveRegister();
+ Register language =
+ inner_temporary_register_scope.NextConsecutiveRegister();
+ // TODO(oth): This is problematic - can't assume contiguous here.
+ // literal is allocated in temporary_register_scope, whereas
+ // key, value, language are in another.
+ DCHECK(Register::AreContiguous(literal, key, value, language));
+ VisitForAccumulatorValue(property->key());
+ builder()->StoreAccumulatorInRegister(key);
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value);
+ if (property->emit_store()) {
+ builder()
+ ->LoadLiteral(Smi::FromInt(SLOPPY))
+ .StoreAccumulatorInRegister(language)
+ .CallRuntime(Runtime::kSetProperty, literal, 4);
+ VisitSetHomeObject(value, literal, property);
+ }
+ }
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ inner_temporary_register_scope.PrepareForConsecutiveAllocations(1);
+ DCHECK(property->emit_store());
+ Register value =
+ inner_temporary_register_scope.NextConsecutiveRegister();
+ DCHECK(Register::AreContiguous(literal, value));
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value).CallRuntime(
+ Runtime::kInternalSetPrototype, literal, 2);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(literal_key)->second->getter = property;
+ }
+ break;
+ case ObjectLiteral::Property::SETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(literal_key)->second->setter = property;
+ }
+ break;
+ }
+ }
+
+ // Define accessors, using only a single call to the runtime for each pair of
+ // corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ TemporaryRegisterScope inner_temporary_register_scope(builder());
+ inner_temporary_register_scope.PrepareForConsecutiveAllocations(4);
+ Register name = inner_temporary_register_scope.NextConsecutiveRegister();
+ Register getter = inner_temporary_register_scope.NextConsecutiveRegister();
+ Register setter = inner_temporary_register_scope.NextConsecutiveRegister();
+ Register attr = inner_temporary_register_scope.NextConsecutiveRegister();
+ DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
+ VisitForAccumulatorValue(it->first);
+ builder()->StoreAccumulatorInRegister(name);
+ VisitObjectLiteralAccessor(literal, it->second->getter, getter);
+ VisitObjectLiteralAccessor(literal, it->second->setter, setter);
+ builder()
+ ->LoadLiteral(Smi::FromInt(NONE))
+ .StoreAccumulatorInRegister(attr)
+ .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, literal, 5);
+ }
+
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
+ // with the first computed property name and continues with all properties to
+ // its right. All the code from above initializes the static component of the
+ // object literal, and arranges for the map of the result to reflect the
+ // static order in which the keys appear. For the dynamic properties, we
+ // compile them into a series of "SetOwnProperty" runtime calls. This will
+ // preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ if (literal_in_accumulator) {
+ temporary_register_scope.PrepareForConsecutiveAllocations(4);
+ literal = temporary_register_scope.NextConsecutiveRegister();
+ builder()->StoreAccumulatorInRegister(literal);
+ literal_in_accumulator = false;
+ }
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(property->emit_store());
+ TemporaryRegisterScope inner_temporary_register_scope(builder());
+ Register value = inner_temporary_register_scope.NewRegister();
+ DCHECK(Register::AreContiguous(literal, value));
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value).CallRuntime(
+ Runtime::kInternalSetPrototype, literal, 2);
+ continue;
+ }
+
+ TemporaryRegisterScope inner_temporary_register_scope(builder());
+ inner_temporary_register_scope.PrepareForConsecutiveAllocations(3);
+ Register key = inner_temporary_register_scope.NextConsecutiveRegister();
+ Register value = inner_temporary_register_scope.NextConsecutiveRegister();
+ Register attr = inner_temporary_register_scope.NextConsecutiveRegister();
+ DCHECK(Register::AreContiguous(literal, key, value, attr));
+
+ VisitForAccumulatorValue(property->key());
+ builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value);
+ VisitSetHomeObject(value, literal, property);
+ builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
+ Runtime::FunctionId function_id = static_cast<Runtime::FunctionId>(-1);
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ function_id = Runtime::kDefineDataPropertyUnchecked;
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE(); // Handled specially above.
+ break;
+ case ObjectLiteral::Property::GETTER:
+ function_id = Runtime::kDefineGetterPropertyUnchecked;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ function_id = Runtime::kDefineSetterPropertyUnchecked;
+ break;
+ }
+ builder()->CallRuntime(function_id, literal, 4);
+ }
+
+ // Transform literals that contain functions to fast properties.
+ if (expr->has_function()) {
+ DCHECK(!literal_in_accumulator);
+ builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
+ }
+
+ if (!literal_in_accumulator) {
+    // Restore the object literal into the accumulator.
+ builder()->LoadAccumulatorWithRegister(literal);
+ }
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNIMPLEMENTED();
+ // Deep-copy the literal boilerplate.
+ builder()
+ ->LoadLiteral(expr->constant_elements())
+ .CreateArrayLiteral(expr->literal_index(), expr->ComputeFlags(true));
+
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register index, literal;
+
+ // Evaluate all the non-constant subexpressions and store them into the
+ // newly cloned array.
+ bool literal_in_accumulator = true;
+ for (int array_index = 0; array_index < expr->values()->length();
+ array_index++) {
+ Expression* subexpr = expr->values()->at(array_index);
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ if (subexpr->IsSpread()) {
+ // TODO(rmcilroy): Deal with spread expressions.
+ UNIMPLEMENTED();
+ }
+
+ if (literal_in_accumulator) {
+ index = temporary_register_scope.NewRegister();
+ literal = temporary_register_scope.NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
+ literal_in_accumulator = false;
+ }
+
+ FeedbackVectorSlot slot = expr->LiteralFeedbackSlot();
+ builder()
+ ->LoadLiteral(Smi::FromInt(array_index))
+ .StoreAccumulatorInRegister(index);
+ VisitForAccumulatorValue(subexpr);
+ builder()->StoreKeyedProperty(literal, index, feedback_index(slot),
+ language_mode());
+ }
+
+ if (!literal_in_accumulator) {
+    // Restore the literal array into the accumulator.
+ builder()->LoadAccumulatorWithRegister(literal);
+ }
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
- VisitVariableLoad(proxy->var());
+ VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
}
-void BytecodeGenerator::VisitVariableLoad(Variable* variable) {
+void BytecodeGenerator::VisitVariableLoad(Variable* variable,
+ FeedbackVectorSlot slot,
+ TypeofMode typeof_mode) {
switch (variable->location()) {
case VariableLocation::LOCAL: {
- Register source(variable->index());
- builder().LoadAccumulatorWithRegister(source);
+      Register source(variable->index());
+ execution_result()->SetResultInRegister(source);
break;
}
case VariableLocation::PARAMETER: {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
- Register source(builder().Parameter(variable->index() + 1));
- builder().LoadAccumulatorWithRegister(source);
+ Register source = builder()->Parameter(variable->index() + 1);
+ execution_result()->SetResultInRegister(source);
break;
}
- case VariableLocation::GLOBAL: {
- // Global var, const, or let variable.
- // TODO(rmcilroy): If context chain depth is short enough, do this using
- // a generic version of LoadGlobalViaContextStub rather than calling the
- // runtime.
- DCHECK(variable->IsStaticGlobalObjectProperty());
- builder().LoadGlobal(variable->index());
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ size_t name_index = builder()->GetConstantPoolEntry(variable->name());
+ builder()->LoadGlobal(name_index, feedback_index(slot), language_mode(),
+ typeof_mode);
+ execution_result()->SetResultInAccumulator();
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ ContextScope* context = execution_context()->Previous(depth);
+ Register context_reg;
+ if (context) {
+ context_reg = context->reg();
+ } else {
+ context_reg = execution_result()->NewRegister();
+ // Walk the context chain to find the context at the given depth.
+ // TODO(rmcilroy): Perform this work in a bytecode handler once we have
+ // a generic mechanism for performing jumps in interpreter.cc.
+ builder()
+ ->LoadAccumulatorWithRegister(execution_context()->reg())
+ .StoreAccumulatorInRegister(context_reg);
+ for (int i = 0; i < depth; ++i) {
+ builder()
+ ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
+ .StoreAccumulatorInRegister(context_reg);
+ }
+ }
+ builder()->LoadContextSlot(context_reg, variable->index());
+ execution_result()->SetResultInAccumulator();
+ // TODO(rmcilroy): Perform check for uninitialized legacy const, const and
+ // let variables.
+ break;
+ }
+ case VariableLocation::LOOKUP:
+ UNIMPLEMENTED();
+ }
+}
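+// Illustration of the chain walk above: for a variable whose scope is two
+// contexts out (depth == 2) with no materialized ContextScope at that depth,
+// the emitted bytecode copies the current context into |context_reg| and
+// then loads Context::PREVIOUS_INDEX twice before the final LoadContextSlot.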
+
+
+void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
+ Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+ AccumulatorResultScope accumulator_result(this);
+ VisitVariableLoad(variable, slot, typeof_mode);
+}
+
+
+Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
+ Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+ RegisterResultScope register_scope(this);
+ VisitVariableLoad(variable, slot, typeof_mode);
+ return register_scope.ResultRegister();
+}
+
+
+void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+ FeedbackVectorSlot slot) {
+ switch (variable->location()) {
+ case VariableLocation::LOCAL: {
+ // TODO(rmcilroy): support const mode initialization.
+ Register destination(variable->index());
+ builder()->StoreAccumulatorInRegister(destination);
+ RecordStoreToRegister(destination);
+ break;
+ }
+ case VariableLocation::PARAMETER: {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register destination(builder()->Parameter(variable->index() + 1));
+ builder()->StoreAccumulatorInRegister(destination);
+ RecordStoreToRegister(destination);
+ break;
+ }
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ size_t name_index = builder()->GetConstantPoolEntry(variable->name());
+ builder()->StoreGlobal(name_index, feedback_index(slot), language_mode());
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ // TODO(rmcilroy): support const mode initialization.
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ ContextScope* context = execution_context()->Previous(depth);
+ Register context_reg;
+ if (context) {
+ context_reg = context->reg();
+ } else {
+ Register value_temp = execution_result()->NewRegister();
+ context_reg = execution_result()->NewRegister();
+ // Walk the context chain to find the context at the given depth.
+ // TODO(rmcilroy): Perform this work in a bytecode handler once we have
+ // a generic mechanism for performing jumps in interpreter.cc.
+ builder()
+ ->StoreAccumulatorInRegister(value_temp)
+ .LoadAccumulatorWithRegister(execution_context()->reg())
+ .StoreAccumulatorInRegister(context_reg);
+ for (int i = 0; i < depth; ++i) {
+ builder()
+ ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
+ .StoreAccumulatorInRegister(context_reg);
+ }
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+ builder()->StoreContextSlot(context_reg, variable->index());
break;
}
- case VariableLocation::UNALLOCATED:
- case VariableLocation::CONTEXT:
case VariableLocation::LOOKUP:
UNIMPLEMENTED();
}
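Note how the CONTEXT store case above mirrors the load walk, with one extra step: the value to be stored arrives in the accumulator, so it is parked in value_temp first, the chain walk then clobbers the accumulator, and the value is reloaded just before StoreContextSlot. This is why the store path allocates one more register than the load path.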
@@ -298,8 +1322,8 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable) {
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
- TemporaryRegisterScope temporary_register_scope(&builder_);
Register object, key;
+ size_t name_index = kMaxUInt32;
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
@@ -310,22 +1334,25 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
// Nothing to do to evaluate variable assignment LHS.
break;
- case NAMED_PROPERTY:
- object = temporary_register_scope.NewRegister();
- key = temporary_register_scope.NewRegister();
- Visit(property->obj());
- builder().StoreAccumulatorInRegister(object);
- builder().LoadLiteral(property->key()->AsLiteral()->AsPropertyName());
- builder().StoreAccumulatorInRegister(key);
+ case NAMED_PROPERTY: {
+ object = VisitForRegisterValue(property->obj());
+ name_index = builder()->GetConstantPoolEntry(
+ property->key()->AsLiteral()->AsPropertyName());
break;
- case KEYED_PROPERTY:
- object = temporary_register_scope.NewRegister();
- key = temporary_register_scope.NewRegister();
- Visit(property->obj());
- builder().StoreAccumulatorInRegister(object);
- Visit(property->key());
- builder().StoreAccumulatorInRegister(key);
+ }
+ case KEYED_PROPERTY: {
+ object = VisitForRegisterValue(property->obj());
+ if (expr->is_compound()) {
+ // Use VisitForAccumulatorValue and store to a register so that the
+ // key is still in the accumulator for loading the old value below.
+ key = execution_result()->NewRegister();
+ VisitForAccumulatorValue(property->key());
+ builder()->StoreAccumulatorInRegister(key);
+ } else {
+ key = VisitForRegisterValue(property->key());
+ }
break;
+ }
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
@@ -334,151 +1361,461 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
- UNIMPLEMENTED();
+ Register old_value;
+ switch (assign_type) {
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ old_value = VisitVariableLoadForRegisterValue(
+ proxy->var(), proxy->VariableFeedbackSlot());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ old_value = execution_result()->NewRegister();
+ builder()
+ ->LoadNamedProperty(object, name_index, feedback_index(slot),
+ language_mode())
+ .StoreAccumulatorInRegister(old_value);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ // Key is already in accumulator at this point due to evaluating the
+ // LHS above.
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ old_value = execution_result()->NewRegister();
+ builder()
+ ->LoadKeyedProperty(object, feedback_index(slot), language_mode())
+ .StoreAccumulatorInRegister(old_value);
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ break;
+ }
+ VisitForAccumulatorValue(expr->value());
+ builder()->BinaryOperation(expr->binary_op(), old_value,
+ language_mode_strength());
} else {
- Visit(expr->value());
+ VisitForAccumulatorValue(expr->value());
}
// Store the value.
- FeedbackVectorICSlot slot = expr->AssignmentSlot();
+ FeedbackVectorSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
+ // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
+ // Is the value in the accumulator safe? Yes, but scary.
Variable* variable = expr->target()->AsVariableProxy()->var();
- DCHECK(variable->location() == VariableLocation::LOCAL);
- Register destination(variable->index());
- builder().StoreAccumulatorInRegister(destination);
+ VisitVariableAssignment(variable, slot);
break;
}
case NAMED_PROPERTY:
- builder().StoreNamedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->StoreNamedProperty(object, name_index, feedback_index(slot),
+ language_mode());
break;
case KEYED_PROPERTY:
- builder().StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->StoreKeyedProperty(object, key, feedback_index(slot),
+ language_mode());
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
}
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
-void BytecodeGenerator::VisitThrow(Throw* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitThrow(Throw* expr) {
+ VisitForAccumulatorValue(expr->exception());
+ builder()->Throw();
+}
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
LhsKind property_kind = Property::GetAssignType(expr);
- FeedbackVectorICSlot slot = expr->PropertyFeedbackSlot();
+ FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
switch (property_kind) {
case VARIABLE:
UNREACHABLE();
case NAMED_PROPERTY: {
- builder().LoadLiteral(expr->key()->AsLiteral()->AsPropertyName());
- builder().LoadNamedProperty(obj, feedback_index(slot), language_mode());
+ size_t name_index = builder()->GetConstantPoolEntry(
+ expr->key()->AsLiteral()->AsPropertyName());
+ builder()->LoadNamedProperty(obj, name_index, feedback_index(slot),
+ language_mode());
break;
}
case KEYED_PROPERTY: {
- Visit(expr->key());
- builder().LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+ VisitForAccumulatorValue(expr->key());
+ builder()->LoadKeyedProperty(obj, feedback_index(slot), language_mode());
break;
}
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
}
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
+ Property* expr) {
+ AccumulatorResultScope result_scope(this);
+ VisitPropertyLoad(obj, expr);
}
void BytecodeGenerator::VisitProperty(Property* expr) {
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register obj = temporary_register_scope.NewRegister();
- Visit(expr->obj());
- builder().StoreAccumulatorInRegister(obj);
+ Register obj = VisitForRegisterValue(expr->obj());
VisitPropertyLoad(obj, expr);
}
+Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
+ if (args->length() == 0) {
+ return Register();
+ }
+
+ // Visit arguments and place in a contiguous block of temporary
+ // registers. Return the first temporary register corresponding to
+ // the first argument.
+ //
+ // NB the caller may have already called
+ // PrepareForConsecutiveAllocations() with args->length() + N. The
+ // second call here will be a no-op provided there have been N or
+ // fewer calls to NextConsecutiveRegister(). Otherwise, the arguments
+ // here will still be consecutive, but they will not be consecutive
+ // with earlier consecutive allocations made by the caller.
+ execution_result()->PrepareForConsecutiveAllocations(args->length());
+
+ // Visit the first argument, which goes into the returned register.
+ Register first_arg = execution_result()->NextConsecutiveRegister();
+ VisitForAccumulatorValue(args->at(0));
+ builder()->StoreAccumulatorInRegister(first_arg);
+
+ // Visit the remaining arguments.
+ for (int i = 1; i < static_cast<int>(args->length()); i++) {
+ Register ith_arg = execution_result()->NextConsecutiveRegister();
+ VisitForAccumulatorValue(args->at(i));
+ builder()->StoreAccumulatorInRegister(ith_arg);
+ DCHECK(ith_arg.index() - i == first_arg.index());
+ }
+ return first_arg;
+}
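A minimal usage sketch of the contract described above, mirroring what VisitCall() below does (setup elided; see the real call sites for full context):

    // Reserve receiver + N argument registers up front.
    execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
    Register receiver = execution_result()->NextConsecutiveRegister();
    // VisitArguments() re-issues the request for args->length() registers;
    // that second request is a no-op, so the first argument lands directly
    // after the receiver.
    Register first_arg = VisitArguments(args);
    DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);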
+
+
void BytecodeGenerator::VisitCall(Call* expr) {
Expression* callee_expr = expr->expression();
Call::CallType call_type = expr->GetCallType(isolate());
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register callee = temporary_register_scope.NewRegister();
- Register receiver = temporary_register_scope.NewRegister();
+ Register callee = execution_result()->NewRegister();
+
+ // The receiver and arguments need to be allocated consecutively for
+ // Call(). Future optimizations could avoid this if there are no
+ // arguments or if the receiver and arguments are already consecutive.
+ ZoneList<Expression*>* args = expr->arguments();
+ execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
+ Register receiver = execution_result()->NextConsecutiveRegister();
switch (call_type) {
- case Call::PROPERTY_CALL: {
+ case Call::NAMED_PROPERTY_CALL:
+ case Call::KEYED_PROPERTY_CALL: {
Property* property = callee_expr->AsProperty();
- if (property->IsSuperAccess()) {
- UNIMPLEMENTED();
- }
- Visit(property->obj());
- builder().StoreAccumulatorInRegister(receiver);
- // Perform a property load of the callee.
- VisitPropertyLoad(receiver, property);
- builder().StoreAccumulatorInRegister(callee);
+ VisitForAccumulatorValue(property->obj());
+ builder()->StoreAccumulatorInRegister(receiver);
+ VisitPropertyLoadForAccumulator(receiver, property);
+ builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
- builder().LoadUndefined().StoreAccumulatorInRegister(receiver);
+ builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
// Load callee as a global variable.
VariableProxy* proxy = callee_expr->AsVariableProxy();
- VisitVariableLoad(proxy->var());
- builder().StoreAccumulatorInRegister(callee);
+ VisitVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot());
+ builder()->StoreAccumulatorInRegister(callee);
+ break;
+ }
+ case Call::OTHER_CALL: {
+ builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ VisitForAccumulatorValue(callee_expr);
+ builder()->StoreAccumulatorInRegister(callee);
break;
}
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ case Call::KEYED_SUPER_PROPERTY_CALL:
case Call::LOOKUP_SLOT_CALL:
case Call::SUPER_CALL:
case Call::POSSIBLY_EVAL_CALL:
- case Call::OTHER_CALL:
UNIMPLEMENTED();
}
// Evaluate all arguments to the function call and store in sequential
// registers.
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Visit(args->at(i));
- Register arg = temporary_register_scope.NewRegister();
- DCHECK(arg.index() - i == receiver.index() + 1);
- builder().StoreAccumulatorInRegister(arg);
- }
+ Register arg = VisitArguments(args);
+ CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
// TODO(rmcilroy): Deal with possible direct eval here?
// TODO(rmcilroy): Use CallIC to allow call type feedback.
- builder().Call(callee, receiver, args->length());
+ builder()->Call(callee, receiver, args->length());
+ execution_result()->SetResultInAccumulator();
+}
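The register layout Call expects is therefore: the callee anywhere, and the receiver plus arguments in one contiguous block (a sketch; rN is illustrative):

    callee             any register
    rN                 receiver
    rN+1 .. rN+argc    arguments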
+
+
+void BytecodeGenerator::VisitCallNew(CallNew* expr) {
+ Register constructor = execution_result()->NewRegister();
+ VisitForAccumulatorValue(expr->expression());
+ builder()->StoreAccumulatorInRegister(constructor);
+
+ ZoneList<Expression*>* args = expr->arguments();
+ Register first_arg = VisitArguments(args);
+ builder()->New(constructor, first_arg, args->length());
+ execution_result()->SetResultInAccumulator();
}
-void BytecodeGenerator::VisitCallNew(CallNew* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ Register receiver;
+ if (expr->is_jsruntime()) {
+ // Allocate a register for the receiver and load it with undefined.
+ execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
+ receiver = execution_result()->NextConsecutiveRegister();
+ builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ }
+ // Evaluate all arguments to the runtime call.
+ Register first_arg = VisitArguments(args);
+
+ if (expr->is_jsruntime()) {
+ DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
+ builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
+ } else {
+ // TODO(rmcilroy): support multiple return values.
+ DCHECK_LE(expr->function()->result_size, 1);
+ Runtime::FunctionId function_id = expr->function()->function_id;
+ builder()->CallRuntime(function_id, first_arg, args->length());
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
+ VisitForEffect(expr->expression());
+ builder()->LoadUndefined();
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
+ if (expr->expression()->IsVariableProxy()) {
+ // Typeof does not throw a reference error on global variables, hence we
+ // perform a non-contextual load in case the operand is a variable proxy.
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ VisitVariableLoadForAccumulatorValue(
+ proxy->var(), proxy->VariableFeedbackSlot(), INSIDE_TYPEOF);
+ } else {
+ VisitForAccumulatorValue(expr->expression());
+ }
+ builder()->TypeOf();
+ execution_result()->SetResultInAccumulator();
+}
-void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
+ VisitForAccumulatorValue(expr->expression());
+ builder()->LogicalNot();
+ execution_result()->SetResultInAccumulator();
+}
void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNIMPLEMENTED();
+ switch (expr->op()) {
+ case Token::Value::NOT:
+ VisitNot(expr);
+ break;
+ case Token::Value::TYPEOF:
+ VisitTypeOf(expr);
+ break;
+ case Token::Value::VOID:
+ VisitVoid(expr);
+ break;
+ case Token::Value::DELETE:
+ VisitDelete(expr);
+ break;
+ case Token::Value::BIT_NOT:
+ case Token::Value::ADD:
+ case Token::Value::SUB:
+ // These operators are converted to equivalent binary operators in
+ // the parser and are not expected to be visited here.
+ UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
+ if (expr->expression()->IsProperty()) {
+ // Delete of an object property is allowed in both sloppy
+ // and strict modes.
+ Property* property = expr->expression()->AsProperty();
+ Register object = VisitForRegisterValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ builder()->Delete(object, language_mode());
+ } else if (expr->expression()->IsVariableProxy()) {
+ // Delete of an unqualified identifier is allowed in sloppy mode but is
+ // not allowed in strict mode. Deleting 'this' is allowed in both modes.
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ Variable* variable = proxy->var();
+ DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ // Global var, let, const or variables not explicitly declared.
+ Register global_object = execution_result()->NewRegister();
+ builder()
+ ->LoadContextSlot(execution_context()->reg(),
+ Context::GLOBAL_OBJECT_INDEX)
+ .StoreAccumulatorInRegister(global_object)
+ .LoadLiteral(variable->name())
+ .Delete(global_object, language_mode());
+ break;
+ }
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ // Deleting local var/let/const, context variables, and arguments
+ // does not have any effect.
+ if (variable->HasThisName(isolate())) {
+ builder()->LoadTrue();
+ } else {
+ builder()->LoadFalse();
+ }
+ break;
+ }
+ case VariableLocation::LOOKUP: {
+ UNIMPLEMENTED();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Delete of an unresolvable reference returns true.
+ VisitForEffect(expr->expression());
+ builder()->LoadTrue();
+ }
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNIMPLEMENTED();
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->expression()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+
+ // TODO(rmcilroy): Set is_postfix to false if visiting for effect.
+ bool is_postfix = expr->is_postfix();
+
+ // Evaluate LHS expression and get old value.
+ Register obj, key, old_value;
+ size_t name_index = kMaxUInt32;
+ switch (assign_type) {
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ VisitVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ obj = VisitForRegisterValue(property->obj());
+ name_index = builder()->GetConstantPoolEntry(
+ property->key()->AsLiteral()->AsPropertyName());
+ builder()->LoadNamedProperty(obj, name_index, feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ obj = VisitForRegisterValue(property->obj());
+ // Use VisitForAccumulatorValue here since we need the key in the
+ // accumulator for the LoadKeyedProperty below.
+ key = execution_result()->NewRegister();
+ VisitForAccumulatorValue(property->key());
+ builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
+ obj, feedback_index(slot), language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+
+ // Convert old value into a number.
+ if (!is_strong(language_mode())) {
+ builder()->CastAccumulatorToNumber();
+ }
+
+ // Save result for postfix expressions.
+ if (is_postfix) {
+ old_value = execution_result()->outer()->NewRegister();
+ builder()->StoreAccumulatorInRegister(old_value);
+ }
+
+ // Perform +1/-1 operation.
+ builder()->CountOperation(expr->binary_op(), language_mode_strength());
+
+ // Store the value.
+ FeedbackVectorSlot feedback_slot = expr->CountSlot();
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ VisitVariableAssignment(variable, feedback_slot);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ builder()->StoreNamedProperty(
+ obj, name_index, feedback_index(feedback_slot), language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ builder()->StoreKeyedProperty(obj, key, feedback_index(feedback_slot),
+ language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+
+ // Restore old value for postfix expressions.
+ if (is_postfix) {
+ execution_result()->SetResultInRegister(old_value);
+ } else {
+ execution_result()->SetResultInAccumulator();
+ }
}
void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
switch (binop->op()) {
case Token::COMMA:
+ VisitCommaExpression(binop);
+ break;
case Token::OR:
+ VisitLogicalOrExpression(binop);
+ break;
case Token::AND:
- UNIMPLEMENTED();
+ VisitLogicalAndExpression(binop);
break;
default:
VisitArithmeticExpression(binop);
@@ -488,17 +1825,38 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
+ // TODO(oth): Remove PrepareForBinaryExpression/CompleteBinaryExpression
+ // once we have StatementScope that tracks hazardous loads/stores.
+ PrepareForBinaryExpression();
+ Register lhs = VisitForRegisterValue(expr->left());
+ if (builder()->RegisterIsParameterOrLocal(lhs)) {
+ // Result was returned in an existing local or parameter. See if
+ // it needs to be moved to a temporary.
+ // TODO(oth): LoadFromAliasedRegister call into VisitVariableLoad().
+ lhs = LoadFromAliasedRegister(lhs);
+ }
+ VisitForAccumulatorValue(expr->right());
+ builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
+ CompleteBinaryExpression();
+ execution_result()->SetResultInAccumulator();
+}
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register temporary = temporary_register_scope.NewRegister();
- Visit(left);
- builder().StoreAccumulatorInRegister(temporary);
- Visit(right);
- builder().CompareOperation(op, temporary, language_mode());
+void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+ // TODO(oth): Remove PrepareForBinaryExpression/CompleteBinaryExpression
+ // once we have StatementScope that tracks hazardous loads/stores.
+ PrepareForBinaryExpression();
+ Register lhs = VisitForRegisterValue(expr->left());
+ if (builder()->RegisterIsParameterOrLocal(lhs)) {
+ // Result was returned in an existing local or parameter. See if
+ // it needs to be moved to a temporary.
+ // TODO(oth): LoadFromAliasedRegister call into VisitVariableLoad().
+ lhs = LoadFromAliasedRegister(lhs);
+ }
+ VisitForAccumulatorValue(expr->right());
+ builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
+ CompleteBinaryExpression();
+ execution_result()->SetResultInAccumulator();
}
@@ -511,7 +1869,7 @@ void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNIMPLEMENTED();
+ execution_result()->SetResultInRegister(Register::function_closure());
}
@@ -526,18 +1884,269 @@ void BytecodeGenerator::VisitSuperPropertyReference(
}
-void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* binop) {
- Token::Value op = binop->op();
+void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
+ VisitForEffect(binop->left());
+ Visit(binop->right());
+}
+
+
+void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register temporary = temporary_register_scope.NewRegister();
+ // Short-circuit evaluation: if it is known that left is always true,
+ // there is no need to visit right.
+ if (left->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(left);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfTrue(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
+ Expression* left = binop->left();
+ Expression* right = binop->right();
+
+ // Short-circuit evaluation: if it is known that left is always false,
+ // there is no need to visit right.
+ if (left->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(left);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfFalse(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
+}
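Both short-circuit forms compile to the same shape; for a || b, when a's truthiness is not statically known, the sketch is:

    <evaluate a into the accumulator>
    JumpIfTrue end      ;; or a ToBoolean variant, depending on the builder
    <evaluate b into the accumulator>
    end: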
+
+
+void BytecodeGenerator::VisitNewLocalFunctionContext() {
+ AccumulatorResultScope accumulator_execution_result(this);
+ Scope* scope = this->scope();
+
+ // Allocate a new local context.
+ if (scope->is_script_scope()) {
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register closure = temporary_register_scope.NewRegister();
+ Register scope_info = temporary_register_scope.NewRegister();
+ DCHECK(Register::AreContiguous(closure, scope_info));
+ builder()
+ ->LoadAccumulatorWithRegister(Register::function_closure())
+ .StoreAccumulatorInRegister(closure)
+ .LoadLiteral(scope->GetScopeInfo(isolate()))
+ .StoreAccumulatorInRegister(scope_info)
+ .CallRuntime(Runtime::kNewScriptContext, closure, 2);
+ } else {
+ builder()->CallRuntime(Runtime::kNewFunctionContext,
+ Register::function_closure(), 1);
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitBuildLocalActivationContext() {
+ Scope* scope = this->scope();
+
+ if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
+ UNIMPLEMENTED();
+ }
+
+ // Copy parameters into context if necessary.
+ int num_parameters = scope->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* variable = scope->parameter(i);
+ if (!variable->IsContextSlot()) continue;
+
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register parameter(builder()->Parameter(i + 1));
+ // Context variable (at bottom of the context chain).
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ builder()->LoadAccumulatorWithRegister(parameter)
+ .StoreContextSlot(execution_context()->reg(), variable->index());
+ }
+}
+
+
+void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
+ AccumulatorResultScope accumulator_execution_result(this);
+ DCHECK(scope->is_block_scope());
+
+ // Allocate a new local block context.
+ TemporaryRegisterScope temporary_register_scope(builder());
+ Register scope_info = temporary_register_scope.NewRegister();
+ Register closure = temporary_register_scope.NewRegister();
+ DCHECK(Register::AreContiguous(scope_info, closure));
+ builder()
+ ->LoadLiteral(scope->GetScopeInfo(isolate()))
+ .StoreAccumulatorInRegister(scope_info);
+ VisitFunctionClosureForContext();
+ builder()
+ ->StoreAccumulatorInRegister(closure)
+ .CallRuntime(Runtime::kPushBlockContext, scope_info, 2);
+ execution_result()->SetResultInAccumulator();
+}
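A sketch of the sequence this emits (registers illustrative; the pair must be contiguous because CallRuntime passes its arguments as a register range):

    LdaConstant [scope_info]
    Star r0
    <closure via VisitFunctionClosureForContext()>
    Star r1
    CallRuntime [kPushBlockContext], r0, #2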
+
- Visit(left);
- builder().StoreAccumulatorInRegister(temporary);
- Visit(right);
- builder().BinaryOperation(op, temporary);
+void BytecodeGenerator::VisitObjectLiteralAccessor(
+ Register home_object, ObjectLiteralProperty* property, Register value_out) {
+ // TODO(rmcilroy): Replace value_out with VisitForRegister();
+ if (property == nullptr) {
+ builder()->LoadNull().StoreAccumulatorInRegister(value_out);
+ } else {
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value_out);
+ VisitSetHomeObject(value_out, home_object, property);
+ }
+}
+
+
+void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
+ ObjectLiteralProperty* property,
+ int slot_number) {
+ Expression* expr = property->value();
+ if (!FunctionLiteral::NeedsHomeObject(expr)) return;
+
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
+ if (variable == nullptr) return;
+
+ DCHECK(variable->IsContextSlot() || variable->IsStackAllocated());
+
+ // Allocate and initialize a new arguments object and assign to the
+ // {arguments} variable.
+ CreateArgumentsType type =
+ is_strict(language_mode()) || !info()->has_simple_parameters()
+ ? CreateArgumentsType::kUnmappedArguments
+ : CreateArgumentsType::kMappedArguments;
+ builder()->CreateArguments(type);
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
+ if (variable == nullptr) return;
+
+ // TODO(rmcilroy): Remove once we have tests which exercise this code path.
+ UNIMPLEMENTED();
+
+ // Store the closure we were called with in the this_function_var.
+ builder()->LoadAccumulatorWithRegister(Register::function_closure());
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
+ if (variable == nullptr) return;
+
+ // Store the original constructor we were called with in the new target
+ // variable.
+ builder()->CallRuntime(Runtime::kGetOriginalConstructor, Register(), 0);
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+void BytecodeGenerator::VisitFunctionClosureForContext() {
+ AccumulatorResultScope accumulator_execution_result(this);
+ Scope* closure_scope = execution_context()->scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function as
+ // their closure, not the anonymous closure containing the global code.
+ // Pass a SMI sentinel and let the runtime look up the empty function.
+ builder()->LoadLiteral(Smi::FromInt(0));
+ } else {
+ DCHECK(closure_scope->is_function_scope());
+ builder()->LoadAccumulatorWithRegister(Register::function_closure());
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::PrepareForBinaryExpression() {
+ if (binary_expression_depth_++ == 0) {
+ binary_expression_hazard_set_.clear();
+ }
+}
+
+
+// Visits the expression |expr| and places the result in the accumulator.
+void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
+ AccumulatorResultScope accumulator_scope(this);
+ Visit(expr);
+}
+
+
+// Visits the expression |expr| and discards the result.
+void BytecodeGenerator::VisitForEffect(Expression* expr) {
+ EffectResultScope effect_scope(this);
+ Visit(expr);
+}
+
+
+// Visits the expression |expr| and returns the register containing
+// the expression result.
+Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
+ RegisterResultScope register_scope(this);
+ Visit(expr);
+ return register_scope.ResultRegister();
+}
+
+
+Register BytecodeGenerator::LoadFromAliasedRegister(Register reg) {
+ // TODO(oth): Follow on CL to load from re-map here.
+ DCHECK(builder()->RegisterIsParameterOrLocal(reg));
+ if (binary_expression_depth_ > 0) {
+ binary_expression_hazard_set_.insert(reg.index());
+ }
+ return reg;
+}
+
+
+void BytecodeGenerator::RecordStoreToRegister(Register reg) {
+ DCHECK(builder()->RegisterIsParameterOrLocal(reg));
+ if (binary_expression_depth_ > 0) {
+ // TODO(oth): a store to a register that has been loaded needs to be
+ // remapped.
+ DCHECK(binary_expression_hazard_set_.find(reg.index()) ==
+ binary_expression_hazard_set_.end());
+ }
+}
+
+
+void BytecodeGenerator::CompleteBinaryExpression() {
+ DCHECK(binary_expression_depth_ > 0);
+ binary_expression_depth_ -= 1;
+ // TODO(oth): spill remapped registers into origins.
+ // TODO(oth): make statement/top-level.
+}
+
+
+Register BytecodeGenerator::NextContextRegister() const {
+ if (execution_context() == nullptr) {
+ // Return the incoming function context for the outermost execution context.
+ return Register::function_context();
+ }
+ Register previous = execution_context()->reg();
+ if (previous == Register::function_context()) {
+ // If the previous context was the incoming function context, then the next
+ // context register is the first local context register.
+ return builder_.first_context_register();
+ } else {
+ // Otherwise use the next local context register.
+ DCHECK_LT(previous.index(), builder_.last_context_register().index());
+ return Register(previous.index() + 1);
+ }
}
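In other words, context registers are handed out stack-fashion: the outermost generator state uses the incoming <context> register, and each nested context takes the next consecutive slot in the builder's reserved context range.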
@@ -546,7 +2155,12 @@ LanguageMode BytecodeGenerator::language_mode() const {
}
-int BytecodeGenerator::feedback_index(FeedbackVectorICSlot slot) const {
+Strength BytecodeGenerator::language_mode_strength() const {
+ return strength(language_mode());
+}
+
+
+int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
return info()->feedback_vector()->GetIndex(slot);
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 99536c33fb..7284cfe9e1 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -24,25 +24,121 @@ class BytecodeGenerator : public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ // The visiting function for the declarations list is overridden.
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+
private:
+ class ContextScope;
+ class ControlScope;
+ class ControlScopeForIteration;
+ class ControlScopeForSwitch;
+ class ExpressionResultScope;
+ class EffectResultScope;
+ class AccumulatorResultScope;
+ class RegisterResultScope;
+
+ void MakeBytecodeBody();
+ Register NextContextRegister() const;
+
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+ // Dispatched from VisitBinaryOperation.
void VisitArithmeticExpression(BinaryOperation* binop);
+ void VisitCommaExpression(BinaryOperation* binop);
+ void VisitLogicalOrExpression(BinaryOperation* binop);
+ void VisitLogicalAndExpression(BinaryOperation* binop);
+
+ // Dispatched from VisitUnaryOperation.
+ void VisitVoid(UnaryOperation* expr);
+ void VisitTypeOf(UnaryOperation* expr);
+ void VisitNot(UnaryOperation* expr);
+ void VisitDelete(UnaryOperation* expr);
+
+ // Helper visitors which perform common operations.
+ Register VisitArguments(ZoneList<Expression*>* arguments);
+
void VisitPropertyLoad(Register obj, Property* expr);
- void VisitVariableLoad(Variable* variable);
+ void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
+
+ void VisitVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ void VisitVariableLoadForAccumulatorValue(
+ Variable* variable, FeedbackVectorSlot slot,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ MUST_USE_RESULT Register
+ VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ void VisitVariableAssignment(Variable* variable, FeedbackVectorSlot slot);
+
+ void VisitArgumentsObject(Variable* variable);
+ void VisitThisFunctionVariable(Variable* variable);
+ void VisitNewTargetVariable(Variable* variable);
+ void VisitNewLocalFunctionContext();
+ void VisitBuildLocalActivationContext();
+ void VisitNewLocalBlockContext(Scope* scope);
+ void VisitFunctionClosureForContext();
+ void VisitSetHomeObject(Register value, Register home_object,
+ ObjectLiteralProperty* property, int slot_number = 0);
+ void VisitObjectLiteralAccessor(Register home_object,
+ ObjectLiteralProperty* property,
+ Register value_out);
+ void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+
+
+ // Visitors for obtaining expression result in the accumulator, in a
+ // register, or just getting the effect.
+ void VisitForAccumulatorValue(Expression* expression);
+ MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
+ void VisitForEffect(Expression* node);
+
+ // Methods marking the start and end of binary expressions.
+ void PrepareForBinaryExpression();
+ void CompleteBinaryExpression();
+
+ // Methods for tracking and remapping registers.
+ void RecordStoreToRegister(Register reg);
+ Register LoadFromAliasedRegister(Register reg);
+
+ inline BytecodeArrayBuilder* builder() { return &builder_; }
+
+ inline Isolate* isolate() const { return isolate_; }
+ inline Zone* zone() const { return zone_; }
- inline BytecodeArrayBuilder& builder() { return builder_; }
inline Scope* scope() const { return scope_; }
inline void set_scope(Scope* scope) { scope_ = scope; }
inline CompilationInfo* info() const { return info_; }
inline void set_info(CompilationInfo* info) { info_ = info; }
- LanguageMode language_mode() const;
- int feedback_index(FeedbackVectorICSlot slot) const;
+ inline ControlScope* execution_control() const { return execution_control_; }
+ inline void set_execution_control(ControlScope* scope) {
+ execution_control_ = scope;
+ }
+ inline ContextScope* execution_context() const { return execution_context_; }
+ inline void set_execution_context(ContextScope* context) {
+ execution_context_ = context;
+ }
+ inline void set_execution_result(ExpressionResultScope* execution_result) {
+ execution_result_ = execution_result;
+ }
+ ExpressionResultScope* execution_result() const { return execution_result_; }
+ ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ inline LanguageMode language_mode() const;
+ Strength language_mode_strength() const;
+ int feedback_index(FeedbackVectorSlot slot) const;
+
+ Isolate* isolate_;
+ Zone* zone_;
BytecodeArrayBuilder builder_;
CompilationInfo* info_;
Scope* scope_;
+ ZoneVector<Handle<Object>> globals_;
+ ControlScope* execution_control_;
+ ContextScope* execution_context_;
+ ExpressionResultScope* execution_result_;
+
+ int binary_expression_depth_;
+ ZoneSet<int> binary_expression_hazard_set_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-traits.h b/deps/v8/src/interpreter/bytecode-traits.h
new file mode 100644
index 0000000000..fd778d7c92
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-traits.h
@@ -0,0 +1,180 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_TRAITS_H_
+#define V8_INTERPRETER_BYTECODE_TRAITS_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// TODO(rmcilroy): consider simplifying this to avoid the template magic.
+
+// Template helpers to deduce the number of operands each bytecode has.
+#define OPERAND_TERM OperandType::kNone, OperandType::kNone, OperandType::kNone
+
+template <OperandType>
+struct OperandTraits {};
+
+#define DECLARE_OPERAND_SIZE(Name, Size) \
+ template <> \
+ struct OperandTraits<OperandType::k##Name> { \
+ static const OperandSize kSizeType = Size; \
+ static const int kSize = static_cast<int>(Size); \
+ };
+OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
+#undef DECLARE_OPERAND_SIZE
+
+
+template <OperandType... Args>
+struct BytecodeTraits {};
+
+template <OperandType operand_0, OperandType operand_1, OperandType operand_2,
+ OperandType operand_3>
+struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
+ OPERAND_TERM> {
+ static OperandType GetOperandType(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandType kOperands[] = {operand_0, operand_1, operand_2,
+ operand_3};
+ return kOperands[i];
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandSize kOperandSizes[] =
+ {OperandTraits<operand_0>::kSizeType,
+ OperandTraits<operand_1>::kSizeType,
+ OperandTraits<operand_2>::kSizeType,
+ OperandTraits<operand_3>::kSizeType};
+ return kOperandSizes[i];
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const int kOffset0 = 1;
+ const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+ const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
+ const int kOffset3 = kOffset2 + OperandTraits<operand_2>::kSize;
+ const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2, kOffset3};
+ return kOperandOffsets[i];
+ }
+
+ static const int kOperandCount = 4;
+ static const int kSize =
+ 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
+ OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
+};
+
+
+template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
+struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandType kOperands[] = {operand_0, operand_1, operand_2};
+ return kOperands[i];
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandSize kOperandSizes[] =
+ {OperandTraits<operand_0>::kSizeType,
+ OperandTraits<operand_1>::kSizeType,
+ OperandTraits<operand_2>::kSizeType};
+ return kOperandSizes[i];
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const int kOffset0 = 1;
+ const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+ const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
+ const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2};
+ return kOperandOffsets[i];
+ }
+
+ static const int kOperandCount = 3;
+ static const int kSize =
+ 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
+ OperandTraits<operand_2>::kSize;
+};
+
+template <OperandType operand_0, OperandType operand_1>
+struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandType kOperands[] = {operand_0, operand_1};
+ return kOperands[i];
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandSize kOperandSizes[] =
+ {OperandTraits<operand_0>::kSizeType,
+ OperandTraits<operand_1>::kSizeType};
+ return kOperandSizes[i];
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const int kOffset0 = 1;
+ const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+ const int kOperandOffsets[] = {kOffset0, kOffset1};
+ return kOperandOffsets[i];
+ }
+
+ static const int kOperandCount = 2;
+ static const int kSize =
+ 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
+};
+
+template <OperandType operand_0>
+struct BytecodeTraits<operand_0, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ DCHECK(i == 0);
+ return operand_0;
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(i == 0);
+ return OperandTraits<operand_0>::kSizeType;
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(i == 0);
+ return 1;
+ }
+
+ static const int kOperandCount = 1;
+ static const int kSize = 1 + OperandTraits<operand_0>::kSize;
+};
+
+template <>
+struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ UNREACHABLE();
+ return OperandType::kNone;
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ UNREACHABLE();
+ return OperandSize::kNone;
+ }
+
+ static inline int GetOperandOffset(int i) {
+ UNREACHABLE();
+ return 1;
+ }
+
+ static const int kOperandCount = 0;
+ static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
+};
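As a quick sanity check of the size computations above (a sketch, placed here purely for illustration):

    static_assert(BytecodeTraits<OperandType::kReg8, OperandType::kIdx8,
                                 OPERAND_TERM>::kSize == 3,
                  "opcode byte plus two single-byte operands");
    static_assert(BytecodeTraits<OperandType::kIdx16,
                                 OPERAND_TERM>::kSize == 3,
                  "opcode byte plus one two-byte operand");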
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_TRAITS_H_
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index e5b9ab73a9..df2a1dd4f1 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -5,25 +5,12 @@
#include "src/interpreter/bytecodes.h"
#include "src/frames.h"
+#include "src/interpreter/bytecode-traits.h"
namespace v8 {
namespace internal {
namespace interpreter {
-// Maximum number of operands a bytecode may have.
-static const int kMaxOperands = 3;
-
-// kBytecodeTable relies on kNone being the same as zero to detect length.
-STATIC_ASSERT(static_cast<int>(OperandType::kNone) == 0);
-
-static const OperandType kBytecodeTable[][kMaxOperands] = {
-#define DECLARE_OPERAND(_, ...) \
- { __VA_ARGS__ } \
- ,
- BYTECODE_LIST(DECLARE_OPERAND)
-#undef DECLARE_OPERAND
-};
-
// static
const char* Bytecodes::ToString(Bytecode bytecode) {
@@ -42,7 +29,7 @@ const char* Bytecodes::ToString(Bytecode bytecode) {
// static
const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
switch (operand_type) {
-#define CASE(Name) \
+#define CASE(Name, _) \
case OperandType::k##Name: \
return #Name;
OPERAND_TYPE_LIST(CASE)
@@ -54,6 +41,21 @@ const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
// static
+const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kNone:
+ return "None";
+ case OperandSize::kByte:
+ return "Byte";
+ case OperandSize::kShort:
+ return "Short";
+ }
+ UNREACHABLE();
+ return "";
+}
+
+
+// static
uint8_t Bytecodes::ToByte(Bytecode bytecode) {
return static_cast<uint8_t>(bytecode);
}
@@ -68,38 +70,115 @@ Bytecode Bytecodes::FromByte(uint8_t value) {
// static
+int Bytecodes::Size(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
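With the traits in place, a bytecode's size reflects its real operand widths rather than a fixed byte per operand: e.g. kCall (kReg8, kReg8, kCount8) occupies 4 bytes, while kLoadICSloppyWide (kReg8, kIdx16, kIdx16) occupies 6.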
+
+
+// static
int Bytecodes::NumberOfOperands(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
- int count;
- uint8_t row = ToByte(bytecode);
- for (count = 0; count < kMaxOperands; count++) {
- if (kBytecodeTable[row][count] == OperandType::kNone) {
- break;
- }
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kOperandCount;
+ BYTECODE_LIST(CASE)
+#undef CASE
}
- return count;
+ UNREACHABLE();
+ return 0;
}
// static
OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
- DCHECK(bytecode <= Bytecode::kLast && i < NumberOfOperands(bytecode));
- return kBytecodeTable[ToByte(bytecode)][i];
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandType(i);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandType::kNone;
}
// static
-int Bytecodes::Size(Bytecode bytecode) {
- return 1 + NumberOfOperands(bytecode);
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandSize(i);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
// static
-int Bytecodes::MaximumNumberOfOperands() { return kMaxOperands; }
+int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandOffset(i);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+// static
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, Size) \
+ case OperandType::k##Name: \
+ return Size;
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
+}
// static
-int Bytecodes::MaximumSize() { return 1 + kMaxOperands; }
+bool Bytecodes::IsJump(Bytecode bytecode) {
+ return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpIfTrue ||
+ bytecode == Bytecode::kJumpIfFalse ||
+ bytecode == Bytecode::kJumpIfToBooleanTrue ||
+ bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfNull ||
+ bytecode == Bytecode::kJumpIfUndefined;
+}
+
+
+// static
+bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstant ||
+ bytecode == Bytecode::kJumpIfTrueConstant ||
+ bytecode == Bytecode::kJumpIfFalseConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+ bytecode == Bytecode::kJumpIfNullConstant ||
+ bytecode == Bytecode::kJumpIfUndefinedConstant;
+}
// static
@@ -114,30 +193,40 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
SNPrintF(buf, "%02x ", bytecode_start[i]);
os << buf.start();
}
- for (int i = bytecode_size; i < Bytecodes::MaximumSize(); i++) {
+ const int kBytecodeColumnSize = 6;
+ for (int i = bytecode_size; i < kBytecodeColumnSize; i++) {
os << " ";
}
os << bytecode << " ";
- const uint8_t* operands_start = bytecode_start + 1;
- int operands_size = bytecode_size - 1;
- for (int i = 0; i < operands_size; i++) {
+ int number_of_operands = NumberOfOperands(bytecode);
+ for (int i = 0; i < number_of_operands; i++) {
OperandType op_type = GetOperandType(bytecode, i);
- uint8_t operand = operands_start[i];
+ const uint8_t* operand_start =
+ &bytecode_start[GetOperandOffset(bytecode, i)];
switch (op_type) {
- case interpreter::OperandType::kCount:
- os << "#" << static_cast<unsigned int>(operand);
+ case interpreter::OperandType::kCount8:
+ os << "#" << static_cast<unsigned int>(*operand_start);
+ break;
+ case interpreter::OperandType::kIdx8:
+ os << "[" << static_cast<unsigned int>(*operand_start) << "]";
break;
- case interpreter::OperandType::kIdx:
- os << "[" << static_cast<unsigned int>(operand) << "]";
+ case interpreter::OperandType::kIdx16: {
+ os << "[" << ReadUnalignedUInt16(operand_start) << "]";
break;
+ }
case interpreter::OperandType::kImm8:
- os << "#" << static_cast<int>(static_cast<int8_t>(operand));
+ os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
break;
- case interpreter::OperandType::kReg: {
- Register reg = Register::FromOperand(operand);
- if (reg.is_parameter()) {
+ case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kMaybeReg8: {
+ Register reg = Register::FromOperand(*operand_start);
+ if (reg.is_function_context()) {
+ os << "<context>";
+ } else if (reg.is_function_closure()) {
+ os << "<closure>";
+ } else if (reg.is_parameter()) {
int parameter_index = reg.ToParameterIndex(parameter_count);
if (parameter_index == 0) {
os << "<this>";
@@ -153,7 +242,7 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
UNREACHABLE();
break;
}
- if (i != operands_size - 1) {
+ if (i != number_of_operands - 1) {
os << ", ";
}
}
@@ -171,8 +260,17 @@ std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
}
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
+ return os << Bytecodes::OperandSizeToString(operand_size);
+}
+
+
static const int kLastParamRegisterIndex = -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+static const int kFunctionClosureRegisterIndex =
+ -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
+static const int kFunctionContextRegisterIndex =
+ -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
@@ -198,6 +296,26 @@ int Register::ToParameterIndex(int parameter_count) const {
}
+Register Register::function_closure() {
+ return Register(kFunctionClosureRegisterIndex);
+}
+
+
+bool Register::is_function_closure() const {
+ return index() == kFunctionClosureRegisterIndex;
+}
+
+
+Register Register::function_context() {
+ return Register(kFunctionContextRegisterIndex);
+}
+
+
+bool Register::is_function_context() const {
+ return index() == kFunctionContextRegisterIndex;
+}
+
+
int Register::MaxParameterIndex() { return kMaxParameterIndex; }
@@ -208,6 +326,24 @@ Register Register::FromOperand(uint8_t operand) {
return Register(-static_cast<int8_t>(operand));
}
+
+bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5) {
+ if (reg1.index() + 1 != reg2.index()) {
+ return false;
+ }
+ if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
+ return false;
+ }
+ if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
+ return false;
+ }
+ if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
+ return false;
+ }
+ return true;
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 3862842277..8eaf920d1b 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -16,82 +16,190 @@ namespace internal {
namespace interpreter {
// The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V) \
- V(None) \
- V(Count) \
- V(Imm8) \
- V(Idx) \
- V(Reg)
+#define OPERAND_TYPE_LIST(V) \
+ \
+ /* None operand. */ \
+ V(None, OperandSize::kNone) \
+ \
+ /* Byte operands. */ \
+ V(Count8, OperandSize::kByte) \
+ V(Imm8, OperandSize::kByte) \
+ V(Idx8, OperandSize::kByte) \
+ V(Reg8, OperandSize::kByte) \
+ V(MaybeReg8, OperandSize::kByte) \
+ \
+ /* Short operands. */ \
+ V(Idx16, OperandSize::kShort)
// The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V) \
- \
- /* Loading the accumulator */ \
- V(LdaZero, OperandType::kNone) \
- V(LdaSmi8, OperandType::kImm8) \
- V(LdaConstant, OperandType::kIdx) \
- V(LdaUndefined, OperandType::kNone) \
- V(LdaNull, OperandType::kNone) \
- V(LdaTheHole, OperandType::kNone) \
- V(LdaTrue, OperandType::kNone) \
- V(LdaFalse, OperandType::kNone) \
- \
- /* Load globals */ \
- V(LdaGlobal, OperandType::kIdx) \
- \
- /* Register-accumulator transfers */ \
- V(Ldar, OperandType::kReg) \
- V(Star, OperandType::kReg) \
- \
- /* LoadIC operations */ \
- V(LoadIC, OperandType::kReg, OperandType::kIdx) \
- V(KeyedLoadIC, OperandType::kReg, OperandType::kIdx) \
- \
- /* StoreIC operations */ \
- V(StoreIC, OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(KeyedStoreIC, OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- \
- /* Binary Operators */ \
- V(Add, OperandType::kReg) \
- V(Sub, OperandType::kReg) \
- V(Mul, OperandType::kReg) \
- V(Div, OperandType::kReg) \
- V(Mod, OperandType::kReg) \
- \
- /* Call operations. */ \
- V(Call, OperandType::kReg, OperandType::kReg, OperandType::kCount) \
- \
- /* Test Operators */ \
- V(TestEqual, OperandType::kReg) \
- V(TestNotEqual, OperandType::kReg) \
- V(TestEqualStrict, OperandType::kReg) \
- V(TestNotEqualStrict, OperandType::kReg) \
- V(TestLessThan, OperandType::kReg) \
- V(TestGreaterThan, OperandType::kReg) \
- V(TestLessThanOrEqual, OperandType::kReg) \
- V(TestGreaterThanOrEqual, OperandType::kReg) \
- V(TestInstanceOf, OperandType::kReg) \
- V(TestIn, OperandType::kReg) \
- \
- /* Cast operators */ \
- V(ToBoolean, OperandType::kNone) \
- \
- /* Control Flow */ \
- V(Jump, OperandType::kImm8) \
- V(JumpConstant, OperandType::kIdx) \
- V(JumpIfTrue, OperandType::kImm8) \
- V(JumpIfTrueConstant, OperandType::kIdx) \
- V(JumpIfFalse, OperandType::kImm8) \
- V(JumpIfFalseConstant, OperandType::kIdx) \
+#define BYTECODE_LIST(V) \
+ \
+ /* Loading the accumulator */ \
+ V(LdaZero, OperandType::kNone) \
+ V(LdaSmi8, OperandType::kImm8) \
+ V(LdaUndefined, OperandType::kNone) \
+ V(LdaNull, OperandType::kNone) \
+ V(LdaTheHole, OperandType::kNone) \
+ V(LdaTrue, OperandType::kNone) \
+ V(LdaFalse, OperandType::kNone) \
+ V(LdaConstant, OperandType::kIdx8) \
+ V(LdaConstantWide, OperandType::kIdx16) \
+ \
+ /* Globals */ \
+ V(LdaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalInsideTypeofSloppy, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalInsideTypeofStrict, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalInsideTypeofSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalInsideTypeofStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
+ V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
+ V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(StaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ \
+ /* Context operations */ \
+ V(PushContext, OperandType::kReg8) \
+ V(PopContext, OperandType::kReg8) \
+ V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
+ V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
+ \
+ /* Register-accumulator transfers */ \
+ V(Ldar, OperandType::kReg8) \
+ V(Star, OperandType::kReg8) \
+ \
+ /* LoadIC operations */ \
+ V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(KeyedLoadICSloppy, OperandType::kReg8, OperandType::kIdx8) \
+ V(KeyedLoadICStrict, OperandType::kReg8, OperandType::kIdx8) \
+ /* TODO(rmcilroy): Wide register operands too? */ \
+ V(LoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(LoadICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(KeyedLoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16) \
+ V(KeyedLoadICStrictWide, OperandType::kReg8, OperandType::kIdx16) \
+ \
+ /* StoreIC operations */ \
+ V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(StoreICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(KeyedStoreICSloppy, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx8) \
+ V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx8) \
+ /* TODO(rmcilroy): Wide register operands too? */ \
+ V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(KeyedStoreICSloppyWide, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx16) \
+ V(KeyedStoreICStrictWide, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx16) \
+ \
+ /* Binary Operators */ \
+ V(Add, OperandType::kReg8) \
+ V(Sub, OperandType::kReg8) \
+ V(Mul, OperandType::kReg8) \
+ V(Div, OperandType::kReg8) \
+ V(Mod, OperandType::kReg8) \
+ V(BitwiseOr, OperandType::kReg8) \
+ V(BitwiseXor, OperandType::kReg8) \
+ V(BitwiseAnd, OperandType::kReg8) \
+ V(ShiftLeft, OperandType::kReg8) \
+ V(ShiftRight, OperandType::kReg8) \
+ V(ShiftRightLogical, OperandType::kReg8) \
+ \
+ /* Unary Operators */ \
+ V(Inc, OperandType::kNone) \
+ V(Dec, OperandType::kNone) \
+ V(LogicalNot, OperandType::kNone) \
+ V(TypeOf, OperandType::kNone) \
+ V(DeletePropertyStrict, OperandType::kReg8) \
+ V(DeletePropertySloppy, OperandType::kReg8) \
+ \
+ /* Call operations */ \
+ V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8) \
+ V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8, \
+ OperandType::kCount8) \
+ V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8, \
+ OperandType::kCount8) \
+ \
+ /* New operator */ \
+ V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kCount8) \
+ \
+ /* Test Operators */ \
+ V(TestEqual, OperandType::kReg8) \
+ V(TestNotEqual, OperandType::kReg8) \
+ V(TestEqualStrict, OperandType::kReg8) \
+ V(TestNotEqualStrict, OperandType::kReg8) \
+ V(TestLessThan, OperandType::kReg8) \
+ V(TestGreaterThan, OperandType::kReg8) \
+ V(TestLessThanOrEqual, OperandType::kReg8) \
+ V(TestGreaterThanOrEqual, OperandType::kReg8) \
+ V(TestInstanceOf, OperandType::kReg8) \
+ V(TestIn, OperandType::kReg8) \
+ \
+ /* Cast operators */ \
+ V(ToBoolean, OperandType::kNone) \
+ V(ToName, OperandType::kNone) \
+ V(ToNumber, OperandType::kNone) \
+ V(ToObject, OperandType::kNone) \
+ \
+ /* Literals */ \
+ V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kReg8) \
+ V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kImm8) \
+ V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kImm8) \
+ \
+ /* Closure allocation */ \
+ V(CreateClosure, OperandType::kImm8) \
+ \
+ /* Arguments allocation */ \
+ V(CreateMappedArguments, OperandType::kNone) \
+ V(CreateUnmappedArguments, OperandType::kNone) \
+ \
+ /* Control Flow */ \
+ V(Jump, OperandType::kImm8) \
+ V(JumpConstant, OperandType::kIdx8) \
+ V(JumpIfTrue, OperandType::kImm8) \
+ V(JumpIfTrueConstant, OperandType::kIdx8) \
+ V(JumpIfFalse, OperandType::kImm8) \
+ V(JumpIfFalseConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanTrue, OperandType::kImm8) \
+ V(JumpIfToBooleanTrueConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanFalse, OperandType::kImm8) \
+ V(JumpIfToBooleanFalseConstant, OperandType::kIdx8) \
+ V(JumpIfNull, OperandType::kImm8) \
+ V(JumpIfNullConstant, OperandType::kIdx8) \
+ V(JumpIfUndefined, OperandType::kImm8) \
+ V(JumpIfUndefinedConstant, OperandType::kIdx8) \
+ \
+ /* Complex flow control: for..in */ \
+ V(ForInPrepare, OperandType::kReg8) \
+ V(ForInNext, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInDone, OperandType::kReg8) \
+ \
+ /* Non-local flow control */ \
+ V(Throw, OperandType::kNone) \
V(Return, OperandType::kNone)
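
The list above is a classic X macro: BYTECODE_LIST(V) stamps out one V(...) invocation per bytecode, and each consumer supplies its own V to generate an enum, a dispatch table, or a name table. A minimal, self-contained sketch of the pattern, using a made-up three-entry list rather than the real BYTECODE_LIST:

    #include <cstdint>
    #include <iostream>

    // Hypothetical mini-list in the same shape as BYTECODE_LIST(V).
    #define MINI_BYTECODE_LIST(V) \
      V(LdaZero)                  \
      V(Ldar)                     \
      V(Return)

    enum class MiniBytecode : uint8_t {
    #define DECLARE_BYTECODE(Name) k##Name,
      MINI_BYTECODE_LIST(DECLARE_BYTECODE)
    #undef DECLARE_BYTECODE
    };

    const char* ToString(MiniBytecode bytecode) {
      switch (bytecode) {
    #define CASE(Name)            \
      case MiniBytecode::k##Name: \
        return #Name;
        MINI_BYTECODE_LIST(CASE)
    #undef CASE
      }
      return "";
    }

    int main() {
      std::cout << ToString(MiniBytecode::kLdar) << std::endl;  // prints "Ldar"
      return 0;
    }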
+// Enumeration of the size classes of operand types used by bytecodes.
+enum class OperandSize : uint8_t {
+ kNone = 0,
+ kByte = 1,
+ kShort = 2,
+};
+
+
// Enumeration of operand types used by bytecodes.
enum class OperandType : uint8_t {
-#define DECLARE_OPERAND_TYPE(Name) k##Name,
+#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
#undef DECLARE_OPERAND_TYPE
-#define COUNT_OPERAND_TYPES(x) +1
+#define COUNT_OPERAND_TYPES(x, _) +1
// The COUNT_OPERAND_TYPES macro will turn this into kLast = -1 +1 +1... which
// will evaluate to the same value as the last operand.
kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
@@ -112,7 +220,7 @@ enum class Bytecode : uint8_t {
};
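
The -1 counting trick above is compact enough to miss: every list entry expands to +1, so the initializer lands exactly on the value of the final enumerator. In isolation, with a hypothetical three-entry list:

    // Each entry contributes +1, so kLast = -1 +1 +1 +1 == 2 == kIdx8.
    #define LIST(V) V(None, _) V(Imm8, _) V(Idx8, _)

    enum class Op : uint8_t {
    #define DECLARE(Name, _) k##Name,
      LIST(DECLARE)
    #undef DECLARE
    #define COUNT(Name, _) +1
      kLast = -1 LIST(COUNT)
    #undef COUNT
    };

    static_assert(Op::kLast == Op::kIdx8, "kLast tracks the final entry");

    int main() { return 0; }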
-// An interpreter register which is located in the function's register file
+// An interpreter Register which is located in the function's Register file
// in its stack-frame. Registers hold parameters, this, and expression values.
class Register {
public:
@@ -131,14 +239,41 @@ class Register {
return index_;
}
bool is_parameter() const { return index() < 0; }
+ bool is_valid() const { return index_ != kIllegalIndex; }
static Register FromParameterIndex(int index, int parameter_count);
int ToParameterIndex(int parameter_count) const;
static int MaxParameterIndex();
+ // Returns the register for the function's closure object.
+ static Register function_closure();
+ bool is_function_closure() const;
+
+ // Returns the register for the function's outer context.
+ static Register function_context();
+ bool is_function_context() const;
+
static Register FromOperand(uint8_t operand);
uint8_t ToOperand() const;
+ static bool AreContiguous(Register reg1, Register reg2,
+ Register reg3 = Register(),
+ Register reg4 = Register(),
+ Register reg5 = Register());
+
+ bool operator==(const Register& other) const {
+ return index() == other.index();
+ }
+ bool operator!=(const Register& other) const {
+ return index() != other.index();
+ }
+ bool operator<(const Register& other) const {
+ return index() < other.index();
+ }
+ bool operator<=(const Register& other) const {
+ return index() <= other.index();
+ }
+
private:
static const int kIllegalIndex = kMaxInt;
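
A default-constructed Register is deliberately invalid (its index is kIllegalIndex), which is what lets AreContiguous take optional trailing registers: checking simply stops at the first invalid one. A stand-alone sketch of that sentinel pattern, simplified rather than the real class:

    #include <cassert>
    #include <climits>

    class Reg {
     public:
      Reg() : index_(kIllegalIndex) {}            // invalid sentinel
      explicit Reg(int index) : index_(index) {}
      int index() const { return index_; }
      bool is_valid() const { return index_ != kIllegalIndex; }

      // Trailing defaults are sentinels; checking stops at the first one.
      static bool AreContiguous(Reg r1, Reg r2, Reg r3 = Reg()) {
        if (r2.index() != r1.index() + 1) return false;
        if (r3.is_valid() && r3.index() != r2.index() + 1) return false;
        return true;
      }

     private:
      static const int kIllegalIndex = INT_MAX;
      int index_;
    };

    int main() {
      assert(Reg::AreContiguous(Reg(4), Reg(5)));
      assert(!Reg::AreContiguous(Reg(4), Reg(6)));
      assert(Reg::AreContiguous(Reg(4), Reg(5), Reg(6)));
      return 0;
    }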
@@ -157,6 +292,9 @@ class Bytecodes {
// Returns string representation of |operand_type|.
static const char* OperandTypeToString(OperandType operand_type);
+ // Returns string representation of |operand_size|.
+ static const char* OperandSizeToString(OperandSize operand_size);
+
// Returns byte value of bytecode.
static uint8_t ToByte(Bytecode bytecode);
@@ -169,14 +307,26 @@ class Bytecodes {
// Return the i-th operand of |bytecode|.
static OperandType GetOperandType(Bytecode bytecode, int i);
+ // Return the size of the i-th operand of |bytecode|.
+ static OperandSize GetOperandSize(Bytecode bytecode, int i);
+
+ // Returns the offset of the i-th operand of |bytecode| relative to the start
+ // of the bytecode.
+ static int GetOperandOffset(Bytecode bytecode, int i);
+
// Returns the size of the bytecode including its operands.
static int Size(Bytecode bytecode);
- // The maximum number of operands across all bytecodes.
- static int MaximumNumberOfOperands();
+ // Returns the size of |operand|.
+ static OperandSize SizeOfOperand(OperandType operand);
+
+ // Return true if the bytecode is a jump or a conditional jump taking
+ // an immediate byte operand (OperandType::kImm8).
+ static bool IsJump(Bytecode bytecode);
- // Maximum size of a bytecode and its operands.
- static int MaximumSize();
+ // Return true if the bytecode is a jump or conditional jump taking a
+ // constant pool entry (OperandType::kIdx8).
+ static bool IsJumpConstant(Bytecode bytecode);
// Decode a single bytecode and operands to |os|.
static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
@@ -188,6 +338,7 @@ class Bytecodes {
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
new file mode 100644
index 0000000000..3ecabe4351
--- /dev/null
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -0,0 +1,95 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/control-flow-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+
+BreakableControlFlowBuilder::~BreakableControlFlowBuilder() {
+ DCHECK(break_sites_.empty());
+}
+
+
+void BreakableControlFlowBuilder::SetBreakTarget(const BytecodeLabel& target) {
+ BindLabels(target, &break_sites_);
+}
+
+
+void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->Jump(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfTrue(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfTrue(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfUndefined(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfUndefined(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfNull(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfNull(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites,
+ int index) {
+ builder()->Jump(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfTrue(
+ ZoneVector<BytecodeLabel>* sites, int index) {
+ builder()->JumpIfTrue(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
+ ZoneVector<BytecodeLabel>* sites) {
+ for (size_t i = 0; i < sites->size(); i++) {
+ BytecodeLabel& site = sites->at(i);
+ builder()->Bind(target, &site);
+ }
+ sites->clear();
+}
+
+
+LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
+
+
+void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
+ BindLabels(target, &continue_sites_);
+}
+
+
+SwitchBuilder::~SwitchBuilder() {
+#ifdef DEBUG
+ for (auto site : case_sites_) {
+ DCHECK(site.is_bound());
+ }
+#endif
+}
+
+
+void SwitchBuilder::SetCaseTarget(int index) {
+ BytecodeLabel& site = case_sites_.at(index);
+ builder()->Bind(&site);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
new file mode 100644
index 0000000000..c9be6dcdc7
--- /dev/null
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -0,0 +1,126 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
+#define V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ControlFlowBuilder BASE_EMBEDDED {
+ public:
+ explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
+ : builder_(builder) {}
+ virtual ~ControlFlowBuilder() {}
+
+ protected:
+ BytecodeArrayBuilder* builder() const { return builder_; }
+
+ private:
+ BytecodeArrayBuilder* builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlFlowBuilder);
+};
+
+class BreakableControlFlowBuilder : public ControlFlowBuilder {
+ public:
+ explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
+ : ControlFlowBuilder(builder),
+ break_sites_(builder->zone()) {}
+ virtual ~BreakableControlFlowBuilder();
+
+ // This method should be called by the control flow owner before
+ // destruction to update sites that emit jumps for break.
+ void SetBreakTarget(const BytecodeLabel& break_target);
+
+ // This method is called when visiting break statements in the AST.
+ // Inserts a jump to an unbound label that is patched when the corresponding
+ // SetBreakTarget is called.
+ void Break() { EmitJump(&break_sites_); }
+ void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
+ void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
+ void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
+
+ protected:
+ void EmitJump(ZoneVector<BytecodeLabel>* labels);
+ void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
+
+ void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
+
+ private:
+ // Unbound labels that identify jumps for break statements in the code.
+ ZoneVector<BytecodeLabel> break_sites_;
+};
+
+// A class to help with co-ordinating break and continue statements with
+// their loop.
+// TODO(oth): add support for TF branch/merge info.
+class LoopBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit LoopBuilder(BytecodeArrayBuilder* builder)
+ : BreakableControlFlowBuilder(builder),
+ continue_sites_(builder->zone()) {}
+ ~LoopBuilder();
+
+ // This method should be called by the LoopBuilder owner before
+ // destruction to update sites that emit jumps for continue.
+ void SetContinueTarget(const BytecodeLabel& continue_target);
+
+ // This method is called when visiting continue statements in the AST.
+ // Inserts a jump to an unbound label that is patched when the corresponding
+ // SetContinueTarget is called.
+ void Continue() { EmitJump(&continue_sites_); }
+ void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
+ void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
+ void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
+
+ private:
+ // Unbound labels that identify jumps for continue statements in the code.
+ ZoneVector<BytecodeLabel> continue_sites_;
+};
+
+// A class to help with co-ordinating break statements with their switch.
+// TODO(oth): add support for TF branch/merge info.
+class SwitchBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
+ : BreakableControlFlowBuilder(builder),
+ case_sites_(builder->zone()) {
+ case_sites_.resize(number_of_cases);
+ }
+ ~SwitchBuilder();
+
+ // This method should be called by the SwitchBuilder owner when the case
+ // statement with |index| is emitted to update the case jump site.
+ void SetCaseTarget(int index);
+
+ // This method is called when visiting case comparison operation for |index|.
+ // Inserts a JumpIfTrue to an unbound label that is patched when the
+ // corresponding SetCaseTarget is called.
+ void Case(int index) { EmitJumpIfTrue(&case_sites_, index); }
+
+ // This method is called when all case comparisons have been emitted if there
+ // is a default case statement. Inserts a Jump to an unbound label that is
+ // patched when the corresponding SetCaseTarget is called.
+ void DefaultAt(int index) { EmitJump(&case_sites_, index); }
+
+ private:
+ // Unbound labels that identify jumps for case statements in the code.
+ ZoneVector<BytecodeLabel> case_sites_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
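
All three builders reduce to the same forward-jump backpatching idea: record where each break/continue/case jump was emitted, then patch every recorded site once the target offset is finally known. A toy, self-contained version of the mechanism (the real builders go through BytecodeArrayBuilder and BytecodeLabel rather than raw offsets):

    #include <cstdint>
    #include <vector>

    struct Emitter {
      std::vector<uint8_t> code;
      std::vector<size_t> break_sites;  // offsets of unpatched jump operands

      void EmitBreakJump() {            // analogous to Break()
        code.push_back(0xEE);           // stand-in for a Jump opcode
        break_sites.push_back(code.size());
        code.push_back(0);              // operand patched later
      }

      void BindBreakTarget() {          // analogous to SetBreakTarget()
        for (size_t site : break_sites) {
          // Relative offset from the jump opcode to the current position.
          code[site] = static_cast<uint8_t>(code.size() - (site - 1));
        }
        break_sites.clear();
      }
    };

    int main() {
      Emitter e;
      e.EmitBreakJump();       // a break inside the loop body
      e.code.push_back(0x01);  // some other bytecode
      e.BindBreakTarget();     // loop exit known; patch all break jumps
      return 0;
    }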
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 2d97fc8ef2..e089a5d475 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -59,21 +59,17 @@ void Interpreter::Initialize() {
bool Interpreter::MakeBytecode(CompilationInfo* info) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
-
BytecodeGenerator generator(info->isolate(), info->zone());
info->EnsureFeedbackVector();
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
if (FLAG_print_bytecode) {
- bytecodes->Print();
- }
-
- DCHECK(shared_info->function_data()->IsUndefined());
- if (!shared_info->function_data()->IsUndefined()) {
- return false;
+ OFStream os(stdout);
+ os << "Function: " << info->GetDebugName().get() << std::endl;
+ bytecodes->Print(os);
+ os << std::flush;
}
- shared_info->set_function_data(*bytecodes);
+ info->SetBytecodeArray(bytecodes);
info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
return true;
}
@@ -100,17 +96,14 @@ void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
//
// Load an 8-bit integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
- Node* raw_int = __ BytecodeOperandImm8(0);
+ Node* raw_int = __ BytecodeOperandImm(0);
Node* smi_int = __ SmiTag(raw_int);
__ SetAccumulator(smi_int);
__ Dispatch();
}
-// LdaConstant <idx>
-//
-// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
__ SetAccumulator(constant);
@@ -118,6 +111,22 @@ void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
}
+// LdaConstant <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+ DoLoadConstant(assembler);
+}
+
+
+// LdaConstantWide <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+ DoLoadConstant(assembler);
+}
+
+
// LdaUndefined
//
// Load Undefined into the accumulator.
@@ -191,20 +200,279 @@ void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
}
-// LdaGlobal <slot_index>
+void Interpreter::DoLoadGlobal(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ // Get the global object.
+ Node* context = __ GetContext();
+ Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+
+ // Load the global via the LoadIC.
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* constant_index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
+ type_feedback_vector);
+ __ SetAccumulator(result);
+
+ __ Dispatch();
+}
+
+
+// LdaGlobalSloppy <name_index> <slot>
//
-// Load the global at |slot_index| into the accumulator.
-void Interpreter::DoLdaGlobal(compiler::InterpreterAssembler* assembler) {
- Node* slot_index = __ BytecodeOperandIdx(0);
- Node* smi_slot_index = __ SmiTag(slot_index);
- Node* result = __ CallRuntime(Runtime::kLoadGlobalViaContext, smi_slot_index);
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalStrict <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofSloppy <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalInsideTypeofSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofStrict <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalInsideTypeofStrict(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalSloppyWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalStrictWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofStrictWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
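
The eight LdaGlobal handlers above differ only along three axes: language mode (sloppy/strict), whether the load happens inside typeof, and operand width. A hypothetical helper, not part of V8, that makes the naming matrix explicit:

    #include <iostream>
    #include <string>

    std::string LdaGlobalVariant(bool strict, bool inside_typeof, bool wide) {
      std::string name = "LdaGlobal";
      if (inside_typeof) name += "InsideTypeof";
      name += strict ? "Strict" : "Sloppy";
      if (wide) name += "Wide";
      return name;
    }

    int main() {
      std::cout << LdaGlobalVariant(false, false, false) << "\n";  // LdaGlobalSloppy
      std::cout << LdaGlobalVariant(true, true, true) << "\n";
      // prints LdaGlobalInsideTypeofStrictWide
      return 0;
    }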
+
+
+void Interpreter::DoStoreGlobal(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ // Get the global object.
+ Node* context = __ GetContext();
+ Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+
+ // Store the global via the StoreIC.
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* constant_index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* value = __ GetAccumulator();
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
+ type_feedback_vector);
+
+ __ Dispatch();
+}
+
+
+// StaGlobalSloppy <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalStrict <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalSloppyWide <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoStaGlobalSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalStrictWide <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoStaGlobalStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// LdaContextSlot <context> <slot_index>
+//
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* result = __ LoadContextSlot(context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
-void Interpreter::DoPropertyLoadIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+// StaContextSlot <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ __ StoreContextSlot(context, slot_index, value);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoLoadIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* register_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(register_index);
+ Node* constant_index = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* raw_slot = __ BytecodeOperandIdx(2);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
+ type_feedback_vector);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// LoadICSloppy <object> <name_index> <slot>
+//
+// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+// LoadICStrict <object> <name_index> <slot>
+//
+// Calls the strict mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+// LoadICSloppyWide <object> <name_index> <slot>
+//
+// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+// LoadICStrictWide <object> <name_index> <slot>
+//
+// Calls the strict mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+void Interpreter::DoKeyedLoadIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -219,30 +487,123 @@ void Interpreter::DoPropertyLoadIC(Callable ic,
}
-// LoadIC <object> <slot>
+// KeyedLoadICSloppy <object> <slot>
//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name
-// in the accumulator.
-void Interpreter::DoLoadIC(compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
- DoPropertyLoadIC(ic, assembler);
+// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoKeyedLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadICStrict <object> <slot>
+//
+// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICStrict(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedLoadIC(ic, assembler);
}
-// KeyedLoadIC <object> <slot>
+// KeyedLoadICSloppyWide <object> <slot>
//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoKeyedLoadIC(compiler::InterpreterAssembler* assembler) {
+// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoPropertyLoadIC(ic, assembler);
+ DoKeyedLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadICStrictWide <object> <slot>
+//
+// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedLoadIC(ic, assembler);
+}
+
+
+void Interpreter::DoStoreIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* object_reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(object_reg_index);
+ Node* constant_index = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* value = __ GetAccumulator();
+ Node* raw_slot = __ BytecodeOperandIdx(2);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
+ type_feedback_vector);
+ __ Dispatch();
+}
+
+
+// StoreICSloppy <object> <name_index> <slot>
+//
+// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+// StoreICStrict <object> <name_index> <slot>
+//
+// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
}
-void Interpreter::DoPropertyStoreIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+// StoreICSloppyWide <object> <name_index> <slot>
+//
+// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+// StoreICStrictWide <object> <name_index> <slot>
+//
+// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+void Interpreter::DoKeyedStoreIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -252,32 +613,80 @@ void Interpreter::DoPropertyStoreIC(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(2);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Node* result = __ CallIC(ic.descriptor(), code_target, object, name, value,
- smi_slot, type_feedback_vector);
- __ SetAccumulator(result);
+ __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
+ type_feedback_vector);
__ Dispatch();
}
-// StoreIC <object> <name> <slot>
+// KeyedStoreICSloppy <object> <key> <slot>
//
-// Calls the StoreIC at FeedBackVector slot <slot> for <object> and the name
-// <name> with the value in the accumulator.
-void Interpreter::DoStoreIC(compiler::InterpreterAssembler* assembler) {
+// Calls the sloppy mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICSloppy(
+ compiler::InterpreterAssembler* assembler) {
Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoPropertyStoreIC(ic, assembler);
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICStrict <object> <key> <slot>
+//
+// Calls the strict mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICStrict(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedStoreIC(ic, assembler);
}
-// KeyedStoreIC <object> <key> <slot>
+// KeyedStoreICSloppyWide <object> <key> <slot>
//
-// Calls the KeyStoreIC at FeedBackVector slot <slot> for <object> and the key
-// <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreIC(compiler::InterpreterAssembler* assembler) {
+// Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoPropertyStoreIC(ic, assembler);
+ DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICStoreWide <object> <key> <slot>
+//
+// Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedStoreIC(ic, assembler);
+}
+
+
+// PushContext <context>
+//
+// Pushes the accumulator as the current context, and saves it in <context>.
+void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ GetAccumulator();
+ __ SetContext(context);
+ __ StoreRegister(context, reg_index);
+ __ Dispatch();
+}
+
+
+// PopContext <context>
+//
+// Pops the current context and sets <context> as the new context.
+void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ __ SetContext(context);
+ __ Dispatch();
}
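
Note that PushContext stores the new context, not the outgoing one, in its register operand; a generator that wants to restore the outer context with PopContext must have saved that outer context in some register itself. A stand-alone sketch of the pairing, with toy types instead of V8's:

    #include <cassert>

    struct Machine {
      int context = 0;   // stand-in for the current Context pointer
      int regs[4] = {0, 0, 0, 0};
      int acc = 0;

      void PushContext(int reg) {  // accumulator holds the new context
        context = acc;
        regs[reg] = acc;
      }
      void PopContext(int reg) { context = regs[reg]; }
    };

    int main() {
      Machine m;
      m.context = 1;     // function context
      m.regs[1] = 1;     // generator stashed the outer context in r1
      m.acc = 2;         // freshly allocated block context
      m.PushContext(0);  // enter the block scope
      assert(m.context == 2);
      m.PopContext(1);   // leave the block scope via the stashed context
      assert(m.context == 1);
      return 0;
    }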
@@ -334,10 +743,149 @@ void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
}
-// Call <receiver> <arg_count>
+// BitwiseOr <src>
+//
+// BitwiseOr register <src> to accumulator.
+void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kBitwiseOr, assembler);
+}
+
+
+// BitwiseXor <src>
+//
+// BitwiseXor register <src> to accumulator.
+void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kBitwiseXor, assembler);
+}
+
+
+// BitwiseAnd <src>
//
-// Call a JS function with receiver and |arg_count| arguments in subsequent
-// registers. The JSfunction or Callable to call is in the accumulator.
+// BitwiseAnd register <src> to accumulator.
+void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kBitwiseAnd, assembler);
+}
+
+
+// ShiftLeft <src>
+//
+// Left shifts register <src> by the count specified in the accumulator.
+// Register <src> is converted to an int32 and the accumulator to uint32
+// before the operation. The 5 least-significant bits of the accumulator are
+// used as the count, i.e. <src> << (accumulator & 0x1F).
+void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kShiftLeft, assembler);
+}
+
+
+// ShiftRight <src>
+//
+// Right shifts register <src> by the count specified in the accumulator.
+// Result is sign extended. Register <src> is converted to an int32 and the
+// accumulator to uint32 before the operation. The 5 least-significant bits of
+// the accumulator are used as the count, i.e. <src> >> (accumulator & 0x1F).
+void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kShiftRight, assembler);
+}
+
+
+// ShiftRightLogical <src>
+//
+// Right shifts register <src> by the count specified in the accumulator.
+// Result is zero-filled. The accumulator and register <src> are converted to
+// uint32 before the operation. The 5 least-significant bits of the accumulator
+// are used as the count, i.e. <src> >>> (accumulator & 0x1F).
+void Interpreter::DoShiftRightLogical(
+ compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kShiftRightLogical, assembler);
+}
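
A worked example of the 5-bit count masking described in the shift comments above (this assumes the usual arithmetic right shift for negative int32_t values, which production compilers provide):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t count = 33;                    // as if JS code wrote x >> 33
      int32_t src = -8;
      assert((src >> (count & 0x1F)) == -4);  // 33 & 0x1F == 1, sign-extended
      uint32_t usrc = static_cast<uint32_t>(src);
      assert((usrc >> (count & 0x1F)) == 0x7FFFFFFCu);  // >>> is zero-filled
      return 0;
    }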
+
+
+void Interpreter::DoCountOp(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* one = __ NumberConstant(1);
+ Node* result = __ CallRuntime(function_id, value, one);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// Inc
+//
+// Increments the value in the accumulator by one.
+void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+ DoCountOp(Runtime::kAdd, assembler);
+}
+
+
+// Dec
+//
+// Decrements the value in the accumulator by one.
+void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+ DoCountOp(Runtime::kSubtract, assembler);
+}
+
+
+// LogicalNot
+//
+// Perform logical-not on the accumulator, first casting the
+// accumulator to a boolean value if required.
+void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// TypeOf
+//
+// Load the accumulator with the string representing the type of the
+// object in the accumulator.
+void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoDelete(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* key = __ GetAccumulator();
+ Node* result = __ CallRuntime(function_id, object, key);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// DeletePropertyStrict
+//
+// Delete the property specified in the accumulator from the object
+// referenced by the register operand following strict mode semantics.
+void Interpreter::DoDeletePropertyStrict(
+ compiler::InterpreterAssembler* assembler) {
+ DoDelete(Runtime::kDeleteProperty_Strict, assembler);
+}
+
+
+// DeletePropertySloppy
+//
+// Delete the property specified in the accumulator from the object
+// referenced by the register operand following sloppy mode semantics.
+void Interpreter::DoDeletePropertySloppy(
+ compiler::InterpreterAssembler* assembler) {
+ DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
+}
+
+
+// Call <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
Node* function_reg = __ BytecodeOperandReg(0);
Node* function = __ LoadRegister(function_reg);
@@ -350,6 +898,65 @@ void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
}
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+ Node* function_id = __ BytecodeOperandIdx(0);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result = __ CallRuntime(function_id, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
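
CallRuntime relies on the arguments occupying adjacent registers: |first_arg| locates the start of the window and |arg_count| gives its length. A sketch of that contiguous-window convention (illustrative; the real code passes a register location into the runtime, not an index):

    #include <cassert>

    int SumArgs(const int* registers, int first_arg_index, int arg_count) {
      int sum = 0;
      for (int i = 0; i < arg_count; i++) {
        sum += registers[first_arg_index + i];  // args are adjacent registers
      }
      return sum;
    }

    int main() {
      int regs[8] = {0, 0, 7, 8, 9, 0, 0, 0};
      assert(SumArgs(regs, 2, 3) == 24);  // window of three registers at r2
      return 0;
    }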
+
+
+// CallJSRuntime <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+ Node* context_index = __ BytecodeOperandIdx(0);
+ Node* receiver_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(receiver_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+
+ // Get the function to call from the native context.
+ Node* context = __ GetContext();
+ Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+ Node* native_context =
+ __ LoadObjectField(global, JSGlobalObject::kNativeContextOffset);
+ Node* function = __ LoadContextSlot(native_context, context_index);
+
+ // Call the function.
+ Node* result = __ CallJS(function, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// New <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+ Node* constructor_reg = __ BytecodeOperandReg(0);
+ Node* constructor = __ LoadRegister(constructor_reg);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result =
+ __ CallConstruct(constructor, constructor, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
@@ -442,17 +1049,51 @@ void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
//
// Cast the object referenced by the accumulator to a boolean.
void Interpreter::DoToBoolean(compiler::InterpreterAssembler* assembler) {
- // TODO(oth): The next CL for test operations has interpreter specific
- // runtime calls. This looks like another candidate.
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ToName
+//
+// Cast the object referenced by the accumulator to a name.
+void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ToNumber
+//
+// Cast the object referenced by the accumulator to a number.
+void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ToObject
+//
+// Cast the object referenced by the accumulator to a JSObject.
+void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+ __ SetAccumulator(result);
__ Dispatch();
}
// Jump <imm8>
//
-// Jump by number of bytes represented by an immediate operand.
+// Jump by number of bytes represented by the immediate operand |imm8|.
void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* relative_jump = __ BytecodeOperandImm(0);
__ Jump(relative_jump);
}
@@ -474,7 +1115,7 @@ void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
// accumulator contains true.
void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
@@ -501,7 +1142,7 @@ void Interpreter::DoJumpIfTrueConstant(
// accumulator contains false.
void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
@@ -522,14 +1163,297 @@ void Interpreter::DoJumpIfFalseConstant(
}
+// JumpIfToBooleanTrue <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is true when the object is cast to boolean.
+void Interpreter::DoJumpIfToBooleanTrue(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* true_value = __ BooleanConstant(true);
+ __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+}
+
+
+// JumpIfToBooleanTrueConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the object referenced by the accumulator is true when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ Node* true_value = __ BooleanConstant(true);
+ __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+}
+
+
+// JumpIfToBooleanFalse <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is false when the object is cast to boolean.
+void Interpreter::DoJumpIfToBooleanFalse(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* false_value = __ BooleanConstant(false);
+ __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+}
+
+
+// JumpIfToBooleanFalseConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the object referenced by the accumulator is false when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ Node* false_value = __ BooleanConstant(false);
+ __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+}
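
The *Constant jump variants exist because an imm8 operand only reaches offsets in [-128, 127]; a longer jump stores its offset as a Smi in the constant pool and carries just the pool index. A sketch of the choice a bytecode emitter would make (illustrative, not V8's actual encoder):

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct JumpEncoder {
      std::vector<int> constant_pool;  // V8 stores these offsets as Smis

      static bool FitsInImm8(int offset) {
        return offset >= -128 && offset <= 127;
      }

      // Returns {operand byte, uses_constant_pool}.
      std::pair<uint8_t, bool> Encode(int offset) {
        if (FitsInImm8(offset)) {
          return {static_cast<uint8_t>(offset), false};  // Jump <imm8>
        }
        constant_pool.push_back(offset);                 // JumpConstant <idx8>
        return {static_cast<uint8_t>(constant_pool.size() - 1), true};
      }
    };

    int main() {
      JumpEncoder encoder;
      assert(!encoder.Encode(100).second);  // fits: immediate form
      assert(encoder.Encode(4000).second);  // too far: constant-pool form
      return 0;
    }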
+
+
+// JumpIfNull <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ __ JumpIfWordEqual(accumulator, null_value, relative_jump);
+}
+
+
+// JumpIfNullConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ __ JumpIfWordEqual(accumulator, null_value, relative_jump);
+}
+
+
+// JumpIfUndefined <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+}
+
+
+// JumpIfUndefinedConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+}
+
+
+// CreateRegExpLiteral <idx> <flags_reg>
+//
+// Creates a regular expression literal for literal index <idx> with flags held
+// in <flags_reg> and the pattern in the accumulator.
+void Interpreter::DoCreateRegExpLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ Node* pattern = __ GetAccumulator();
+ Node* literal_index_raw = __ BytecodeOperandIdx(0);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* flags_reg = __ BytecodeOperandReg(1);
+ Node* flags = __ LoadRegister(flags_reg);
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* literals_array =
+ __ LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* result = __ CallRuntime(Runtime::kMaterializeRegExpLiteral,
+ literals_array, literal_index, pattern, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* constant_elements = __ GetAccumulator();
+ Node* literal_index_raw = __ BytecodeOperandIdx(0);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* flags_raw = __ BytecodeOperandImm(1);
+ Node* flags = __ SmiTag(flags_raw);
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* literals_array =
+ __ LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* result = __ CallRuntime(function_id, literals_array, literal_index,
+ constant_elements, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CreateArrayLiteral <idx> <flags>
+//
+// Creates an array literal for literal index <idx> with flags <flags> and
+// constant elements in the accumulator.
+void Interpreter::DoCreateArrayLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+}
+
+
+// CreateObjectLiteral <idx> <flags>
+//
+// Creates an object literal for literal index <idx> with flags <flags> and
+// constant elements in the accumulator.
+void Interpreter::DoCreateObjectLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+}
+
+
+// CreateClosure <tenured>
+//
+// Creates a new closure for SharedFunctionInfo in the accumulator with the
+// PretenureFlag <tenured>.
+void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
+ // calling into the runtime.
+ Node* shared = __ GetAccumulator();
+ Node* tenured_raw = __ BytecodeOperandImm(0);
+ Node* tenured = __ SmiTag(tenured_raw);
+ Node* result =
+ __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CreateMappedArguments
+//
+// Creates a new mapped arguments object.
+void Interpreter::DoCreateMappedArguments(
+ compiler::InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CreateUnmappedArguments
+//
+// Creates a new unmapped arguments object.
+void Interpreter::DoCreateUnmappedArguments(
+ compiler::InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// Throw
+//
+// Throws the exception in the accumulator.
+void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
+ Node* exception = __ GetAccumulator();
+ __ CallRuntime(Runtime::kThrow, exception);
+ // We shouldn't ever return from a throw.
+ __ Abort(kUnexpectedReturnFromThrow);
+}
+
+
// Return
//
-// Return the value in register 0.
+// Return the value in the accumulator.
void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
__ Return();
}
+// ForInPrepare <receiver>
+//
+// Returns state for for..in loop execution based on the |receiver| and
+// the property names in the accumulator.
+void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+ Node* receiver_reg = __ BytecodeOperandReg(0);
+ Node* receiver = __ LoadRegister(receiver_reg);
+ Node* property_names = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, receiver,
+ property_names);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ForInNext <for_in_state> <index>
+//
+// Returns the next key in a for..in loop. The state associated with the
+// iteration is contained in |for_in_state| and |index| is the current
+// zero-based iteration count.
+void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
+ Node* for_in_state_reg = __ BytecodeOperandReg(0);
+ Node* for_in_state = __ LoadRegister(for_in_state_reg);
+ Node* receiver = __ LoadFixedArrayElement(for_in_state, 0);
+ Node* cache_array = __ LoadFixedArrayElement(for_in_state, 1);
+ Node* cache_type = __ LoadFixedArrayElement(for_in_state, 2);
+ Node* index_reg = __ BytecodeOperandReg(1);
+ Node* index = __ LoadRegister(index_reg);
+ Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
+ cache_type, index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ForInDone <for_in_state>
+//
+// Returns true if the end of the for..in iteration has been reached. The
+// accumulator contains the current zero-based iteration count and
+// |for_in_state| is the state returned by an earlier invocation of
+// ForInPrepare.
+void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+ Node* index = __ GetAccumulator();
+ Node* for_in_state_reg = __ BytecodeOperandReg(0);
+ Node* for_in_state = __ LoadRegister(for_in_state_reg);
+ Node* cache_length = __ LoadFixedArrayElement(for_in_state, 3);
+ Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
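
ForInPrepare, ForInNext and ForInDone form a small protocol: the state array holds the receiver (index 0), cache array (1), cache type (2) and cache length (3), exactly the elements the handlers above read back, and the generator drives them in a loop. A self-contained mimic of the protocol with a toy state object:

    #include <cassert>
    #include <vector>

    struct ForInState {
      std::vector<int> keys;  // stands in for the enum cache array
    };

    bool ForInDoneOp(int index, const ForInState& state) {
      return index >= static_cast<int>(state.keys.size());  // vs cache_length
    }

    int ForInNextOp(const ForInState& state, int index) {
      return state.keys[index];
    }

    int main() {
      ForInState state{{10, 20, 30}};  // what ForInPrepare would build
      int sum = 0;
      // ForInDone / body / Inc / Jump, as the generator would emit it.
      for (int index = 0; !ForInDoneOp(index, state); index++) {
        sum += ForInNextOp(state, index);  // ForInNext
      }
      assert(sum == 60);
      return 0;
    }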
+
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index c32b6831d0..560aba19d7 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -54,17 +54,43 @@ class Interpreter {
void DoBinaryOp(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
+ // Generates code to perform the count operation via |function_id|.
+ void DoCountOp(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
// Generates code to perform the comparison operation associated with
// |compare_op|.
void DoCompareOp(Token::Value compare_op,
compiler::InterpreterAssembler* assembler);
- // Generates code to perform a property load via |ic|.
- void DoPropertyLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ // Generates code to load a constant from the constant pool.
+ void DoLoadConstant(compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a global load via |ic|.
+ void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a global store via |ic|.
+ void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a named property load via |ic|.
+ void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a keyed property load via |ic|.
+ void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a named property store via |ic|.
+ void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a keyed property store via |ic|.
+ void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to create a literal via |function_id|.
+ void DoCreateLiteral(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
- // Generates code to perform a property store via |ic|.
- void DoPropertyStoreIC(Callable ic,
- compiler::InterpreterAssembler* assembler);
+ // Generates code to perform delete via |function_id|.
+ void DoDelete(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index c281c24639..89f285b2ee 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -72,8 +72,8 @@ bool Isolate::is_catchable_by_javascript(Object* exception) {
}
-Handle<GlobalObject> Isolate::global_object() {
- return Handle<GlobalObject>(context()->global_object());
+Handle<JSGlobalObject> Isolate::global_object() {
+ return Handle<JSGlobalObject>(context()->global_object());
}
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 481cb42f05..bb0cdf44bb 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -18,14 +18,13 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
+#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/hydrogen.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
@@ -317,9 +316,8 @@ static bool IsVisibleInStackTrace(JSFunction* fun,
// exposed, in which case the native flag is set.
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces) {
- if (receiver->IsJSBuiltinsObject()) return false;
- if (fun->IsBuiltin()) return fun->shared()->native();
+ if (!FLAG_builtins_in_stack_traces && fun->shared()->IsBuiltin()) {
+ return fun->shared()->native();
}
return true;
}
@@ -782,12 +780,12 @@ bool Isolate::IsInternallyUsedPropertyName(Object* name) {
}
-bool Isolate::MayAccess(Handle<JSObject> receiver) {
+bool Isolate::MayAccess(Handle<Context> accessing_context,
+ Handle<JSObject> receiver) {
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
- DCHECK(context());
{
DisallowHeapAllocation no_gc;
@@ -801,7 +799,8 @@ bool Isolate::MayAccess(Handle<JSObject> receiver) {
// Get the native context of current top context.
// avoid using Isolate::native_context() because it uses Handle.
- Context* native_context = context()->global_object()->native_context();
+ Context* native_context =
+ accessing_context->global_object()->native_context();
if (receiver_context == native_context) return true;
if (Context::cast(receiver_context)->security_token() ==
@@ -812,23 +811,34 @@ bool Isolate::MayAccess(Handle<JSObject> receiver) {
HandleScope scope(this);
Handle<Object> data;
- v8::NamedSecurityCallback callback;
+ v8::AccessCheckCallback callback = nullptr;
+ v8::NamedSecurityCallback named_callback = nullptr;
{ DisallowHeapAllocation no_gc;
AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
if (!access_check_info) return false;
- Object* fun_obj = access_check_info->named_callback();
- callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
- if (!callback) return false;
- data = handle(access_check_info->data(), this);
+ Object* fun_obj = access_check_info->callback();
+ callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
+ if (!callback) {
+ fun_obj = access_check_info->named_callback();
+ named_callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+ if (!named_callback) return false;
+ data = handle(access_check_info->data(), this);
+ }
}
LOG(this, ApiSecurityCheck());
- // Leaving JavaScript.
- VMState<EXTERNAL> state(this);
- Handle<Object> key = factory()->undefined_value();
- return callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
- v8::ACCESS_HAS, v8::Utils::ToLocal(data));
+ {
+ // Leaving JavaScript.
+ VMState<EXTERNAL> state(this);
+ if (callback) {
+ return callback(v8::Utils::ToLocal(accessing_context),
+ v8::Utils::ToLocal(receiver));
+ }
+ Handle<Object> key = factory()->undefined_value();
+ return named_callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
+ v8::ACCESS_HAS, v8::Utils::ToLocal(data));
+ }
}
@@ -1331,7 +1341,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
for (int i = 1; i < elements_limit; i += 4) {
Handle<JSFunction> fun =
handle(JSFunction::cast(elements->get(i + 1)), this);
- if (!fun->IsSubjectToDebugging()) continue;
+ if (!fun->shared()->IsSubjectToDebugging()) continue;
Object* script = fun->shared()->script();
if (script->IsScript() &&
@@ -1778,8 +1788,10 @@ Isolate::Isolate(bool enable_serializer)
deferred_handles_head_(NULL),
optimizing_compile_dispatcher_(NULL),
stress_deopt_count_(0),
- vector_store_virtual_register_(NULL),
+ virtual_handler_register_(NULL),
+ virtual_slot_register_(NULL),
next_optimization_id_(0),
+ js_calls_from_api_counter_(0),
#if TRACE_MAPS
next_unique_sfi_id_(0),
#endif
@@ -1862,8 +1874,6 @@ void Isolate::ClearSerializerData() {
external_reference_table_ = NULL;
delete external_reference_map_;
external_reference_map_ = NULL;
- delete root_index_map_;
- root_index_map_ = NULL;
}
@@ -1923,6 +1933,9 @@ void Isolate::Deinit() {
delete cpu_profiler_;
cpu_profiler_ = NULL;
+ delete root_index_map_;
+ root_index_map_ = NULL;
+
ClearSerializerData();
}
@@ -2571,6 +2584,7 @@ Handle<JSObject> Isolate::GetSymbolRegistry() {
SetUpSubregistry(registry, map, "for");
SetUpSubregistry(registry, map, "for_api");
SetUpSubregistry(registry, map, "keyFor");
+ SetUpSubregistry(registry, map, "private_api");
}
return Handle<JSObject>::cast(factory()->symbol_registry());
}
@@ -2668,9 +2682,9 @@ void Isolate::RunMicrotasks() {
SaveContext save(this);
set_context(microtask_function->context()->native_context());
MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> result =
- Execution::TryCall(microtask_function, factory()->undefined_value(),
- 0, NULL, &maybe_exception);
+ MaybeHandle<Object> result = Execution::TryCall(
+ this, microtask_function, factory()->undefined_value(), 0, NULL,
+ &maybe_exception);
// If execution is terminating, just bail out.
Handle<Object> exception;
if (result.is_null() && maybe_exception.is_null()) {
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index e77dcd705e..79988f8f27 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -617,7 +617,7 @@ class Isolate {
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
- inline Handle<GlobalObject> global_object();
+ inline Handle<JSGlobalObject> global_object();
// Returns the global proxy object of the current context.
JSObject* global_proxy() {
@@ -679,11 +679,11 @@ class Isolate {
Handle<JSArray> GetDetailedFromSimpleStackTrace(
Handle<JSObject> error_object);
- // Returns if the top context may access the given global object. If
+ // Returns whether the given context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
+ bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
- bool MayAccess(Handle<JSObject> receiver);
bool IsInternallyUsedPropertyName(Handle<Object> name);
bool IsInternallyUsedPropertyName(Object* name);
@@ -1027,10 +1027,12 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- void* vector_store_virtual_register_address() {
- return &vector_store_virtual_register_;
+ void* virtual_handler_register_address() {
+ return &virtual_handler_register_;
}
+ void* virtual_slot_register_address() { return &virtual_slot_register_; }
+
base::RandomNumberGenerator* random_number_generator();
// Given an address occupied by a live code object, return that object.
@@ -1044,6 +1046,12 @@ class Isolate {
return id;
}
+ void IncrementJsCallsFromApiCounter() { ++js_calls_from_api_counter_; }
+
+ unsigned int js_calls_from_api_counter() {
+ return js_calls_from_api_counter_;
+ }
+
// Get (and lazily initialize) the registry for per-isolate symbols.
Handle<JSObject> GetSymbolRegistry();
@@ -1306,10 +1314,14 @@ class Isolate {
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
- Address vector_store_virtual_register_;
+ Address virtual_handler_register_;
+ Address virtual_slot_register_;
int next_optimization_id_;
+ // Counts JavaScript calls from the API. Wraps around on overflow.
+ unsigned int js_calls_from_api_counter_;
+
#if TRACE_MAPS
int next_unique_sfi_id_;
#endif
@@ -1556,6 +1568,7 @@ class CodeTracer final : public Malloced {
int scope_depth_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ISOLATE_H_
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/js/array-iterator.js
index bf17a0ac8c..f0754ad093 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/js/array-iterator.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $arrayValues;
-
(function(global, utils) {
"use strict";
@@ -20,7 +18,9 @@ var arrayIteratorNextIndexSymbol =
var arrayIteratorObjectSymbol =
utils.ImportNow("array_iterator_object_symbol");
var GlobalArray = global.Array;
+var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
@@ -41,6 +41,10 @@ endmacro
TYPED_ARRAYS(COPY_FROM_GLOBAL)
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+})
+
// -----------------------------------------------------------------------
function ArrayIterator() {}
@@ -123,7 +127,7 @@ function ArrayKeys() {
}
-%FunctionSetPrototype(ArrayIterator, {__proto__: $iteratorPrototype});
+%FunctionSetPrototype(ArrayIterator, {__proto__: IteratorPrototype});
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
@@ -161,7 +165,9 @@ TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
// -------------------------------------------------------------------
// Exports
-$arrayValues = ArrayValues;
+utils.Export(function(to) {
+ to.ArrayValues = ArrayValues;
+});
%InstallToContext(["array_values_iterator", ArrayValues]);
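The export hunk above is one instance of a pattern running through this commit: `$`-prefixed globals give way to the natives' explicit utils.Export/utils.Import handshake (the consumer side appears in array.js just below). A minimal runnable model of the convention, with the caveat that the real bootstrapper defers Import callbacks rather than resolving them immediately:

    // Toy model of the natives module system.
    var registry = {};
    var utils = {
      Export: function(fn) { fn(registry); },
      Import: function(fn) { fn(registry); },
    };

    (function producer() {  // cf. array-iterator.js above
      function ArrayValues() { return "values iterator"; }
      utils.Export(function(to) { to.ArrayValues = ArrayValues; });
    })();

    (function consumer() {  // cf. array.js below
      var ArrayValues;
      utils.Import(function(from) { ArrayValues = from.ArrayValues; });
      console.log(ArrayValues());  // "values iterator"
    })();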
diff --git a/deps/v8/src/array.js b/deps/v8/src/js/array.js
index bf04bb7e7b..294e474be6 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/js/array.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, utils) {
+(function(global, utils, extrasUtils) {
"use strict";
@@ -11,28 +11,48 @@
// -------------------------------------------------------------------
// Imports
+var AddIndexedProperty;
var Delete;
+var FLAG_harmony_tolength;
+var GetIterator;
+var GetMethod;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-var MathMin;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
+var ObjectDefineProperty;
var ObjectHasOwnProperty;
var ObjectIsFrozen;
var ObjectIsSealed;
var ObjectToString;
-var ToNumber;
-var ToString;
+var ObserveBeginPerformSplice;
+var ObserveEndPerformSplice;
+var ObserveEnqueueSpliceRecord;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
+ AddIndexedProperty = from.AddIndexedProperty;
Delete = from.Delete;
- MathMin = from.MathMin;
+ GetIterator = from.GetIterator;
+ GetMethod = from.GetMethod;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ ObjectDefineProperty = from.ObjectDefineProperty;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
ObjectIsFrozen = from.ObjectIsFrozen;
ObjectIsSealed = from.ObjectIsSealed;
ObjectToString = from.ObjectToString;
- ToNumber = from.ToNumber;
- ToString = from.ToString;
+ ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
+ ObserveEndPerformSplice = from.ObserveEndPerformSplice;
+ ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
+});
+
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tolength = from.FLAG_harmony_tolength;
});
// -------------------------------------------------------------------
@@ -221,11 +241,7 @@ function ConvertToLocaleString(e) {
if (IS_NULL_OR_UNDEFINED(e)) {
return '';
} else {
- // According to ES5, section 15.4.4.3, the toLocaleString conversion
- // must throw a TypeError if ToObject(e).toLocaleString isn't
- // callable.
- var e_obj = TO_OBJECT(e);
- return TO_STRING(e_obj.toLocaleString());
+ return TO_STRING(e.toLocaleString());
}
}
@@ -240,7 +256,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
- %AddElement(deleted_elements, i - start_i, current);
+ AddIndexedProperty(deleted_elements, i - start_i, current);
}
}
} else {
@@ -251,7 +267,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
- %AddElement(deleted_elements, key - start_i, current);
+ AddIndexedProperty(deleted_elements, key - start_i, current);
}
}
}
@@ -268,7 +284,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
- MathMin(len - del_count + num_additional_args, 0xffffffff));
+ MinSimple(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
@@ -331,9 +347,9 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
var index = start_i + i;
if (HAS_INDEX(array, index, is_array)) {
var current = array[index];
- // The spec requires [[DefineOwnProperty]] here, %AddElement is close
- // enough (in that it ignores the prototype).
- %AddElement(deleted_elements, i, current);
+ // The spec requires [[DefineOwnProperty]] here, AddIndexedProperty is
+ // close enough (in that it ignores the prototype).
+ AddIndexedProperty(deleted_elements, i, current);
}
}
}
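The distinction the comment draws is observable: AddIndexedProperty defines the element on the result directly, so an accessor planted on Array.prototype never fires during a slice. A quick check (it cleans up after itself):

    Object.defineProperty(Array.prototype, "0", {
      configurable: true,
      set: function(v) { throw new Error("setter ran"); },
    });
    console.log([1, 2, 3].slice(1));  // [2, 3] -- defined, not assigned
    delete Array.prototype["0"];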
@@ -389,7 +405,7 @@ function ArrayToString() {
func = array.join;
}
if (!IS_CALLABLE(func)) {
- return %_CallFunction(array, ObjectToString);
+ return %_Call(ObjectToString, array);
}
return %_Call(func, array);
}
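This hunk is the first of many mechanical conversions in the JS files of this commit: the retired %_CallFunction intrinsic took the receiver first and the callee last, while %_Call takes the callee first, as every changed call site below shows. A plain-JavaScript analogue of the two argument orders (the wrapper names are hypothetical; the intrinsics themselves are V8-internal):

    function callFunctionStyle(receiver) {      // old %_CallFunction order
      var args = Array.prototype.slice.call(arguments, 1);
      var fn = args.pop();                      // callee comes last
      return fn.apply(receiver, args);
    }
    function callStyle(fn, receiver) {          // new %_Call order
      return fn.apply(receiver, Array.prototype.slice.call(arguments, 2));
    }

    function getX() { return this.x; }
    callFunctionStyle({ x: 42 }, getX);  // 42
    callStyle(getX, { x: 42 });          // 42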
@@ -445,12 +461,12 @@ function ObservedArrayPop(n) {
var value = this[n];
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, n, [value], 0);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, n, [value], 0);
}
return value;
@@ -485,15 +501,15 @@ function ObservedArrayPush() {
var m = %_ArgumentsLength();
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
var new_length = n + m;
this.length = new_length;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, n, [], m);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, n, [], m);
}
return new_length;
@@ -512,6 +528,15 @@ function ArrayPush() {
var n = TO_LENGTH_OR_UINT32(array.length);
var m = %_ArgumentsLength();
+ // It appears that there is no enforced, absolute limit on the number of
+ // arguments, but it would surely blow the stack to use 2**30 or more.
+ // To avoid integer overflow, do the comparison to the max safe integer
+ // after subtracting 2**30 from both sides. (2**31 would seem like a
+ // natural value, but it is negative in JS, and 2**32 is 1.)
+ if (m > (1 << 30) || (n - (1 << 30)) + m > kMaxSafeInteger - (1 << 30)) {
+ throw MakeTypeError(kPushPastSafeLength, m, n);
+ }
+
for (var i = 0; i < m; i++) {
array[i+n] = %_Arguments(i);
}
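A worked check of the arithmetic in that guard: with kMaxSafeInteger = 2**53 - 1, the naive test n + m > kMaxSafeInteger can silently round once n + m exceeds 2**53, but after the first check guarantees m <= 2**30, both (n - 2**30) + m and kMaxSafeInteger - 2**30 stay at or below 2**53 - 1 and are computed exactly. A standalone sketch of the guard:

    var kMaxSafeInteger = Math.pow(2, 53) - 1;

    // Standalone version of the length guard in ArrayPush above.
    function checkPushLength(n, m) {  // n: old length, m: argument count
      if (m > (1 << 30) || (n - (1 << 30)) + m > kMaxSafeInteger - (1 << 30)) {
        throw new TypeError("push would exceed the maximum safe array length");
      }
    }

    checkPushLength(0, 1000);             // ok
    checkPushLength(kMaxSafeInteger, 1);  // throws TypeError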
@@ -625,12 +650,12 @@ function ObservedArrayShift(len) {
var first = this[0];
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, 0, [first], 0);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
@@ -672,7 +697,7 @@ function ObservedArrayUnshift() {
var num_arguments = %_ArgumentsLength();
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
for (var i = 0; i < num_arguments; i++) {
this[i] = %_Arguments(i);
@@ -680,8 +705,8 @@ function ObservedArrayUnshift() {
var new_length = len + num_arguments;
this.length = new_length;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, 0, [], num_arguments);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, 0, [], num_arguments);
}
return new_length;
@@ -799,7 +824,7 @@ function ObservedArraySplice(start, delete_count) {
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
SimpleSlice(this, start_i, del_count, len, deleted_elements);
SimpleMove(this, start_i, del_count, len, num_elements_to_add);
@@ -815,12 +840,12 @@ function ObservedArraySplice(start, delete_count) {
this.length = len - del_count + num_elements_to_add;
} finally {
- $observeEndPerformSplice(this);
+ ObserveEndPerformSplice(this);
if (deleted_elements.length || num_elements_to_add) {
- $observeEnqueueSpliceRecord(this,
- start_i,
- deleted_elements.slice(),
- num_elements_to_add);
+ ObserveEnqueueSpliceRecord(this,
+ start_i,
+ deleted_elements.slice(),
+ num_elements_to_add);
}
}
@@ -892,8 +917,8 @@ function InnerArraySort(array, length, comparefn) {
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
- x = ToString(x);
- y = ToString(y);
+ x = TO_STRING(x);
+ y = TO_STRING(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
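Behaviour is unchanged by the TO_STRING swap: without a user comparefn the default comparator still orders elements as strings, with the %SmiLexicographicCompare fast path for small-integer pairs. Hence the classic:

    [10, 9, 1].sort();                                  // [1, 10, 9]
    [10, 9, 1].sort(function(a, b) { return a - b; });  // [1, 9, 10]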
@@ -1187,7 +1212,7 @@ function InnerArrayFilter(f, receiver, array, length) {
var accumulator = new InternalArray();
var accumulator_length = 0;
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1203,6 +1228,7 @@ function InnerArrayFilter(f, receiver, array, length) {
return result;
}
+
function ArrayFilter(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
@@ -1213,11 +1239,12 @@ function ArrayFilter(f, receiver) {
return InnerArrayFilter(f, receiver, array, length);
}
+
function InnerArrayForEach(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1228,6 +1255,7 @@ function InnerArrayForEach(f, receiver, array, length) {
}
}
+
function ArrayForEach(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
@@ -1243,7 +1271,7 @@ function InnerArraySome(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1273,7 +1301,7 @@ function InnerArrayEvery(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1301,7 +1329,7 @@ function InnerArrayMap(f, receiver, array, length) {
var accumulator = new InternalArray(length);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1469,7 +1497,7 @@ function InnerArrayReduce(callback, current, array, length, argumentsLength) {
throw MakeTypeError(kReduceNoInitial);
}
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
+ var stepping = DEBUG_IS_STEPPING(callback);
for (; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1512,7 +1540,7 @@ function InnerArrayReduceRight(callback, current, array, length,
throw MakeTypeError(kReduceNoInitial);
}
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
+ var stepping = DEBUG_IS_STEPPING(callback);
for (; i >= 0; i--) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
@@ -1536,12 +1564,254 @@ function ArrayReduceRight(callback, current) {
%_ArgumentsLength());
}
+
+function InnerArrayCopyWithin(target, start, end, array, length) {
+ target = TO_INTEGER(target);
+ var to;
+ if (target < 0) {
+ to = MaxSimple(length + target, 0);
+ } else {
+ to = MinSimple(target, length);
+ }
+
+ start = TO_INTEGER(start);
+ var from;
+ if (start < 0) {
+ from = MaxSimple(length + start, 0);
+ } else {
+ from = MinSimple(start, length);
+ }
+
+ end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+ var final;
+ if (end < 0) {
+ final = MaxSimple(length + end, 0);
+ } else {
+ final = MinSimple(end, length);
+ }
+
+ var count = MinSimple(final - from, length - to);
+ var direction = 1;
+ if (from < to && to < (from + count)) {
+ direction = -1;
+ from = from + count - 1;
+ to = to + count - 1;
+ }
+
+ while (count > 0) {
+ if (from in array) {
+ array[to] = array[from];
+ } else {
+ delete array[to];
+ }
+ from = from + direction;
+ to = to + direction;
+ count--;
+ }
+
+ return array;
+}
+
+
+// ES6 draft 03-17-15, section 22.1.3.3
+function ArrayCopyWithin(target, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+
+ return InnerArrayCopyWithin(target, start, end, array, length);
+}
+
+
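The direction flip in InnerArrayCopyWithin handles overlap: when the target window starts inside the source window, a forward copy would clobber elements before reading them, so the loop walks backwards from the far end. Two examples matching these semantics:

    [1, 2, 3, 4, 5].copyWithin(0, 3);     // [4, 5, 3, 4, 5]
    // Overlap: to = 1 lies inside [from, from + count) = [0, 4),
    // so the copy runs backwards and no source value is lost.
    [1, 2, 3, 4, 5].copyWithin(1, 0, 4);  // [1, 1, 2, 3, 4]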
+function InnerArrayFind(predicate, thisArg, array, length) {
+ if (!IS_CALLABLE(predicate)) {
+ throw MakeTypeError(kCalledNonCallable, predicate);
+ }
+
+ for (var i = 0; i < length; i++) {
+ var element = array[i];
+ if (%_Call(predicate, thisArg, element, i, array)) {
+ return element;
+ }
+ }
+
+ return;
+}
+
+
+// ES6 draft 07-15-13, section 15.4.3.23
+function ArrayFind(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
+
+ var array = TO_OBJECT(this);
+ var length = TO_INTEGER(array.length);
+
+ return InnerArrayFind(predicate, thisArg, array, length);
+}
+
+
+function InnerArrayFindIndex(predicate, thisArg, array, length) {
+ if (!IS_CALLABLE(predicate)) {
+ throw MakeTypeError(kCalledNonCallable, predicate);
+ }
+
+ for (var i = 0; i < length; i++) {
+ var element = array[i];
+ if (%_Call(predicate, thisArg, element, i, array)) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+
+// ES6 draft 07-15-13, section 15.4.3.24
+function ArrayFindIndex(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
+
+ var array = TO_OBJECT(this);
+ var length = TO_INTEGER(array.length);
+
+ return InnerArrayFindIndex(predicate, thisArg, array, length);
+}
+
+
+// ES6, draft 04-05-14, section 22.1.3.6
+function InnerArrayFill(value, start, end, array, length) {
+ var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
+ var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+
+ if (i < 0) {
+ i += length;
+ if (i < 0) i = 0;
+ } else {
+ if (i > length) i = length;
+ }
+
+ if (end < 0) {
+ end += length;
+ if (end < 0) end = 0;
+ } else {
+ if (end > length) end = length;
+ }
+
+ if ((end - i) > 0 && ObjectIsFrozen(array)) {
+ throw MakeTypeError(kArrayFunctionsOnFrozen);
+ }
+
+ for (; i < end; i++)
+ array[i] = value;
+ return array;
+}
+
+
+// ES6, draft 04-05-14, section 22.1.3.6
+function ArrayFill(value, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH_OR_UINT32(array.length);
+
+ return InnerArrayFill(value, start, end, array, length);
+}
+
+
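Quick usage of the other three additions, including fill's negative-offset normalization (offsets count back from the end, exactly as the clamping in InnerArrayFill above computes):

    [5, 12, 8].find(function(x) { return x > 7; });       // 12
    [5, 12, 8].findIndex(function(x) { return x > 7; });  // 1
    [1, 2, 3, 4].fill(0, -2);                             // [1, 2, 0, 0]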
// ES5, 15.4.3.2
function ArrayIsArray(obj) {
return IS_ARRAY(obj);
}
+function AddArrayElement(constructor, array, i, value) {
+ if (constructor === GlobalArray) {
+ AddIndexedProperty(array, i, value);
+ } else {
+ ObjectDefineProperty(array, i, {
+ value: value, writable: true, configurable: true, enumerable: true
+ });
+ }
+}
+
+
+// ES6, draft 10-14-14, section 22.1.2.1
+function ArrayFrom(arrayLike, mapfn, receiver) {
+ var items = TO_OBJECT(arrayLike);
+ var mapping = !IS_UNDEFINED(mapfn);
+
+ if (mapping) {
+ if (!IS_CALLABLE(mapfn)) {
+ throw MakeTypeError(kCalledNonCallable, mapfn);
+ }
+ }
+
+ var iterable = GetMethod(items, iteratorSymbol);
+ var k;
+ var result;
+ var mappedValue;
+ var nextValue;
+
+ if (!IS_UNDEFINED(iterable)) {
+ result = %IsConstructor(this) ? new this() : [];
+
+ var iterator = GetIterator(items, iterable);
+
+ k = 0;
+ while (true) {
+ var next = iterator.next();
+
+ if (!IS_OBJECT(next)) {
+ throw MakeTypeError(kIteratorResultNotAnObject, next);
+ }
+
+ if (next.done) {
+ result.length = k;
+ return result;
+ }
+
+ nextValue = next.value;
+ if (mapping) {
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
+ } else {
+ mappedValue = nextValue;
+ }
+ AddArrayElement(this, result, k, mappedValue);
+ k++;
+ }
+ } else {
+ var len = TO_LENGTH(items.length);
+ result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
+
+ for (k = 0; k < len; ++k) {
+ nextValue = items[k];
+ if (mapping) {
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
+ } else {
+ mappedValue = nextValue;
+ }
+ AddArrayElement(this, result, k, mappedValue);
+ }
+
+ result.length = k;
+ return result;
+ }
+}
+
+
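ArrayFrom takes two routes: if GetMethod finds an @@iterator it drives the iterator protocol (rejecting non-object results with kIteratorResultNotAnObject), otherwise it falls back to reading length and indexed properties. Both are observable:

    Array.from(new Set([1, 2, 2, 3]));          // [1, 2, 3]  (iterator path)
    Array.from({ length: 3, 0: "a", 2: "c" });  // ["a", undefined, "c"]
    Array.from([1, 2, 3], function(x, i) { return x * i; });  // [0, 2, 6]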
+// ES6, draft 05-22-14, section 22.1.2.3
+function ArrayOf() {
+ var length = %_ArgumentsLength();
+ var constructor = this;
+ // TODO: Implement IsConstructor (ES6 section 7.2.5)
+ var array = %IsConstructor(constructor) ? new constructor(length) : [];
+ for (var i = 0; i < length; i++) {
+ AddArrayElement(constructor, array, i, %_Arguments(i));
+ }
+ array.length = length;
+ return array;
+}
+
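ArrayOf spreads its arguments, and, like ArrayFrom, routes each element through AddArrayElement so that a non-Array `this` constructor gets spec-style defineProperty semantics instead of the AddIndexedProperty fast path:

    Array.of(7);        // [7]  (contrast: Array(7) makes 7 empty slots)
    Array.of(1, 2, 3);  // [1, 2, 3]

    // With another constructor as `this`, the result is allocated from it.
    function ArrayLike(n) {}
    Array.of.call(ArrayLike, "x", "y") instanceof ArrayLike;  // true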
// -------------------------------------------------------------------
// Set up non-enumerable constructor property on the Array.prototype
@@ -1563,9 +1833,13 @@ var unscopables = {
%AddNamedProperty(GlobalArray.prototype, unscopablesSymbol, unscopables,
DONT_ENUM | READ_ONLY);
+%FunctionSetLength(ArrayFrom, 1);
+
// Set up non-enumerable functions on the Array object.
utils.InstallFunctions(GlobalArray, DONT_ENUM, [
- "isArray", ArrayIsArray
+ "isArray", ArrayIsArray,
+ "from", ArrayFrom,
+ "of", ArrayOf
]);
var specialFunctions = %SpecialArrayFunctions();
@@ -1605,7 +1879,11 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"indexOf", getFunction("indexOf", ArrayIndexOf, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"reduce", getFunction("reduce", ArrayReduce, 1),
- "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
+ "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1),
+ "copyWithin", getFunction("copyWithin", ArrayCopyWithin, 2),
+ "find", getFunction("find", ArrayFind, 1),
+ "findIndex", getFunction("findIndex", ArrayFindIndex, 1),
+ "fill", getFunction("fill", ArrayFill, 1)
]);
%FinishArrayPrototypeSetup(GlobalArray.prototype);
@@ -1630,16 +1908,32 @@ utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
"shift", getFunction("shift", ArrayShift)
]);
+// V8 extras get a separate copy of InternalPackedArray. We give them the basic
+// manipulation methods.
+utils.SetUpLockedPrototype(extrasUtils.InternalPackedArray, GlobalArray(), [
+ "push", getFunction("push", ArrayPush),
+ "pop", getFunction("pop", ArrayPop),
+ "shift", getFunction("shift", ArrayShift),
+ "unshift", getFunction("unshift", ArrayUnshift),
+ "splice", getFunction("splice", ArraySplice),
+ "slice", getFunction("slice", ArraySlice)
+]);
+
// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
+ to.ArrayFrom = ArrayFrom;
to.ArrayIndexOf = ArrayIndexOf;
to.ArrayJoin = ArrayJoin;
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
+ to.InnerArrayCopyWithin = InnerArrayCopyWithin;
to.InnerArrayEvery = InnerArrayEvery;
+ to.InnerArrayFill = InnerArrayFill;
to.InnerArrayFilter = InnerArrayFilter;
+ to.InnerArrayFind = InnerArrayFind;
+ to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayForEach = InnerArrayForEach;
to.InnerArrayIndexOf = InnerArrayIndexOf;
to.InnerArrayJoin = InnerArrayJoin;
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/js/arraybuffer.js
index 0db0c2bf04..1159488160 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/js/arraybuffer.js
@@ -13,22 +13,24 @@
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalObject = global.Object;
-var MathMax;
-var MathMin;
-var ToNumber;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
+var ToPositiveInteger;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- ToNumber = from.ToNumber;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ ToPositiveInteger = from.ToPositiveInteger;
});
// -------------------------------------------------------------------
function ArrayBufferConstructor(length) { // length = 1
if (%_IsConstructCall()) {
- var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
+ var byteLength = ToPositiveInteger(length, kInvalidArrayBufferLength);
%ArrayBufferInitialize(this, byteLength, kNotShared);
} else {
throw MakeTypeError(kConstructorNotFunction, "ArrayBuffer");
@@ -57,16 +59,16 @@ function ArrayBufferSlice(start, end) {
var first;
var byte_length = %_ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
- first = MathMax(byte_length + relativeStart, 0);
+ first = MaxSimple(byte_length + relativeStart, 0);
} else {
- first = MathMin(relativeStart, byte_length);
+ first = MinSimple(relativeStart, byte_length);
}
var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
- fin = MathMax(byte_length + relativeEnd, 0);
+ fin = MaxSimple(byte_length + relativeEnd, 0);
} else {
- fin = MathMin(relativeEnd, byte_length);
+ fin = MinSimple(relativeEnd, byte_length);
}
if (fin < first) {
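MaxSimple/MinSimple are the internal two-argument replacements for the old Math.max/Math.min imports; the clamping itself is ordinary negative-offset normalization against byteLength:

    var buf = new ArrayBuffer(8);
    buf.slice(-4).byteLength;      // 4  (first = max(8 - 4, 0) = 4)
    buf.slice(-4, -1).byteLength;  // 3  (fin = max(8 - 1, 0) = 7)
    buf.slice(6, 2).byteLength;    // 0  (fin < first gives an empty buffer)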
diff --git a/deps/v8/src/code-stubs.js b/deps/v8/src/js/code-stubs.js
index ab06f6c63b..7cb10d726a 100644
--- a/deps/v8/src/code-stubs.js
+++ b/deps/v8/src/js/code-stubs.js
@@ -35,7 +35,7 @@ code_stubs.MathFloorStub = function MathFloorStub(call_conv, minor_key) {
// |tv| is the calling function's type vector
// |v| is the value to floor
if (f !== %_FixedArrayGet(tv, i|0)) {
- return %_CallFunction(receiver, v, f);
+ return %_Call(f, receiver, v);
}
var r = %_MathFloor(+v);
if (%_IsMinusZero(r)) {
diff --git a/deps/v8/src/collection-iterator.js b/deps/v8/src/js/collection-iterator.js
index c799d6f9cd..621d7266fc 100644
--- a/deps/v8/src/collection-iterator.js
+++ b/deps/v8/src/js/collection-iterator.js
@@ -2,21 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $mapEntries;
-var $mapIteratorNext;
-var $setIteratorNext;
-var $setValues;
-
(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalMap = global.Map;
var GlobalSet = global.Set;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
+var MapIterator = utils.ImportNow("MapIterator");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var SetIterator = utils.ImportNow("SetIterator");
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
// -------------------------------------------------------------------
@@ -70,7 +75,6 @@ function SetValues() {
// -------------------------------------------------------------------
%SetCode(SetIterator, SetIteratorConstructor);
-%FunctionSetPrototype(SetIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(SetIterator, 'Set Iterator');
utils.InstallFunctions(SetIterator.prototype, DONT_ENUM, [
'next', SetIteratorNextJS
@@ -87,9 +91,6 @@ utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
%AddNamedProperty(GlobalSet.prototype, iteratorSymbol, SetValues, DONT_ENUM);
-$setIteratorNext = SetIteratorNextJS;
-$setValues = SetValues;
-
// -------------------------------------------------------------------
function MapIteratorConstructor(map, kind) {
@@ -152,7 +153,6 @@ function MapValues() {
// -------------------------------------------------------------------
%SetCode(MapIterator, MapIteratorConstructor);
-%FunctionSetPrototype(MapIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(MapIterator, 'Map Iterator');
utils.InstallFunctions(MapIterator.prototype, DONT_ENUM, [
'next', MapIteratorNextJS
@@ -170,7 +170,14 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
%AddNamedProperty(GlobalMap.prototype, iteratorSymbol, MapEntries, DONT_ENUM);
-$mapEntries = MapEntries;
-$mapIteratorNext = MapIteratorNextJS;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.MapEntries = MapEntries;
+ to.MapIteratorNext = MapIteratorNextJS;
+ to.SetIteratorNext = SetIteratorNextJS;
+ to.SetValues = SetValues;
+});
})
diff --git a/deps/v8/src/collection.js b/deps/v8/src/js/collection.js
index 8bf6ec3515..050c37b5bd 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $getHash;
-var $getExistingHash;
-
(function(global, utils) {
"use strict";
@@ -18,16 +15,18 @@ var GlobalObject = global.Object;
var GlobalSet = global.Set;
var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
var IntRandom;
+var MakeTypeError;
+var MapIterator;
+var NumberIsNaN;
+var SetIterator;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
IntRandom = from.IntRandom;
-});
-
-var NumberIsNaN;
-
-utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+ MapIterator = from.MapIterator;
NumberIsNaN = from.NumberIsNaN;
+ SetIterator = from.SetIterator;
});
// -------------------------------------------------------------------
@@ -249,7 +248,7 @@ function SetForEach(f, receiver) {
var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
var key;
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
var value_array = [UNDEFINED];
while (%SetIteratorNext(iterator, value_array)) {
if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -432,7 +431,7 @@ function MapForEach(f, receiver) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+ var stepping = DEBUG_IS_STEPPING(f);
var value_array = [UNDEFINED, UNDEFINED];
while (%MapIteratorNext(iterator, value_array)) {
if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -462,10 +461,6 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
"forEach", MapForEach
]);
-// Expose to the global scope.
-$getHash = GetHash;
-$getExistingHash = GetExistingHash;
-
function MapFromArray(array) {
var map = new GlobalMap;
var length = array.length;
@@ -501,4 +496,9 @@ function SetFromArray(array) {
"set_from_array",SetFromArray,
]);
+utils.Export(function(to) {
+ to.GetExistingHash = GetExistingHash;
+ to.GetHash = GetHash;
+});
+
})
diff --git a/deps/v8/src/date.js b/deps/v8/src/js/date.js
index d2d59152ef..a99d8e4d51 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/js/date.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $createDate;
-
// -------------------------------------------------------------------
(function(global, utils) {
@@ -19,22 +17,23 @@ var GlobalDate = global.Date;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var IsFinite;
+var MakeRangeError;
var MathAbs;
var MathFloor;
-var ToNumber;
+var NaN = %GetRootNaN();
utils.Import(function(from) {
IsFinite = from.IsFinite;
+ MakeRangeError = from.MakeRangeError;
MathAbs = from.MathAbs;
MathFloor = from.MathFloor;
- ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
// This file contains date support implemented in JavaScript.
-var timezone_cache_time = NAN;
+var timezone_cache_time = NaN;
var timezone_cache_timezone;
function LocalTimezone(t) {
@@ -60,10 +59,10 @@ function UTC(time) {
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
- if (!IsFinite(hour)) return NAN;
- if (!IsFinite(min)) return NAN;
- if (!IsFinite(sec)) return NAN;
- if (!IsFinite(ms)) return NAN;
+ if (!IsFinite(hour)) return NaN;
+ if (!IsFinite(min)) return NaN;
+ if (!IsFinite(sec)) return NaN;
+ if (!IsFinite(ms)) return NaN;
return TO_INTEGER(hour) * msPerHour
+ TO_INTEGER(min) * msPerMinute
+ TO_INTEGER(sec) * msPerSecond
@@ -84,7 +83,7 @@ function TimeInYear(year) {
// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
function MakeDay(year, month, date) {
- if (!IsFinite(year) || !IsFinite(month) || !IsFinite(date)) return NAN;
+ if (!IsFinite(year) || !IsFinite(month) || !IsFinite(date)) return NaN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
@@ -93,7 +92,7 @@ function MakeDay(year, month, date) {
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth) {
- return NAN;
+ return NaN;
}
// Now we rely on year and month being SMIs.
@@ -109,16 +108,16 @@ function MakeDate(day, time) {
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
- if (MathAbs(time) > MAX_TIME_BEFORE_UTC) return NAN;
+ if (MathAbs(time) > MAX_TIME_BEFORE_UTC) return NaN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
- if (!IsFinite(time)) return NAN;
- if (MathAbs(time) > MAX_TIME_MS) return NAN;
- return TO_INTEGER(time);
+ if (!IsFinite(time)) return NaN;
+ if (MathAbs(time) > MAX_TIME_MS) return NaN;
+ return TO_INTEGER(time) + 0;
}
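The trailing `+ 0` is the last step of ES6 TimeClip: ToInteger(-0.5) is -0, and adding +0 normalizes it so a Date never stores negative zero. With the constructor change just below, a single numeric argument is also clipped immediately. A sketch approximating TO_INTEGER with Math.trunc:

    function timeClip(time) {  // sketch of the function above
      if (!isFinite(time) || Math.abs(time) > 8.64e15) return NaN;
      return Math.trunc(time) + 0;  // -0 becomes +0
    }

    Object.is(timeClip(-0.5), -0);    // false: the result is +0
    new Date(8.64e15 + 1).getTime();  // NaN: out of range, clipped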
@@ -135,7 +134,7 @@ var Date_cache = {
function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
- return %_CallFunction(new GlobalDate(), DateToString);
+ return %_Call(DateToString, new GlobalDate());
}
// ECMA 262 - 15.9.3
@@ -146,7 +145,7 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
SET_UTC_DATE_VALUE(this, value);
} else if (argc == 1) {
if (IS_NUMBER(year)) {
- value = year;
+ value = TimeClip(year);
} else if (IS_STRING(year)) {
// Probe the Date cache. If we already have a time value for the
@@ -168,17 +167,17 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
} else {
var time = TO_PRIMITIVE(year);
- value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
+ value = IS_STRING(time) ? DateParse(time) : TO_NUMBER(time);
}
SET_UTC_DATE_VALUE(this, value);
} else {
- year = ToNumber(year);
- month = ToNumber(month);
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
+ year = TO_NUMBER(year);
+ month = TO_NUMBER(month);
+ date = argc > 2 ? TO_NUMBER(date) : 1;
+ hours = argc > 3 ? TO_NUMBER(hours) : 0;
+ minutes = argc > 4 ? TO_NUMBER(minutes) : 0;
+ seconds = argc > 5 ? TO_NUMBER(seconds) : 0;
+ ms = argc > 6 ? TO_NUMBER(ms) : 0;
year = (!NUMBER_IS_NAN(year) &&
0 <= TO_INTEGER(year) &&
TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
@@ -267,7 +266,7 @@ var parse_buffer = new InternalArray(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
var arr = %DateParseString(string, parse_buffer);
- if (IS_NULL(arr)) return NAN;
+ if (IS_NULL(arr)) return NaN;
var day = MakeDay(arr[0], arr[1], arr[2]);
var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
@@ -283,14 +282,14 @@ function DateParse(string) {
// ECMA 262 - 15.9.4.3
function DateUTC(year, month, date, hours, minutes, seconds, ms) {
- year = ToNumber(year);
- month = ToNumber(month);
+ year = TO_NUMBER(year);
+ month = TO_NUMBER(month);
var argc = %_ArgumentsLength();
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
+ date = argc > 2 ? TO_NUMBER(date) : 1;
+ hours = argc > 3 ? TO_NUMBER(hours) : 0;
+ minutes = argc > 4 ? TO_NUMBER(minutes) : 0;
+ seconds = argc > 5 ? TO_NUMBER(seconds) : 0;
+ ms = argc > 6 ? TO_NUMBER(ms) : 0;
year = (!NUMBER_IS_NAN(year) &&
0 <= TO_INTEGER(year) &&
TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
@@ -338,7 +337,7 @@ function DateToTimeString() {
// ECMA 262 - 15.9.5.5
function DateToLocaleString() {
CHECK_DATE(this);
- return %_CallFunction(this, DateToString);
+ return %_Call(DateToString, this);
}
@@ -496,7 +495,7 @@ function DateGetTimezoneOffset() {
// ECMA 262 - 15.9.5.27
function DateSetTime(ms) {
CHECK_DATE(this);
- SET_UTC_DATE_VALUE(this, ToNumber(ms));
+ SET_UTC_DATE_VALUE(this, TO_NUMBER(ms));
return UTC_DATE_VALUE(this);
}
@@ -505,7 +504,7 @@ function DateSetTime(ms) {
function DateSetMilliseconds(ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- ms = ToNumber(ms);
+ ms = TO_NUMBER(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -515,7 +514,7 @@ function DateSetMilliseconds(ms) {
function DateSetUTCMilliseconds(ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- ms = ToNumber(ms);
+ ms = TO_NUMBER(ms);
var time = MakeTime(UTC_HOUR(this),
UTC_MIN(this),
UTC_SEC(this),
@@ -528,8 +527,8 @@ function DateSetUTCMilliseconds(ms) {
function DateSetSeconds(sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
+ sec = TO_NUMBER(sec);
+ ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : TO_NUMBER(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -539,8 +538,8 @@ function DateSetSeconds(sec, ms) {
function DateSetUTCSeconds(sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
+ sec = TO_NUMBER(sec);
+ ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : TO_NUMBER(ms);
var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
}
@@ -550,10 +549,10 @@ function DateSetUTCSeconds(sec, ms) {
function DateSetMinutes(min, sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- min = ToNumber(min);
+ min = TO_NUMBER(min);
var argc = %_ArgumentsLength();
- sec = argc < 2 ? LOCAL_SEC(this) : ToNumber(sec);
- ms = argc < 3 ? LOCAL_MS(this) : ToNumber(ms);
+ sec = argc < 2 ? LOCAL_SEC(this) : TO_NUMBER(sec);
+ ms = argc < 3 ? LOCAL_MS(this) : TO_NUMBER(ms);
var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -563,10 +562,10 @@ function DateSetMinutes(min, sec, ms) {
function DateSetUTCMinutes(min, sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- min = ToNumber(min);
+ min = TO_NUMBER(min);
var argc = %_ArgumentsLength();
- sec = argc < 2 ? UTC_SEC(this) : ToNumber(sec);
- ms = argc < 3 ? UTC_MS(this) : ToNumber(ms);
+ sec = argc < 2 ? UTC_SEC(this) : TO_NUMBER(sec);
+ ms = argc < 3 ? UTC_MS(this) : TO_NUMBER(ms);
var time = MakeTime(UTC_HOUR(this), min, sec, ms);
return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
}
@@ -576,11 +575,11 @@ function DateSetUTCMinutes(min, sec, ms) {
function DateSetHours(hour, min, sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- hour = ToNumber(hour);
+ hour = TO_NUMBER(hour);
var argc = %_ArgumentsLength();
- min = argc < 2 ? LOCAL_MIN(this) : ToNumber(min);
- sec = argc < 3 ? LOCAL_SEC(this) : ToNumber(sec);
- ms = argc < 4 ? LOCAL_MS(this) : ToNumber(ms);
+ min = argc < 2 ? LOCAL_MIN(this) : TO_NUMBER(min);
+ sec = argc < 3 ? LOCAL_SEC(this) : TO_NUMBER(sec);
+ ms = argc < 4 ? LOCAL_MS(this) : TO_NUMBER(ms);
var time = MakeTime(hour, min, sec, ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -590,11 +589,11 @@ function DateSetHours(hour, min, sec, ms) {
function DateSetUTCHours(hour, min, sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- hour = ToNumber(hour);
+ hour = TO_NUMBER(hour);
var argc = %_ArgumentsLength();
- min = argc < 2 ? UTC_MIN(this) : ToNumber(min);
- sec = argc < 3 ? UTC_SEC(this) : ToNumber(sec);
- ms = argc < 4 ? UTC_MS(this) : ToNumber(ms);
+ min = argc < 2 ? UTC_MIN(this) : TO_NUMBER(min);
+ sec = argc < 3 ? UTC_SEC(this) : TO_NUMBER(sec);
+ ms = argc < 4 ? UTC_MS(this) : TO_NUMBER(ms);
var time = MakeTime(hour, min, sec, ms);
return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
}
@@ -604,7 +603,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
function DateSetDate(date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- date = ToNumber(date);
+ date = TO_NUMBER(date);
var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
}
@@ -614,7 +613,7 @@ function DateSetDate(date) {
function DateSetUTCDate(date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- date = ToNumber(date);
+ date = TO_NUMBER(date);
var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
}
@@ -624,8 +623,8 @@ function DateSetUTCDate(date) {
function DateSetMonth(month, date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
+ month = TO_NUMBER(month);
+ date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : TO_NUMBER(date);
var day = MakeDay(LOCAL_YEAR(this), month, date);
return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
}
@@ -635,8 +634,8 @@ function DateSetMonth(month, date) {
function DateSetUTCMonth(month, date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
+ month = TO_NUMBER(month);
+ date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : TO_NUMBER(date);
var day = MakeDay(UTC_YEAR(this), month, date);
return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
}
@@ -646,16 +645,16 @@ function DateSetUTCMonth(month, date) {
function DateSetFullYear(year, month, date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- year = ToNumber(year);
+ year = TO_NUMBER(year);
var argc = %_ArgumentsLength();
var time ;
if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : ToNumber(month);
- date = argc < 3 ? 1 : ToNumber(date);
+ month = argc < 2 ? 0 : TO_NUMBER(month);
+ date = argc < 3 ? 1 : TO_NUMBER(date);
time = 0;
} else {
- month = argc < 2 ? LOCAL_MONTH(this) : ToNumber(month);
- date = argc < 3 ? LOCAL_DAY(this) : ToNumber(date);
+ month = argc < 2 ? LOCAL_MONTH(this) : TO_NUMBER(month);
+ date = argc < 3 ? LOCAL_DAY(this) : TO_NUMBER(date);
time = LOCAL_TIME_IN_DAY(this);
}
var day = MakeDay(year, month, date);
@@ -667,16 +666,16 @@ function DateSetFullYear(year, month, date) {
function DateSetUTCFullYear(year, month, date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- year = ToNumber(year);
+ year = TO_NUMBER(year);
var argc = %_ArgumentsLength();
var time ;
if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : ToNumber(month);
- date = argc < 3 ? 1 : ToNumber(date);
+ month = argc < 2 ? 0 : TO_NUMBER(month);
+ date = argc < 3 ? 1 : TO_NUMBER(date);
time = 0;
} else {
- month = argc < 2 ? UTC_MONTH(this) : ToNumber(month);
- date = argc < 3 ? UTC_DAY(this) : ToNumber(date);
+ month = argc < 2 ? UTC_MONTH(this) : TO_NUMBER(month);
+ date = argc < 3 ? UTC_DAY(this) : TO_NUMBER(date);
time = UTC_TIME_IN_DAY(this);
}
var day = MakeDay(year, month, date);
@@ -708,8 +707,8 @@ function DateGetYear() {
// ECMA 262 - B.2.5
function DateSetYear(year) {
CHECK_DATE(this);
- year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
+ year = TO_NUMBER(year);
+ if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var t = LOCAL_DATE_VALUE(this);
@@ -736,7 +735,7 @@ function DateSetYear(year) {
// do that either. Instead, we create a new function whose name
// property will return toGMTString.
function DateToGMTString() {
- return %_CallFunction(this, DateToUTCString);
+ return %_Call(DateToUTCString, this);
}
@@ -785,7 +784,7 @@ function DateToJSON(key) {
var date_cache_version_holder;
-var date_cache_version = NAN;
+var date_cache_version = NaN;
function CheckDateCacheCurrent() {
@@ -799,11 +798,11 @@ function CheckDateCacheCurrent() {
date_cache_version = date_cache_version_holder[0];
// Reset the timezone cache:
- timezone_cache_time = NAN;
+ timezone_cache_time = NaN;
timezone_cache_timezone = UNDEFINED;
// Reset the date cache:
- Date_cache.time = NAN;
+ Date_cache.time = NaN;
Date_cache.string = null;
}
diff --git a/deps/v8/src/generator.js b/deps/v8/src/js/generator.js
index 56579c59d4..2f61b3f22c 100644
--- a/deps/v8/src/generator.js
+++ b/deps/v8/src/js/generator.js
@@ -11,11 +11,15 @@
// -------------------------------------------------------------------
// Imports
+var GeneratorFunctionPrototype = utils.ImportNow("GeneratorFunctionPrototype");
+var GeneratorFunction = utils.ImportNow("GeneratorFunction");
var GlobalFunction = global.Function;
+var MakeTypeError;
var NewFunctionString;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
NewFunctionString = from.NewFunctionString;
});
@@ -33,7 +37,7 @@ function GeneratorObjectNext(value) {
var continuation = %GeneratorGetContinuation(this);
if (continuation > 0) {
// Generator is suspended.
- if (DEBUG_IS_ACTIVE) %DebugPrepareStepInIfStepping(this);
+ DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
try {
return %_GeneratorNext(this, value);
} catch (e) {
@@ -80,9 +84,11 @@ function GeneratorFunctionConstructor(arg1) { // length == 1
var global_proxy = %GlobalProxy(GeneratorFunctionConstructor);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
- var f = %_CallFunction(global_proxy, %CompileString(source, true));
- %FunctionMarkNameShouldPrintAsAnonymous(f);
- return f;
+ var func = %_Call(%CompileString(source, true), global_proxy);
+ // Set the name-should-print-as-anonymous flag on the SharedFunctionInfo and
+ // ensure that |func| uses the correct initial map from |new.target| if
+ // it's available.
+ return %CompleteFunctionConstruction(func, GeneratorFunction, new.target);
}
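%CompleteFunctionConstruction and the new.target plumbing are V8-internal, but the constructor they back is ordinary ES6 GeneratorFunction, reachable only through a generator instance:

    var GeneratorFunction = Object.getPrototypeOf(function*(){}).constructor;

    var gen = new GeneratorFunction("x", "yield x * 2");
    gen(21).next().value;  // 42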
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/harmony-array-includes.js b/deps/v8/src/js/harmony-array-includes.js
index a6b59137d2..bb1f01cd57 100644
--- a/deps/v8/src/harmony-array-includes.js
+++ b/deps/v8/src/js/harmony-array-includes.js
@@ -8,7 +8,17 @@
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalArray = global.Array;
+var MakeTypeError;
+var SameValueZero;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+ SameValueZero = from.SameValueZero;
+});
// -------------------------------------------------------------------
@@ -34,7 +44,7 @@ function InnerArrayIncludes(searchElement, fromIndex, array, length) {
while (k < length) {
var elementK = array[k];
- if ($sameValueZero(searchElement, elementK)) {
+ if (SameValueZero(searchElement, elementK)) {
return true;
}
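Switching to the imported SameValueZero keeps the semantics that distinguish includes from indexOf: NaN matches itself and -0 matches +0:

    [NaN].includes(NaN);  // true   (SameValueZero)
    [NaN].indexOf(NaN);   // -1     (strict equality)
    [-0].includes(0);     // true   (-0 and +0 are SameValueZero-equal)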
diff --git a/deps/v8/src/harmony-atomics.js b/deps/v8/src/js/harmony-atomics.js
index b1b529fe86..71125a9680 100644
--- a/deps/v8/src/harmony-atomics.js
+++ b/deps/v8/src/js/harmony-atomics.js
@@ -12,13 +12,13 @@
// Imports
var GlobalObject = global.Object;
-var MathMax;
-var ToNumber;
+var MakeTypeError;
+var MaxSimple;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MathMax = from.MathMax;
- ToNumber = from.ToNumber;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
});
// -------------------------------------------------------------------
@@ -45,8 +45,8 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
- oldValue = ToNumber(oldValue);
- newValue = ToNumber(newValue);
+ oldValue = TO_NUMBER(oldValue);
+ newValue = TO_NUMBER(newValue);
return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
}
@@ -65,7 +65,7 @@ function AtomicsStoreJS(sta, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsStore(sta, index, value);
}
@@ -75,7 +75,7 @@ function AtomicsAddJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsAdd(ia, index, value);
}
@@ -85,7 +85,7 @@ function AtomicsSubJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsSub(ia, index, value);
}
@@ -95,7 +95,7 @@ function AtomicsAndJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsAnd(ia, index, value);
}
@@ -105,7 +105,7 @@ function AtomicsOrJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsOr(ia, index, value);
}
@@ -115,7 +115,7 @@ function AtomicsXorJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsXor(ia, index, value);
}
@@ -125,7 +125,7 @@ function AtomicsExchangeJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsExchange(ia, index, value);
}
@@ -144,11 +144,11 @@ function AtomicsFutexWaitJS(ia, index, value, timeout) {
if (IS_UNDEFINED(timeout)) {
timeout = INFINITY;
} else {
- timeout = ToNumber(timeout);
+ timeout = TO_NUMBER(timeout);
if (NUMBER_IS_NAN(timeout)) {
timeout = INFINITY;
} else {
- timeout = MathMax(0, timeout);
+ timeout = MaxSimple(0, timeout);
}
}
return %AtomicsFutexWait(ia, index, value, timeout);
@@ -160,14 +160,14 @@ function AtomicsFutexWakeJS(ia, index, count) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- count = MathMax(0, TO_INTEGER(count));
+ count = MaxSimple(0, TO_INTEGER(count));
return %AtomicsFutexWake(ia, index, count);
}
function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
CheckSharedInteger32TypedArray(ia);
index1 = TO_INTEGER(index1);
- count = MathMax(0, TO_INTEGER(count));
+ count = MaxSimple(0, TO_INTEGER(count));
value = TO_INT32(value);
index2 = TO_INTEGER(index2);
if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
diff --git a/deps/v8/src/js/harmony-object-observe.js b/deps/v8/src/js/harmony-object-observe.js
new file mode 100644
index 0000000000..95dd298f0d
--- /dev/null
+++ b/deps/v8/src/js/harmony-object-observe.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var ObserveArrayMethods = utils.ImportNow("ObserveArrayMethods");
+var ObserveObjectMethods = utils.ImportNow("ObserveObjectMethods");
+
+utils.InstallFunctions(global.Object, DONT_ENUM, ObserveObjectMethods);
+utils.InstallFunctions(global.Array, DONT_ENUM, ObserveArrayMethods);
+
+})
diff --git a/deps/v8/src/js/harmony-reflect.js b/deps/v8/src/js/harmony-reflect.js
new file mode 100644
index 0000000000..bbca0fef61
--- /dev/null
+++ b/deps/v8/src/js/harmony-reflect.js
@@ -0,0 +1,37 @@
+// Copyright 2013-2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalReflect = global.Reflect;
+var MakeTypeError;
+var ReflectApply = utils.ImportNow("reflect_apply");
+var ReflectConstruct = utils.ImportNow("reflect_construct");
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+function ReflectEnumerate(obj) {
+ if (!IS_SPEC_OBJECT(obj))
+ throw MakeTypeError(kCalledOnNonObject, "Reflect.enumerate")
+ return (function* () { for (var x in obj) yield x })();
+}
+
+utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
+ "apply", ReflectApply,
+ "construct", ReflectConstruct,
+ "enumerate", ReflectEnumerate
+]);
+
+})
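Reflect.enumerate (later dropped from the final ES2016 spec, but present in this V8 snapshot behind its harmony flag) hands back an iterator over the same keys a for-in loop would visit; the generator above is a direct encoding of that. Hypothetical usage, assuming the feature is enabled:

    const obj = { a: 1, b: 2 };
    for (const key of Reflect.enumerate(obj)) {
      console.log(key);  // "a", then "b" (plus any inherited enumerable keys)
    }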
diff --git a/deps/v8/src/js/harmony-regexp.js b/deps/v8/src/js/harmony-regexp.js
new file mode 100644
index 0000000000..eadf1d237c
--- /dev/null
+++ b/deps/v8/src/js/harmony-regexp.js
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalRegExp = global.RegExp;
+var MakeTypeError;
+var regExpFlagsSymbol = utils.ImportNow("regexp_flags_symbol");
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+// ES6 draft 12-06-13, section 21.2.5.3
+// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
+function RegExpGetFlags() {
+ if (!IS_SPEC_OBJECT(this)) {
+ throw MakeTypeError(
+ kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
+ }
+ var result = '';
+ if (this.global) result += 'g';
+ if (this.ignoreCase) result += 'i';
+ if (this.multiline) result += 'm';
+ if (this.unicode) result += 'u';
+ if (this.sticky) result += 'y';
+ return result;
+}
+
+
+// ES6 21.2.5.12.
+function RegExpGetSticky() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
+ }
+ return !!REGEXP_STICKY(this);
+}
+%FunctionSetName(RegExpGetSticky, "RegExp.prototype.sticky");
+%SetNativeFlag(RegExpGetSticky);
+
+
+// ES6 21.2.5.15.
+function RegExpGetUnicode() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
+ }
+ return !!REGEXP_UNICODE(this);
+}
+%FunctionSetName(RegExpGetUnicode, "RegExp.prototype.unicode");
+%SetNativeFlag(RegExpGetUnicode);
+
+utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
+utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
+utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
+
+})
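RegExpGetFlags rebuilds the flag string from the individual boolean getters, while the sticky and unicode getters read single bits off the internal flags word via the new REGEXP_STICKY and REGEXP_UNICODE macros. Expected behavior, for illustration:

    const re = /ab/gim;
    re.flags;    // "gim" -- assembled in g, i, m, u, y order
    re.sticky;   // false -- the 0x8 bit of the internal flags word
    re.unicode;  // false -- the 0x10 bit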
diff --git a/deps/v8/src/harmony-sharedarraybuffer.js b/deps/v8/src/js/harmony-sharedarraybuffer.js
index 3a72d6c353..b4c34151a3 100644
--- a/deps/v8/src/harmony-sharedarraybuffer.js
+++ b/deps/v8/src/js/harmony-sharedarraybuffer.js
@@ -10,13 +10,20 @@
var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
var GlobalObject = global.Object;
+var MakeTypeError;
+var ToPositiveInteger;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+ ToPositiveInteger = from.ToPositiveInteger;
+})
+
// -------------------------------------------------------------------
function SharedArrayBufferConstructor(length) { // length = 1
if (%_IsConstructCall()) {
- var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
+ var byteLength = ToPositiveInteger(length, kInvalidArrayBufferLength);
%ArrayBufferInitialize(this, byteLength, kShared);
} else {
throw MakeTypeError(kConstructorNotFunction, "SharedArrayBuffer");
diff --git a/deps/v8/src/harmony-simd.js b/deps/v8/src/js/harmony-simd.js
index ef3d9948b1..6847f2279a 100644
--- a/deps/v8/src/harmony-simd.js
+++ b/deps/v8/src/js/harmony-simd.js
@@ -12,8 +12,15 @@
// Imports
var GlobalSIMD = global.SIMD;
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
macro SIMD_FLOAT_TYPES(FUNCTION)
FUNCTION(Float32x4, float32x4, 4)
endmacro
@@ -162,7 +169,7 @@ endmacro
macro DECLARE_NUMERIC_FUNCTIONS(NAME, TYPE, LANES)
function NAMEReplaceLaneJS(instance, lane, value) {
- return %NAMEReplaceLane(instance, lane, TO_NUMBER_INLINE(value));
+ return %NAMEReplaceLane(instance, lane, TO_NUMBER(value));
}
function NAMESelectJS(selector, a, b) {
@@ -426,22 +433,22 @@ SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
function Float32x4Constructor(c0, c1, c2, c3) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Float32x4");
- return %CreateFloat32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+ return %CreateFloat32x4(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3));
}
function Int32x4Constructor(c0, c1, c2, c3) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int32x4");
- return %CreateInt32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+ return %CreateInt32x4(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3));
}
function Uint32x4Constructor(c0, c1, c2, c3) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint32x4");
- return %CreateUint32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+ return %CreateUint32x4(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3));
}
@@ -453,19 +460,19 @@ function Bool32x4Constructor(c0, c1, c2, c3) {
function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int16x8");
- return %CreateInt16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7));
+ return %CreateInt16x8(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7));
}
function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint16x8");
- return %CreateUint16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7));
+ return %CreateUint16x8(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7));
}
@@ -478,28 +485,28 @@ function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int8x16");
- return %CreateInt8x16(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7),
- TO_NUMBER_INLINE(c8), TO_NUMBER_INLINE(c9),
- TO_NUMBER_INLINE(c10), TO_NUMBER_INLINE(c11),
- TO_NUMBER_INLINE(c12), TO_NUMBER_INLINE(c13),
- TO_NUMBER_INLINE(c14), TO_NUMBER_INLINE(c15));
+ return %CreateInt8x16(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7),
+ TO_NUMBER(c8), TO_NUMBER(c9),
+ TO_NUMBER(c10), TO_NUMBER(c11),
+ TO_NUMBER(c12), TO_NUMBER(c13),
+ TO_NUMBER(c14), TO_NUMBER(c15));
}
function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint8x16");
- return %CreateUint8x16(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7),
- TO_NUMBER_INLINE(c8), TO_NUMBER_INLINE(c9),
- TO_NUMBER_INLINE(c10), TO_NUMBER_INLINE(c11),
- TO_NUMBER_INLINE(c12), TO_NUMBER_INLINE(c13),
- TO_NUMBER_INLINE(c14), TO_NUMBER_INLINE(c15));
+ return %CreateUint8x16(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7),
+ TO_NUMBER(c8), TO_NUMBER(c9),
+ TO_NUMBER(c10), TO_NUMBER(c11),
+ TO_NUMBER(c12), TO_NUMBER(c13),
+ TO_NUMBER(c14), TO_NUMBER(c15));
}
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/js/i18n.js
index b9d659c442..f2b9dd4445 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -27,6 +27,9 @@ var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
+var MakeError;
+var MakeRangeError;
+var MakeTypeError;
var MathFloor;
var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
@@ -45,6 +48,9 @@ utils.Import(function(from) {
ArrayPush = from.ArrayPush;
IsFinite = from.IsFinite;
IsNaN = from.IsNaN;
+ MakeError = from.MakeError;
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
MathFloor = from.MathFloor;
RegExpTest = from.RegExpTest;
StringIndexOf = from.StringIndexOf;
@@ -54,7 +60,6 @@ utils.Import(function(from) {
StringSplit = from.StringSplit;
StringSubstr = from.StringSubstr;
StringSubstring = from.StringSubstring;
- ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
@@ -244,7 +249,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (IS_NULL(%_CallFunction(service, GetServiceRE(), StringMatch))) {
+ if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
throw MakeError(kWrongServiceType, service);
}
@@ -292,20 +297,22 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
var matchedLocales = [];
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
- var locale = %_CallFunction(requestedLocales[i], GetUnicodeExtensionRE(),
- '', StringReplace);
+ var locale = %_Call(StringReplace,
+ requestedLocales[i],
+ GetUnicodeExtensionRE(),
+ '');
do {
if (!IS_UNDEFINED(availableLocales[locale])) {
// Push requested locale not the resolved one.
- %_CallFunction(matchedLocales, requestedLocales[i], ArrayPush);
+ %_Call(ArrayPush, matchedLocales, requestedLocales[i]);
break;
}
// Truncate locale if possible, if not break.
- var pos = %_CallFunction(locale, '-', StringLastIndexOf);
+ var pos = %_Call(StringLastIndexOf, locale, '-');
if (pos === -1) {
break;
}
- locale = %_CallFunction(locale, 0, pos, StringSubstring);
+ locale = %_Call(StringSubstring, locale, 0, pos);
} while (true);
}
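The do/while above is the BCP 47 lookup fallback: strip the -u- extension, then repeatedly truncate the tag at its last '-' until a supported prefix turns up. The same loop in isolation (a standalone sketch with a hypothetical availableLocales table and an approximate extension regexp):

    function lookupFallback(requested, availableLocales) {
      let locale = requested.replace(/-u(-[a-z0-9]{2,8})+/, "");
      do {
        if (availableLocales[locale] !== undefined) return locale;
        const pos = locale.lastIndexOf("-");
        if (pos === -1) break;
        locale = locale.substring(0, pos);  // "de-DE-u-co-phonebk" -> "de-DE" -> "de"
      } while (true);
      return undefined;
    }
    lookupFallback("de-DE-u-co-phonebk", { de: true });  // "de"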
@@ -350,8 +357,7 @@ function getGetOption(options, caller) {
throw MakeError(kWrongValueType);
}
- if (!IS_UNDEFINED(values) &&
- %_CallFunction(values, value, ArrayIndexOf) === -1) {
+ if (!IS_UNDEFINED(values) && %_Call(ArrayIndexOf, values, value) === -1) {
throw MakeRangeError(kValueOutOfRange, value, caller, property);
}
@@ -400,7 +406,7 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (IS_NULL(%_CallFunction(service, GetServiceRE(), StringMatch))) {
+ if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
throw MakeError(kWrongServiceType, service);
}
@@ -411,23 +417,22 @@ function lookupMatcher(service, requestedLocales) {
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
- var locale = %_CallFunction(requestedLocales[i], GetAnyExtensionRE(), '',
- StringReplace);
+ var locale = %_Call(StringReplace, requestedLocales[i],
+ GetAnyExtensionRE(), '');
do {
if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
// Return the resolved locale and extension.
var extensionMatch =
- %_CallFunction(requestedLocales[i], GetUnicodeExtensionRE(),
- StringMatch);
+ %_Call(StringMatch, requestedLocales[i], GetUnicodeExtensionRE());
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
- var pos = %_CallFunction(locale, '-', StringLastIndexOf);
+ var pos = %_Call(StringLastIndexOf, locale, '-');
if (pos === -1) {
break;
}
- locale = %_CallFunction(locale, 0, pos, StringSubstring);
+ locale = %_Call(StringSubstring, locale, 0, pos);
} while (true);
}
@@ -456,7 +461,7 @@ function bestFitMatcher(service, requestedLocales) {
* We are not concerned with the validity of the values at this point.
*/
function parseExtension(extension) {
- var extensionSplit = %_CallFunction(extension, '-', StringSplit);
+ var extensionSplit = %_Call(StringSplit, extension, '-');
// Assume ['', 'u', ...] input, but don't throw.
if (extensionSplit.length <= 2 ||
@@ -593,7 +598,7 @@ function getOptimalLanguageTag(original, resolved) {
// Preserve extensions of resolved locale, but swap base tags with original.
var resolvedBase = new GlobalRegExp('^' + locales[1].base);
- return %_CallFunction(resolved, resolvedBase, locales[0].base, StringReplace);
+ return %_Call(StringReplace, resolved, resolvedBase, locales[0].base);
}
@@ -608,8 +613,8 @@ function getAvailableLocalesOf(service) {
for (var i in available) {
if (%HasOwnProperty(available, i)) {
- var parts = %_CallFunction(i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/,
- StringMatch);
+ var parts =
+ %_Call(StringMatch, i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
if (parts !== null) {
// Build xx-ZZ. We don't care about the actual value,
// as long as it's not undefined.
@@ -669,8 +674,8 @@ function addWECPropertyIfDefined(object, property, value) {
* Returns titlecased word, aMeRricA -> America.
*/
function toTitleCaseWord(word) {
- return %StringToUpperCase(%_CallFunction(word, 0, 1, StringSubstr)) +
- %StringToLowerCase(%_CallFunction(word, 1, StringSubstr));
+ return %StringToUpperCase(%_Call(StringSubstr, word, 0, 1)) +
+ %StringToLowerCase(%_Call(StringSubstr, word, 1));
}
/**
@@ -714,7 +719,7 @@ function initializeLocaleList(locales) {
} else {
// We allow single string localeID.
if (typeof locales === 'string') {
- %_CallFunction(seen, canonicalizeLanguageTag(locales), ArrayPush);
+ %_Call(ArrayPush, seen, canonicalizeLanguageTag(locales));
return freezeArray(seen);
}
@@ -727,8 +732,8 @@ function initializeLocaleList(locales) {
var tag = canonicalizeLanguageTag(value);
- if (%_CallFunction(seen, tag, ArrayIndexOf) === -1) {
- %_CallFunction(seen, tag, ArrayPush);
+ if (%_Call(ArrayIndexOf, seen, tag) === -1) {
+ %_Call(ArrayPush, seen, tag);
}
}
}
@@ -749,40 +754,40 @@ function initializeLocaleList(locales) {
*/
function isValidLanguageTag(locale) {
// Check if it's well-formed, including grandfathered tags.
- if (!%_CallFunction(GetLanguageTagRE(), locale, RegExpTest)) {
+ if (!%_Call(RegExpTest, GetLanguageTagRE(), locale)) {
return false;
}
// Just return if it's a x- form. It's all private.
- if (%_CallFunction(locale, 'x-', StringIndexOf) === 0) {
+ if (%_Call(StringIndexOf, locale, 'x-') === 0) {
return true;
}
// Check if there are any duplicate variants or singletons (extensions).
// Remove private use section.
- locale = %_CallFunction(locale, /-x-/, StringSplit)[0];
+ locale = %_Call(StringSplit, locale, /-x-/)[0];
// Skip language since it can match variant regex, so we start from 1.
// We are matching i-klingon here, but that's ok, since i-klingon-klingon
// is not valid and would fail LANGUAGE_TAG_RE test.
var variants = [];
var extensions = [];
- var parts = %_CallFunction(locale, /-/, StringSplit);
+ var parts = %_Call(StringSplit, locale, /-/);
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
- if (%_CallFunction(GetLanguageVariantRE(), value, RegExpTest) &&
+ if (%_Call(RegExpTest, GetLanguageVariantRE(), value) &&
extensions.length === 0) {
- if (%_CallFunction(variants, value, ArrayIndexOf) === -1) {
- %_CallFunction(variants, value, ArrayPush);
+ if (%_Call(ArrayIndexOf, variants, value) === -1) {
+ %_Call(ArrayPush, variants, value);
} else {
return false;
}
}
- if (%_CallFunction(GetLanguageSingletonRE(), value, RegExpTest)) {
- if (%_CallFunction(extensions, value, ArrayIndexOf) === -1) {
- %_CallFunction(extensions, value, ArrayPush);
+ if (%_Call(RegExpTest, GetLanguageSingletonRE(), value)) {
+ if (%_Call(ArrayIndexOf, extensions, value) === -1) {
+ %_Call(ArrayPush, extensions, value);
} else {
return false;
}
@@ -896,8 +901,7 @@ function initializeCollator(collator, locales, options) {
'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
];
- if (%_CallFunction(ALLOWED_CO_VALUES, extensionMap.co, ArrayIndexOf) !==
- -1) {
+ if (%_Call(ArrayIndexOf, ALLOWED_CO_VALUES, extensionMap.co) !== -1) {
extension = '-u-co-' + extensionMap.co;
// ICU can't tell us what the collation is, so save user's input.
collation = extensionMap.co;
@@ -1037,7 +1041,7 @@ addBoundMethod(Intl.Collator, 'compare', compare, 2);
function isWellFormedCurrencyCode(currency) {
return typeof currency == "string" &&
currency.length == 3 &&
- %_CallFunction(currency, /[^A-Za-z]/, StringMatch) == null;
+ %_Call(StringMatch, currency, /[^A-Za-z]/) == null;
}
@@ -1110,7 +1114,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
if (!IS_UNDEFINED(mxfd) || internalOptions.style !== 'currency') {
var min_mxfd = internalOptions.style === 'percent' ? 0 : 3;
mnfd = IS_UNDEFINED(mnfd) ? 0 : mnfd;
- fallback_limit = (mnfd > min_mxfd) ? mnfd : min_mxfd;
+ var fallback_limit = (mnfd > min_mxfd) ? mnfd : min_mxfd;
mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, fallback_limit);
defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
}
@@ -1279,7 +1283,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
*/
function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
- var number = ToNumber(value) + 0;
+ var number = TO_NUMBER(value) + 0;
return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter),
number);
@@ -1366,58 +1370,57 @@ function appendToLDMLString(option, pairs) {
*/
function fromLDMLString(ldmlString) {
// First remove '' quoted text, so we lose 'Uhr' strings.
- ldmlString = %_CallFunction(ldmlString, GetQuotedStringRE(), '',
- StringReplace);
+ ldmlString = %_Call(StringReplace, ldmlString, GetQuotedStringRE(), '');
var options = {};
- var match = %_CallFunction(ldmlString, /E{3,5}/g, StringMatch);
+ var match = %_Call(StringMatch, ldmlString, /E{3,5}/g);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
- match = %_CallFunction(ldmlString, /G{3,5}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /G{3,5}/g);
options = appendToDateTimeObject(
options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
- match = %_CallFunction(ldmlString, /y{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /y{1,2}/g);
options = appendToDateTimeObject(
options, 'year', match, {y: 'numeric', yy: '2-digit'});
- match = %_CallFunction(ldmlString, /M{1,5}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /M{1,5}/g);
options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
// Sometimes we get L instead of M for month - standalone name.
- match = %_CallFunction(ldmlString, /L{1,5}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /L{1,5}/g);
options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
- match = %_CallFunction(ldmlString, /d{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /d{1,2}/g);
options = appendToDateTimeObject(
options, 'day', match, {d: 'numeric', dd: '2-digit'});
- match = %_CallFunction(ldmlString, /h{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /h{1,2}/g);
if (match !== null) {
options['hour12'] = true;
}
options = appendToDateTimeObject(
options, 'hour', match, {h: 'numeric', hh: '2-digit'});
- match = %_CallFunction(ldmlString, /H{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /H{1,2}/g);
if (match !== null) {
options['hour12'] = false;
}
options = appendToDateTimeObject(
options, 'hour', match, {H: 'numeric', HH: '2-digit'});
- match = %_CallFunction(ldmlString, /m{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /m{1,2}/g);
options = appendToDateTimeObject(
options, 'minute', match, {m: 'numeric', mm: '2-digit'});
- match = %_CallFunction(ldmlString, /s{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /s{1,2}/g);
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = %_CallFunction(ldmlString, /z|zzzz/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /z|zzzz/g);
options = appendToDateTimeObject(
options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
@@ -1704,7 +1707,7 @@ function formatDate(formatter, dateValue) {
if (IS_UNDEFINED(dateValue)) {
dateMs = %DateCurrentTime();
} else {
- dateMs = ToNumber(dateValue);
+ dateMs = TO_NUMBER(dateValue);
}
if (!IsFinite(dateMs)) throw MakeRangeError(kDateRange);
@@ -1750,7 +1753,7 @@ function canonicalizeTimeZoneID(tzID) {
// We expect only _ and / besides ASCII letters.
// All inputs should conform to Area/Location from now on.
- var match = %_CallFunction(tzID, GetTimezoneNameCheckRE(), StringMatch);
+ var match = %_Call(StringMatch, tzID, GetTimezoneNameCheckRE());
if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, tzID);
var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
@@ -2012,11 +2015,10 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm =
- %_CallFunction(NORMALIZATION_FORMS, form, ArrayIndexOf);
+ var normalizationForm = %_Call(ArrayIndexOf, NORMALIZATION_FORMS, form);
if (normalizationForm === -1) {
throw MakeRangeError(kNormalizationForm,
- %_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
+ %_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
}
return %StringNormalize(s, normalizationForm);
diff --git a/deps/v8/src/iterator-prototype.js b/deps/v8/src/js/iterator-prototype.js
index 2f49d90b1c..6f2501979d 100644
--- a/deps/v8/src/iterator-prototype.js
+++ b/deps/v8/src/js/iterator-prototype.js
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $iteratorPrototype;
-
(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
var GlobalObject = global.Object;
+ var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
// 25.1.2.1 %IteratorPrototype% [ @@iterator ] ( )
@@ -17,6 +16,6 @@ var $iteratorPrototype;
}
utils.SetFunctionName(IteratorPrototypeIterator, iteratorSymbol);
- %AddNamedProperty($iteratorPrototype, iteratorSymbol,
+ %AddNamedProperty(IteratorPrototype, iteratorSymbol,
IteratorPrototypeIterator, DONT_ENUM);
})
diff --git a/deps/v8/src/json.js b/deps/v8/src/js/json.js
index 6f8489088b..38c46af6d6 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/js/json.js
@@ -13,17 +13,17 @@
var GlobalJSON = global.JSON;
var InternalArray = utils.InternalArray;
-var MathMax;
-var MathMin;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
var ObjectHasOwnProperty;
-var ToNumber;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MathMax = from.MathMax;
- MathMin = from.MathMin;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
@@ -164,7 +164,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
if (IS_ARRAY(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return JSON_NUMBER_TO_STRING(value);
} else if (IS_STRING_WRAPPER(value)) {
return %QuoteJSONString(TO_STRING(value));
@@ -210,14 +210,14 @@ function JSONStringify(value, replacer, space) {
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
- space = ToNumber(space);
+ space = TO_NUMBER(space);
} else if (IS_STRING_WRAPPER(space)) {
space = TO_STRING(space);
}
}
var gap;
if (IS_NUMBER(space)) {
- space = MathMax(0, MathMin(TO_INTEGER(space), 10));
+ space = MaxSimple(0, MinSimple(TO_INTEGER(space), 10));
gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
diff --git a/deps/v8/src/macros.py b/deps/v8/src/js/macros.py
index 6de9120fb2..a4370d2181 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -73,6 +73,9 @@ define kSafeArgumentsLength = 0x800000;
# 2^53 - 1
define kMaxSafeInteger = 9007199254740991;
+# 2^32 - 1
+define kMaxUint32 = 4294967295;
+
# Strict mode flags for passing to %SetProperty
define kSloppyMode = 0;
define kStrictMode = 1;
@@ -141,24 +144,24 @@ define kBoundArgumentsStartIndex = 2;
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
+macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
-macro TO_INT32(arg) = (arg | 0);
-macro TO_UINT32(arg) = (arg >>> 0);
-macro TO_LENGTH(arg) = (%ToLength(arg));
-macro TO_LENGTH_OR_UINT32(arg) = (harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
+macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(arg));
+macro TO_INT32(arg) = ((arg) | 0);
+macro TO_UINT32(arg) = ((arg) >>> 0);
+macro TO_LENGTH(arg) = (%_ToLength(arg));
+macro TO_LENGTH_OR_UINT32(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
+macro TO_LENGTH_OR_INTEGER(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_INTEGER(arg));
macro TO_STRING(arg) = (%_ToString(arg));
-macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : $nonNumberToNumber(arg));
+macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro TO_PRIMITIVE(arg) = (%_ToPrimitive(arg));
macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg));
macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
macro TO_NAME(arg) = (%_ToName(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
-macro HAS_OWN_PROPERTY(arg, index) = (%_CallFunction(arg, index, ObjectHasOwnProperty));
+macro HAS_OWN_PROPERTY(arg, index) = (%_Call(ObjectHasOwnProperty, arg, index));
macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
-macro MAX_SIMPLE(argA, argB) = (argA < argB ? argB : argA);
-macro MIN_SIMPLE(argA, argB) = (argA < argB ? argA : argB);
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
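The extra parentheses added around arg in TO_INT32 and TO_UINT32 above are textual-macro hygiene: macros.py substitutes the argument verbatim, so a low-precedence argument expression could regroup under the old expansion. A concrete illustration of the hazard (hypothetical expansion, shown as plain JS):

    // macro TO_UINT32(arg) = (arg >>> 0);
    //   TO_UINT32(flag ? x : y)  =>  (flag ? x : y >>> 0)
    //   >>> binds tighter than ?:, so only the else branch is coerced.
    // macro TO_UINT32(arg) = ((arg) >>> 0);
    //   TO_UINT32(flag ? x : y)  =>  ((flag ? x : y) >>> 0), coercing either branch.
    const flag = true, x = -1, y = -1;
    (flag ? x : y >>> 0);    // -1          (old expansion: x escapes coercion)
    ((flag ? x : y) >>> 0);  // 4294967295  (new expansion: always coerced)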
@@ -168,7 +171,6 @@ macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
# Constants. The compiler constant folds them.
-define NAN = $NaN;
define INFINITY = (1/0);
define UNDEFINED = (void 0);
@@ -179,6 +181,14 @@ python macro CHAR_CODE(str) = ord(str[1]);
define REGEXP_NUMBER_OF_CAPTURES = 0;
define REGEXP_FIRST_CAPTURE = 3;
+# Macros for internal slot access.
+macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1);
+macro REGEXP_IGNORE_CASE(regexp) = (%_RegExpFlags(regexp) & 2);
+macro REGEXP_MULTILINE(regexp) = (%_RegExpFlags(regexp) & 4);
+macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8);
+macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16);
+macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
+
# We can't put macros in macros so we use constants here.
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
@@ -305,7 +315,7 @@ define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
macro DEBUG_IS_STEPPING(function) = (%_DebugIsActive() != 0 && %DebugCallbackSupportsStepping(function));
-macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (DEBUG_IS_STEPPING(function)) %DebugPrepareStepInIfStepping(function);
+macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (%_DebugIsActive() != 0) %DebugPrepareStepInIfStepping(function);
# SharedFlag equivalents
define kNotShared = false;
diff --git a/deps/v8/src/math.js b/deps/v8/src/js/math.js
index 05eb9e46d7..ba2b2186f4 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/js/math.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var rngstate; // Initialized to a Uint32Array during genesis.
-
(function(global, utils) {
"use strict";
@@ -15,8 +13,21 @@ var rngstate; // Initialized to a Uint32Array during genesis.
var GlobalMath = global.Math;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
+var NaN = %GetRootNaN();
+var rngstate_0;
+var rngstate_1;
+var rngstate_2;
+var rngstate_3;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.InitializeRNG = function() {
+ var rngstate = %InitializeRNG();
+ rngstate_0 = rngstate[0];
+ rngstate_1 = rngstate[1];
+ rngstate_2 = rngstate[2];
+ rngstate_3 = rngstate[3];
+};
+
//-------------------------------------------------------------------
// ECMA 262 - 15.8.2.1
@@ -56,7 +67,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.8
function MathExp(x) {
- return %MathExpRT(TO_NUMBER_INLINE(x));
+ return %MathExpRT(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.9
@@ -66,15 +77,15 @@ function MathFloorJS(x) {
// ECMA 262 - 15.8.2.10
function MathLog(x) {
- return %_MathLogRT(TO_NUMBER_INLINE(x));
+ return %_MathLogRT(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- arg1 = TO_NUMBER_INLINE(arg1);
- arg2 = TO_NUMBER_INLINE(arg2);
+ arg1 = TO_NUMBER(arg1);
+ arg2 = TO_NUMBER(arg2);
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
@@ -82,12 +93,12 @@ function MathMax(arg1, arg2) { // length == 2
return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
- return NAN;
+ return NaN;
}
var r = -INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
- n = TO_NUMBER_INLINE(n);
+ n = TO_NUMBER(n);
// Make sure +0 is considered greater than -0.
if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
r = n;
@@ -100,8 +111,8 @@ function MathMax(arg1, arg2) { // length == 2
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- arg1 = TO_NUMBER_INLINE(arg1);
- arg2 = TO_NUMBER_INLINE(arg2);
+ arg1 = TO_NUMBER(arg1);
+ arg2 = TO_NUMBER(arg2);
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
@@ -109,12 +120,12 @@ function MathMin(arg1, arg2) { // length == 2
return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
- return NAN;
+ return NaN;
}
var r = INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
- n = TO_NUMBER_INLINE(n);
+ n = TO_NUMBER(n);
// Make sure -0 is considered less than +0.
if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
r = n;
@@ -125,32 +136,35 @@ function MathMin(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.13
function MathPowJS(x, y) {
- return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
+ return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
}
// ECMA 262 - 15.8.2.14
function MathRandom() {
- var r0 = (MathImul(18030, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
- rngstate[0] = r0;
- var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
- rngstate[1] = r1;
- var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
- // Division by 0x100000000 through multiplication by reciprocal.
- return (x < 0 ? (x + 0x100000000) : x) * 2.3283064365386962890625e-10;
+ var r0 = (MathImul(18030, rngstate_0) + rngstate_1) | 0;
+ var r1 = (MathImul(36969, rngstate_2) + rngstate_3) | 0;
+ rngstate_0 = r0 & 0xFFFF;
+ rngstate_1 = r0 >>> 16;
+ rngstate_2 = r1 & 0xFFFF;
+ rngstate_3 = r1 >>> 16;
+ // Construct a double number 1.<32-bits of randomness> and subtract 1.
+ return %_ConstructDouble(0x3FF00000 | (r0 & 0x000FFFFF), r1 & 0xFFF00000) - 1;
}
function MathRandomRaw() {
- var r0 = (MathImul(18030, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
- rngstate[0] = r0;
- var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
- rngstate[1] = r1;
+ var r0 = (MathImul(18030, rngstate_0) + rngstate_1) | 0;
+ var r1 = (MathImul(36969, rngstate_2) + rngstate_3) | 0;
+ rngstate_0 = r0 & 0xFFFF;
+ rngstate_1 = r0 >>> 16;
+ rngstate_2 = r1 & 0xFFFF;
+ rngstate_3 = r1 >>> 16;
var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
- return x & 0x3fffffff;
+ return x & 0x3FFFFFFF;
}
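The rewritten generator is the same pair of 16-bit multiply-with-carry streams as before, but the four state halves now live in plain variables instead of a Uint32Array (cheaper to access from optimized code), and Math.random splices the random bits straight into a double's mantissa with %_ConstructDouble rather than dividing. A user-land sketch of the state update, falling back to the old division form since %_ConstructDouble is internal (seed values hypothetical):

    let s0 = 0x1234, s1 = 0x5678, s2 = 0x9abc, s3 = 0xdef0;  // 16-bit state halves
    function mwcRandom() {
      const r0 = (Math.imul(18030, s0) + s1) | 0;
      const r1 = (Math.imul(36969, s2) + s3) | 0;
      s0 = r0 & 0xFFFF; s1 = r0 >>> 16;  // low half = value, high half = carry
      s2 = r1 & 0xFFFF; s3 = r1 >>> 16;
      const x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
      // Division by 2^32 via multiplication by its reciprocal, as the old code did.
      return (x < 0 ? x + 0x100000000 : x) * 2.3283064365386962890625e-10;
    }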
// ECMA 262 - 15.8.2.15
function MathRound(x) {
- return %RoundNumber(TO_NUMBER_INLINE(x));
+ return %RoundNumber(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.17
@@ -160,7 +174,7 @@ function MathSqrtJS(x) {
// Non-standard extension.
function MathImul(x, y) {
- return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
+ return %NumberImul(TO_NUMBER(x), TO_NUMBER(y));
}
// ES6 draft 09-27-13, section 20.2.2.28.
@@ -181,21 +195,9 @@ function MathTrunc(x) {
return x;
}
-// ES6 draft 09-27-13, section 20.2.2.33.
-function MathTanh(x) {
- x = TO_NUMBER_INLINE(x);
- // Idempotent for +/-0.
- if (x === 0) return x;
- // Returns +/-1 for +/-Infinity.
- if (!NUMBER_IS_FINITE(x)) return MathSign(x);
- var exp1 = MathExp(x);
- var exp2 = MathExp(-x);
- return (exp1 - exp2) / (exp1 + exp2);
-}
-
// ES6 draft 09-27-13, section 20.2.2.5.
function MathAsinh(x) {
- x = TO_NUMBER_INLINE(x);
+ x = TO_NUMBER(x);
// Idempotent for NaN, +/-0 and +/-Infinity.
if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
if (x > 0) return MathLog(x + %_MathSqrt(x * x + 1));
@@ -205,8 +207,8 @@ function MathAsinh(x) {
// ES6 draft 09-27-13, section 20.2.2.3.
function MathAcosh(x) {
- x = TO_NUMBER_INLINE(x);
- if (x < 1) return NAN;
+ x = TO_NUMBER(x);
+ if (x < 1) return NaN;
// Idempotent for NaN and +Infinity.
if (!NUMBER_IS_FINITE(x)) return x;
return MathLog(x + %_MathSqrt(x + 1) * %_MathSqrt(x - 1));
@@ -214,11 +216,11 @@ function MathAcosh(x) {
// ES6 draft 09-27-13, section 20.2.2.7.
function MathAtanh(x) {
- x = TO_NUMBER_INLINE(x);
+ x = TO_NUMBER(x);
// Idempotent for +/-0.
if (x === 0) return x;
// Returns NaN for NaN and +/- Infinity.
- if (!NUMBER_IS_FINITE(x)) return NAN;
+ if (!NUMBER_IS_FINITE(x)) return NaN;
return 0.5 * MathLog((1 + x) / (1 - x));
}
@@ -232,7 +234,7 @@ function MathHypot(x, y) { // Function length is 2.
var max = 0;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
- n = TO_NUMBER_INLINE(n);
+ n = TO_NUMBER(n);
if (n === INFINITY || n === -INFINITY) return INFINITY;
n = MathAbs(n);
if (n > max) max = n;
@@ -256,7 +258,7 @@ function MathHypot(x, y) { // Function length is 2.
// ES6 draft 09-27-13, section 20.2.2.16.
function MathFroundJS(x) {
- return %MathFround(TO_NUMBER_INLINE(x));
+ return %MathFround(TO_NUMBER(x));
}
// ES6 draft 07-18-14, section 20.2.2.11
@@ -269,7 +271,7 @@ function MathClz32JS(x) {
// Using initial approximation adapted from Kahan's cbrt and 4 iterations
// of Newton's method.
function MathCbrt(x) {
- x = TO_NUMBER_INLINE(x);
+ x = TO_NUMBER(x);
if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
}
@@ -328,7 +330,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"imul", MathImul,
"sign", MathSign,
"trunc", MathTrunc,
- "tanh", MathTanh,
"asinh", MathAsinh,
"acosh", MathAcosh,
"atanh", MathAtanh,
diff --git a/deps/v8/src/messages.js b/deps/v8/src/js/messages.js
index 5441cfe34a..581f457a31 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/js/messages.js
@@ -4,15 +4,6 @@
// -------------------------------------------------------------------
-var $errorToString;
-var MakeError;
-var MakeEvalError;
-var MakeRangeError;
-var MakeReferenceError;
-var MakeSyntaxError;
-var MakeTypeError;
-var MakeURIError;
-
(function(global, utils) {
%CheckIsBootstrapping();
@@ -44,6 +35,7 @@ var InternalArray = utils.InternalArray;
var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
var ObjectDefineProperty;
var ObjectToString;
+var Script = utils.ImportNow("Script");
var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
var StringCharAt;
var StringIndexOf;
@@ -99,26 +91,26 @@ function NoSideEffectToString(obj) {
if (IS_UNDEFINED(obj)) return 'undefined';
if (IS_NULL(obj)) return 'null';
if (IS_FUNCTION(obj)) {
- var str = %_CallFunction(obj, obj, FunctionSourceString);
+ var str = %_Call(FunctionSourceString, obj, obj);
if (str.length > 128) {
str = %_SubString(str, 0, 111) + "...<omitted>..." +
%_SubString(str, str.length - 2, str.length);
}
return str;
}
- if (IS_SYMBOL(obj)) return %_CallFunction(obj, SymbolToString);
+ if (IS_SYMBOL(obj)) return %_Call(SymbolToString, obj);
if (IS_SIMD_VALUE(obj)) {
switch (typeof(obj)) {
- case 'float32x4': return %_CallFunction(obj, Float32x4ToString);
- case 'int32x4': return %_CallFunction(obj, Int32x4ToString);
- case 'int16x8': return %_CallFunction(obj, Int16x8ToString);
- case 'int8x16': return %_CallFunction(obj, Int8x16ToString);
- case 'uint32x4': return %_CallFunction(obj, Uint32x4ToString);
- case 'uint16x8': return %_CallFunction(obj, Uint16x8ToString);
- case 'uint8x16': return %_CallFunction(obj, Uint8x16ToString);
- case 'bool32x4': return %_CallFunction(obj, Bool32x4ToString);
- case 'bool16x8': return %_CallFunction(obj, Bool16x8ToString);
- case 'bool8x16': return %_CallFunction(obj, Bool8x16ToString);
+ case 'float32x4': return %_Call(Float32x4ToString, obj);
+ case 'int32x4': return %_Call(Int32x4ToString, obj);
+ case 'int16x8': return %_Call(Int16x8ToString, obj);
+ case 'int8x16': return %_Call(Int8x16ToString, obj);
+ case 'uint32x4': return %_Call(Uint32x4ToString, obj);
+ case 'uint16x8': return %_Call(Uint16x8ToString, obj);
+ case 'uint8x16': return %_Call(Uint8x16ToString, obj);
+ case 'bool32x4': return %_Call(Bool32x4ToString, obj);
+ case 'bool16x8': return %_Call(Bool16x8ToString, obj);
+ case 'bool8x16': return %_Call(Bool8x16ToString, obj);
}
}
if (IS_OBJECT(obj)
@@ -132,10 +124,10 @@ function NoSideEffectToString(obj) {
}
}
if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
- return %_CallFunction(obj, ErrorToString);
+ return %_Call(ErrorToString, obj);
}
- return %_CallFunction(obj, NoSideEffectsObjectToString);
+ return %_Call(NoSideEffectsObjectToString, obj);
}
// To determine whether we can safely stringify an object using ErrorToString
@@ -166,7 +158,7 @@ function CanBeSafelyTreatedAsAnErrorObject(obj) {
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
- return %_CallFunction(obj, ErrorToString);
+ return %_Call(ErrorToString, obj);
} else {
return TO_STRING(obj);
}
@@ -304,7 +296,7 @@ function ScriptLocationFromPosition(position,
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') {
+ if (end > 0 && %_Call(StringCharAt, this.source, end - 1) == '\r') {
end--;
}
var column = position - start;
@@ -427,7 +419,7 @@ function ScriptSourceLine(opt_line) {
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- return %_CallFunction(this.source, start, end, StringSubstring);
+ return %_Call(StringSubstring, this.source, start, end);
}
@@ -526,10 +518,7 @@ function SourceLocation(script, position, line, column, start, end) {
* Source text for this location.
*/
function SourceLocationSourceText() {
- return %_CallFunction(this.script.source,
- this.start,
- this.end,
- StringSubstring);
+ return %_Call(StringSubstring, this.script.source, this.start, this.end);
}
@@ -571,10 +560,10 @@ function SourceSlice(script, from_line, to_line, from_position, to_position) {
* the line terminating characters (if any)
*/
function SourceSliceSourceText() {
- return %_CallFunction(this.script.source,
- this.from_position,
- this.to_position,
- StringSubstring);
+ return %_Call(StringSubstring,
+ this.script.source,
+ this.from_position,
+ this.to_position);
}
utils.SetUpLockedPrototype(SourceSlice,
@@ -702,13 +691,12 @@ function CallSiteToString() {
var typeName = GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), true);
var methodName = this.getMethodName();
if (functionName) {
- if (typeName &&
- %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
+ if (typeName && %_Call(StringIndexOf, functionName, typeName) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName &&
- (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
+ (%_Call(StringIndexOf, functionName, "." + methodName) !=
functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
@@ -790,7 +778,7 @@ function FormatEvalOrigin(script) {
function FormatErrorString(error) {
try {
- return %_CallFunction(error, ErrorToString);
+ return %_Call(ErrorToString, error);
} catch (e) {
try {
return "<error: " + e + ">";
@@ -856,7 +844,7 @@ function FormatStackTrace(obj, raw_stack) {
}
lines.push(" at " + line);
}
- return %_CallFunction(lines, "\n", ArrayJoin);
+ return %_Call(ArrayJoin, lines, "\n");
}
@@ -865,12 +853,12 @@ function GetTypeName(receiver, requireConstructor) {
var constructor = receiver.constructor;
if (!constructor) {
return requireConstructor ? null :
- %_CallFunction(receiver, NoSideEffectsObjectToString);
+ %_Call(NoSideEffectsObjectToString, receiver);
}
var constructorName = constructor.name;
if (!constructorName) {
return requireConstructor ? null :
- %_CallFunction(receiver, NoSideEffectsObjectToString);
+ %_Call(NoSideEffectsObjectToString, receiver);
}
return constructorName;
}
@@ -972,6 +960,9 @@ GlobalURIError = DefineError(global, function URIError() { });
%AddNamedProperty(GlobalError.prototype, 'message', '', DONT_ENUM);
+utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
+ ['toString', ErrorToString]);
+
function ErrorToString() {
if (!IS_SPEC_OBJECT(this)) {
throw MakeTypeError(kCalledOnNonObject, "Error.prototype.toString");
@@ -980,37 +971,31 @@ function ErrorToString() {
return %ErrorToStringRT(this);
}
-utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
- ['toString', ErrorToString]);
-
-$errorToString = ErrorToString;
-
-MakeError = function(type, arg0, arg1, arg2) {
+function MakeError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalError, type, arg0, arg1, arg2);
}
-MakeRangeError = function(type, arg0, arg1, arg2) {
+function MakeRangeError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalRangeError, type, arg0, arg1, arg2);
}
-MakeSyntaxError = function(type, arg0, arg1, arg2) {
+function MakeSyntaxError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalSyntaxError, type, arg0, arg1, arg2);
}
-MakeTypeError = function(type, arg0, arg1, arg2) {
+function MakeTypeError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalTypeError, type, arg0, arg1, arg2);
}
-MakeURIError = function() {
+function MakeURIError() {
return MakeGenericError(GlobalURIError, kURIMalformed);
}
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
var StackOverflowBoilerplate = MakeRangeError(kStackOverflow);
-%DefineAccessorPropertyUnchecked(StackOverflowBoilerplate, 'stack',
- StackTraceGetter, StackTraceSetter,
- DONT_ENUM);
+utils.InstallGetterSetter(StackOverflowBoilerplate, 'stack',
+ StackTraceGetter, StackTraceSetter)
// Define actual captureStackTrace function after everything has been set up.
captureStackTrace = function captureStackTrace(obj, cons_opt) {
@@ -1043,4 +1028,13 @@ GlobalError.captureStackTrace = captureStackTrace;
"uri_error_function", GlobalURIError,
]);
+utils.Export(function(to) {
+ to.ErrorToString = ErrorToString;
+ to.MakeError = MakeError;
+ to.MakeRangeError = MakeRangeError;
+ to.MakeSyntaxError = MakeSyntaxError;
+ to.MakeTypeError = MakeTypeError;
+ to.MakeURIError = MakeURIError;
+});
+
});
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/js/object-observe.js
index 80296586d2..2c297a59c2 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/js/object-observe.js
@@ -2,13 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $observeEnqueueSpliceRecord;
-var $observeBeginPerformSplice;
-var $observeEndPerformSplice;
-
-var $observeObjectMethods;
-var $observeArrayMethods;
-
(function(global, utils) {
"use strict";
@@ -18,14 +11,17 @@ var $observeArrayMethods;
// -------------------------------------------------------------------
// Imports
+var GetHash;
var GlobalArray = global.Array;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
-
+var MakeTypeError;
var ObjectFreeze;
var ObjectIsFrozen;
utils.Import(function(from) {
+ GetHash = from.GetHash;
+ MakeTypeError = from.MakeTypeError;
ObjectFreeze = from.ObjectFreeze;
ObjectIsFrozen = from.ObjectIsFrozen;
});
@@ -207,7 +203,7 @@ function ObjectInfoGetOrCreate(object) {
performingCount: 0,
};
%WeakCollectionSet(GetObservationStateJS().objectInfoMap,
- object, objectInfo, $getHash(object));
+ object, objectInfo, GetHash(object));
}
return objectInfo;
}
@@ -215,13 +211,13 @@ function ObjectInfoGetOrCreate(object) {
function ObjectInfoGet(object) {
return %WeakCollectionGet(GetObservationStateJS().objectInfoMap, object,
- $getHash(object));
+ GetHash(object));
}
function ObjectInfoGetFromNotifier(notifier) {
return %WeakCollectionGet(GetObservationStateJS().notifierObjectInfoMap,
- notifier, $getHash(notifier));
+ notifier, GetHash(notifier));
}
@@ -230,7 +226,7 @@ function ObjectInfoGetNotifier(objectInfo) {
var notifier = { __proto__: notifierPrototype };
objectInfo.notifier = notifier;
%WeakCollectionSet(GetObservationStateJS().notifierObjectInfoMap,
- notifier, objectInfo, $getHash(notifier));
+ notifier, objectInfo, GetHash(notifier));
}
return objectInfo.notifier;
@@ -342,13 +338,13 @@ function ConvertAcceptListToTypeMap(arg) {
// normalizes. When delivery clears any pending change records, it re-optimizes.
function CallbackInfoGet(callback) {
return %WeakCollectionGet(GetObservationStateJS().callbackInfoMap, callback,
- $getHash(callback));
+ GetHash(callback));
}
function CallbackInfoSet(callback, callbackInfo) {
%WeakCollectionSet(GetObservationStateJS().callbackInfoMap,
- callback, callbackInfo, $getHash(callback));
+ callback, callbackInfo, GetHash(callback));
}
@@ -684,13 +680,14 @@ utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
"performChange", ObjectNotifierPerformChange
]);
-$observeObjectMethods = [
+var ObserveObjectMethods = [
"deliverChangeRecords", ObjectDeliverChangeRecords,
"getNotifier", ObjectGetNotifier,
"observe", ObjectObserve,
"unobserve", ObjectUnobserve
];
-$observeArrayMethods = [
+
+var ObserveArrayMethods = [
"observe", ArrayObserve,
"unobserve", ArrayUnobserve
];
@@ -700,12 +697,8 @@ $observeArrayMethods = [
var removePrototypeFn = function(f, i) {
if (i % 2 === 1) %FunctionRemovePrototype(f);
};
-$observeObjectMethods.forEach(removePrototypeFn);
-$observeArrayMethods.forEach(removePrototypeFn);
-
-$observeEnqueueSpliceRecord = EnqueueSpliceRecord;
-$observeBeginPerformSplice = BeginPerformSplice;
-$observeEndPerformSplice = EndPerformSplice;
+ObserveObjectMethods.forEach(removePrototypeFn);
+ObserveArrayMethods.forEach(removePrototypeFn);
%InstallToContext([
"native_object_get_notifier", NativeObjectGetNotifier,
@@ -717,4 +710,12 @@ $observeEndPerformSplice = EndPerformSplice;
"observers_notify_change", NotifyChange,
]);
+utils.Export(function(to) {
+ to.ObserveArrayMethods = ObserveArrayMethods;
+ to.ObserveBeginPerformSplice = BeginPerformSplice;
+ to.ObserveEndPerformSplice = EndPerformSplice;
+ to.ObserveEnqueueSpliceRecord = EnqueueSpliceRecord;
+ to.ObserveObjectMethods = ObserveObjectMethods;
+});
+
})
diff --git a/deps/v8/src/prologue.js b/deps/v8/src/js/prologue.js
index a54de36563..0f605be649 100644
--- a/deps/v8/src/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -14,6 +14,14 @@
var imports = UNDEFINED;
var imports_from_experimental = UNDEFINED;
var exports_container = %ExportFromRuntime({});
+var typed_array_setup = UNDEFINED;
+
+// Register context value to be initialized with a typed array in
+// Genesis::InitializeBuiltinTypedArrays.
+function SetupTypedArray(f) {
+ f.next = typed_array_setup;
+ typed_array_setup = f;
+}
// Export to other scripts.
// In normal natives, this exports functions to other normal natives.
@@ -94,21 +102,20 @@ function InstallFunctions(object, attributes, functions) {
// Helper function to install a getter-only accessor property.
-function InstallGetter(object, name, getter, attributes) {
+function InstallGetter(object, name, getter, attributes, prefix) {
%CheckIsBootstrapping();
- if (typeof attributes == "undefined") {
- attributes = DONT_ENUM;
- }
- SetFunctionName(getter, name, "get");
+ if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
+ SetFunctionName(getter, name, IS_UNDEFINED(prefix) ? "get" : prefix);
%FunctionRemovePrototype(getter);
- %DefineAccessorPropertyUnchecked(object, name, getter, null, attributes);
+ %DefineGetterPropertyUnchecked(object, name, getter, attributes);
%SetNativeFlag(getter);
}
// Helper function to install a getter/setter accessor property.
-function InstallGetterSetter(object, name, getter, setter) {
+function InstallGetterSetter(object, name, getter, setter, attributes) {
%CheckIsBootstrapping();
+ if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
SetFunctionName(getter, name, "get");
SetFunctionName(setter, name, "set");
%FunctionRemovePrototype(getter);
@@ -163,6 +170,7 @@ function PostNatives(utils) {
// Whitelist of exports from normal natives to experimental natives and debug.
var expose_list = [
"ArrayToString",
+ "ErrorToString",
"FunctionSourceString",
"GetIterator",
"GetMethod",
@@ -180,16 +188,27 @@ function PostNatives(utils) {
"InnerArraySort",
"InnerArrayToLocaleString",
"IsNaN",
+ "MakeError",
+ "MakeTypeError",
+ "MapEntries",
+ "MapIterator",
+ "MapIteratorNext",
"MathMax",
"MathMin",
+ "MaxSimple",
+ "MinSimple",
"ObjectIsFrozen",
"ObjectDefineProperty",
+ "ObserveArrayMethods",
+ "ObserveObjectMethods",
"OwnPropertyKeys",
+ "SameValueZero",
+ "SetIterator",
+ "SetIteratorNext",
+ "SetValues",
"SymbolToString",
"ToNameArray",
- "ToBoolean",
- "ToNumber",
- "ToString",
+ "ToPositiveInteger",
// From runtime:
"is_concat_spreadable_symbol",
"iterator_symbol",
@@ -197,6 +216,7 @@ function PostNatives(utils) {
"promise_value_symbol",
"reflect_apply",
"reflect_construct",
+ "regexp_flags_symbol",
"to_string_tag_symbol",
];
@@ -225,12 +245,15 @@ function PostExperimentals(utils) {
imports_from_experimental(exports_container);
}
- exports_container = UNDEFINED;
+ utils.InitializeRNG();
+ utils.InitializeRNG = UNDEFINED;
+ utils.CreateDoubleResultArray();
+ utils.CreateDoubleResultArray = UNDEFINED;
- utils.PostExperimentals = UNDEFINED;
- utils.PostDebug = UNDEFINED;
- utils.Import = UNDEFINED;
utils.Export = UNDEFINED;
+ utils.PostDebug = UNDEFINED;
+ utils.PostExperimentals = UNDEFINED;
+ typed_array_setup = UNDEFINED;
}
@@ -239,17 +262,34 @@ function PostDebug(utils) {
imports(exports_container);
}
+ utils.InitializeRNG();
+ utils.InitializeRNG = UNDEFINED;
+ utils.CreateDoubleResultArray();
+ utils.CreateDoubleResultArray = UNDEFINED;
+
exports_container = UNDEFINED;
+ utils.Export = UNDEFINED;
+ utils.Import = UNDEFINED;
+ utils.ImportNow = UNDEFINED;
utils.PostDebug = UNDEFINED;
utils.PostExperimentals = UNDEFINED;
- utils.Import = UNDEFINED;
- utils.Export = UNDEFINED;
+ typed_array_setup = UNDEFINED;
+}
+
+
+function InitializeBuiltinTypedArrays(utils, rng_state, rempio2result) {
+ var setup_list = typed_array_setup;
+
+ for ( ; !IS_UNDEFINED(setup_list); setup_list = setup_list.next) {
+ setup_list(rng_state, rempio2result);
+ }
}
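SetupTypedArray and InitializeBuiltinTypedArrays together form a small intrusive registration list: each native script queues a setup closure during bootstrap by threading a .next pointer through the function objects, and genesis drains the list once the backing typed arrays exist. The pattern in isolation (hypothetical names):

    let setupList = undefined;
    function register(f) {               // cf. SetupTypedArray
      f.next = setupList;                // link through the function object itself
      setupList = f;
    }
    function drain(rngState, rempio2) {  // cf. InitializeBuiltinTypedArrays
      for (let f = setupList; f !== undefined; f = f.next) f(rngState, rempio2);
    }
    register((rng) => console.log("seeded with", rng));
    drain([1, 2, 3, 4], []);             // runs registered closures in LIFO order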
+
// -----------------------------------------------------------------------
-%OptimizeObjectForAddingMultipleProperties(utils, 13);
+%OptimizeObjectForAddingMultipleProperties(utils, 15);
utils.Import = Import;
utils.ImportNow = ImportNow;
diff --git a/deps/v8/src/promise.js b/deps/v8/src/js/promise.js
index b509e76e4a..d7e9a5c67f 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -12,6 +12,9 @@
// Imports
var InternalArray = utils.InternalArray;
+var MakeTypeError;
+var promiseCombinedDeferredSymbol =
+ utils.ImportNow("promise_combined_deferred_symbol");
var promiseHasHandlerSymbol =
utils.ImportNow("promise_has_handler_symbol");
var promiseOnRejectSymbol = utils.ImportNow("promise_on_reject_symbol");
@@ -22,6 +25,10 @@ var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
// -------------------------------------------------------------------
// Status values: 0 = pending, +1 = resolved, -1 = rejected
@@ -34,7 +41,7 @@ var GlobalPromise = function Promise(resolver) {
throw MakeTypeError(kResolverNotAFunction, resolver);
var promise = PromiseInit(this);
try {
- %DebugPushPromise(promise, Promise);
+ %DebugPushPromise(promise, Promise, resolver);
resolver(function(x) { PromiseResolve(promise, x) },
function(r) { PromiseReject(promise, r) });
} catch (e) {
@@ -83,10 +90,10 @@ function PromiseCoerce(constructor, x) {
try {
then = x.then;
} catch(r) {
- return %_CallFunction(constructor, r, PromiseRejected);
+ return %_Call(PromiseRejected, constructor, r);
}
if (IS_CALLABLE(then)) {
- var deferred = %_CallFunction(constructor, PromiseDeferred);
+ var deferred = %_Call(PromiseDeferred, constructor);
try {
%_Call(then, x, deferred.resolve, deferred.reject);
} catch(r) {
@@ -100,13 +107,12 @@ function PromiseCoerce(constructor, x) {
function PromiseHandle(value, handler, deferred) {
try {
- %DebugPushPromise(deferred.promise, PromiseHandle);
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(handler);
+ %DebugPushPromise(deferred.promise, PromiseHandle, handler);
var result = handler(value);
if (result === deferred.promise)
throw MakeTypeError(kPromiseCyclic, result);
else if (IsPromise(result))
- %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain);
+ %_Call(PromiseChain, result, deferred.resolve, deferred.reject);
else
deferred.resolve(result);
} catch (exception) {
@@ -220,7 +226,7 @@ function PromiseRejected(r) {
function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
- var deferred = %_CallFunction(this.constructor, PromiseDeferred);
+ var deferred = %_Call(PromiseDeferred, this.constructor);
switch (GET_PRIVATE(this, promiseStatusSymbol)) {
case UNDEFINED:
throw MakeTypeError(kNotAPromise, this);
@@ -263,7 +269,8 @@ function PromiseThen(onResolve, onReject) {
onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
var that = this;
var constructor = this.constructor;
- return %_CallFunction(
+ return %_Call(
+ PromiseChain,
this,
function(x) {
x = PromiseCoerce(constructor, x);
@@ -277,25 +284,28 @@ function PromiseThen(onResolve, onReject) {
return onResolve(x);
}
},
- onReject,
- PromiseChain
+ onReject
);
}
// Combinators.
function PromiseCast(x) {
- // TODO(rossberg): cannot do better until we support @@create.
- return IsPromise(x) ? x : new this(function(resolve) { resolve(x) });
+ if (IsPromise(x) && x.constructor === this) {
+ return x;
+ } else {
+ return new this(function(resolve) { resolve(x) });
+ }
}
function PromiseAll(iterable) {
- var deferred = %_CallFunction(this, PromiseDeferred);
+ var deferred = %_Call(PromiseDeferred, this);
var resolutions = [];
try {
var count = 0;
var i = 0;
for (var value of iterable) {
+ var reject = function(r) { deferred.reject(r) };
this.resolve(value).then(
// Nested scope to get closure over current i.
// TODO(arv): Use an inner let binding once available.
@@ -304,8 +314,8 @@ function PromiseAll(iterable) {
resolutions[i] = x;
if (--count === 0) deferred.resolve(resolutions);
}
- })(i),
- function(r) { deferred.reject(r); });
+ })(i), reject);
+ SET_PRIVATE(reject, promiseCombinedDeferredSymbol, deferred);
++i;
++count;
}
@@ -321,12 +331,12 @@ function PromiseAll(iterable) {
}
function PromiseRace(iterable) {
- var deferred = %_CallFunction(this, PromiseDeferred);
+ var deferred = %_Call(PromiseDeferred, this);
try {
for (var value of iterable) {
- this.resolve(value).then(
- function(x) { deferred.resolve(x) },
- function(r) { deferred.reject(r) });
+ var reject = function(r) { deferred.reject(r) };
+ this.resolve(value).then(function(x) { deferred.resolve(x) }, reject);
+ SET_PRIVATE(reject, promiseCombinedDeferredSymbol, deferred);
}
} catch (e) {
deferred.reject(e)
@@ -341,8 +351,15 @@ function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
var queue = GET_PRIVATE(promise, promiseOnRejectSymbol);
if (IS_UNDEFINED(queue)) return false;
for (var i = 0; i < queue.length; i += 2) {
- if (queue[i] != PromiseIdRejectHandler) return true;
- if (PromiseHasUserDefinedRejectHandlerRecursive(queue[i + 1].promise)) {
+ var handler = queue[i];
+ if (handler !== PromiseIdRejectHandler) {
+ var deferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
+ if (IS_UNDEFINED(deferred)) return true;
+ if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
+ return true;
+ }
+ } else if (PromiseHasUserDefinedRejectHandlerRecursive(
+ queue[i + 1].promise)) {
return true;
}
}
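
Note on promiseCombinedDeferredSymbol: the reject callbacks that PromiseAll and
PromiseRace install on each input promise are V8-internal, so by themselves
they would make every input look like it already has a rejection handler.
Tagging each internal reject function with the combined deferred lets
PromiseHasUserDefinedRejectHandlerRecursive hop from an input promise's queue
to the combined promise and keep searching there. A user-level illustration:

    // The catch() below hangs off the Promise.all() promise, not off p.
    // The symbol tag is what lets the handler walk starting at p reach
    // it, so no spurious "unhandled rejection" is reported for p.
    var p = Promise.reject(new Error("boom"));
    Promise.all([p]).catch(function(e) { /* handled here */ });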
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/js/proxy.js
index cc45b32b3d..fc38680a13 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/js/proxy.js
@@ -13,10 +13,11 @@
var GlobalFunction = global.Function;
var GlobalObject = global.Object;
-
+var MakeTypeError;
var ToNameArray;
utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
ToNameArray = from.ToNameArray;
});
@@ -80,7 +81,7 @@ function DerivedGetTrap(receiver, name) {
} else {
if (IS_UNDEFINED(desc.get)) { return desc.get }
// The proposal says: desc.get.call(receiver)
- return %_CallFunction(receiver, desc.get)
+ return %_Call(desc.get, receiver)
}
}
@@ -98,7 +99,7 @@ function DerivedSetTrap(receiver, name, val) {
} else { // accessor
if (desc.set) {
// The proposal says: desc.set.call(receiver, val)
- %_CallFunction(receiver, val, desc.set)
+ %_Call(desc.set, receiver, val)
return true
} else {
return false
@@ -116,7 +117,7 @@ function DerivedSetTrap(receiver, name, val) {
} else { // accessor
if (desc.set) {
// The proposal says: desc.set.call(receiver, val)
- %_CallFunction(receiver, val, desc.set)
+ %_Call(desc.set, receiver, val)
return true
} else {
return false
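
Note: throughout this diff, %_CallFunction(receiver, args..., fn) becomes
%_Call(fn, receiver, args...): the callee moves from the last position to the
first, matching the (target, receiver, arguments...) order of the ES6 Call
abstract operation. The two intrinsics are otherwise interchangeable here:

    // old order: receiver first, callee last
    %_CallFunction(receiver, val, desc.set)
    // new order: callee, then receiver, then arguments
    %_Call(desc.set, receiver, val)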
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/js/regexp.js
index e19a813483..55466dc03d 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/js/regexp.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $regexpLastMatchInfoOverride;
-
(function(global, utils) {
%CheckIsBootstrapping();
@@ -11,19 +9,20 @@ var $regexpLastMatchInfoOverride;
// -------------------------------------------------------------------
// Imports
-var FLAG_harmony_regexps;
-var FLAG_harmony_unicode_regexps;
+var FLAG_harmony_tolength;
+var GlobalObject = global.Object;
var GlobalRegExp = global.RegExp;
+var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-var ToNumber;
+var MakeTypeError;
+var splitSymbol = utils.ImportNow("split_symbol");
-utils.Import(function(from) {
- ToNumber = from.ToNumber;
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tolength = from.FLAG_harmony_tolength;
});
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_regexps = from.FLAG_harmony_regexps;
- FLAG_harmony_unicode_regexps = from.FLAG_harmony_unicode_regexps;
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
});
// -------------------------------------------------------------------
@@ -42,12 +41,6 @@ var RegExpLastMatchInfo = new InternalPackedArray(
0 // REGEXP_FIRST_CAPTURE + 1
);
-// Override last match info with an array of actual substrings.
-// Used internally by replace regexp with function.
-// The array has the format of an "apply" argument for a replacement
-// function.
-$regexpLastMatchInfoOverride = null;
-
// -------------------------------------------------------------------
// A recursive descent parser for Patterns according to the grammar of
@@ -56,14 +49,12 @@ function DoConstructRegExp(object, pattern, flags) {
// RegExp : Called as constructor; see ECMA-262, section 15.10.4.
if (IS_REGEXP(pattern)) {
if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
- flags = (pattern.global ? 'g' : '')
- + (pattern.ignoreCase ? 'i' : '')
- + (pattern.multiline ? 'm' : '');
- if (FLAG_harmony_unicode_regexps)
- flags += (pattern.unicode ? 'u' : '');
- if (FLAG_harmony_regexps)
- flags += (pattern.sticky ? 'y' : '');
- pattern = pattern.source;
+ flags = (REGEXP_GLOBAL(pattern) ? 'g' : '')
+ + (REGEXP_IGNORE_CASE(pattern) ? 'i' : '')
+ + (REGEXP_MULTILINE(pattern) ? 'm' : '')
+ + (REGEXP_UNICODE(pattern) ? 'u' : '')
+ + (REGEXP_STICKY(pattern) ? 'y' : '');
+ pattern = REGEXP_SOURCE(pattern);
}
pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
@@ -112,7 +103,6 @@ function RegExpCompileJS(pattern, flags) {
function DoRegExpExec(regexp, string, index) {
var result = %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
- if (result !== null) $regexpLastMatchInfoOverride = null;
return result;
}
@@ -147,7 +137,8 @@ function RegExpExecNoTests(regexp, string, start) {
// Must be called with RegExp, string and positive integer as arguments.
var matchInfo = %_RegExpExec(regexp, string, start, RegExpLastMatchInfo);
if (matchInfo !== null) {
- $regexpLastMatchInfoOverride = null;
+ // ES6 21.2.5.2.2 step 18.
+ if (REGEXP_STICKY(regexp)) regexp.lastIndex = matchInfo[CAPTURE1];
RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
}
regexp.lastIndex = 0;
@@ -164,11 +155,11 @@ function RegExpExecJS(string) {
string = TO_STRING(string);
var lastIndex = this.lastIndex;
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
+ // Conversion is required by the ES2015 specification (RegExpBuiltinExec
+ // algorithm, step 4) even if the value is discarded for non-global RegExps.
+ var i = TO_LENGTH_OR_INTEGER(lastIndex);
- var updateLastIndex = this.global || (FLAG_harmony_regexps && this.sticky);
+ var updateLastIndex = REGEXP_GLOBAL(this) || REGEXP_STICKY(this);
if (updateLastIndex) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
@@ -187,7 +178,6 @@ function RegExpExecJS(string) {
}
// Successful match.
- $regexpLastMatchInfoOverride = null;
if (updateLastIndex) {
this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
}
@@ -212,11 +202,11 @@ function RegExpTest(string) {
var lastIndex = this.lastIndex;
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
+ // Conversion is required by the ES2015 specification (RegExpBuiltinExec
+ // algorithm, step 4) even if the value is discarded for non-global RegExps.
+ var i = TO_LENGTH_OR_INTEGER(lastIndex);
- if (this.global || (FLAG_harmony_regexps && this.sticky)) {
+ if (REGEXP_GLOBAL(this) || REGEXP_STICKY(this)) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
return false;
@@ -227,7 +217,6 @@ function RegExpTest(string) {
this.lastIndex = 0;
return false;
}
- $regexpLastMatchInfoOverride = null;
this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
return true;
} else {
@@ -236,10 +225,11 @@ function RegExpTest(string) {
// checks whether this.source starts with '.*' and that the third char is
// not a '?'. But see https://code.google.com/p/v8/issues/detail?id=3560
var regexp = this;
- if (regexp.source.length >= 3 &&
- %_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
- %_StringCharCodeAt(regexp.source, 1) == 42 && // '*'
- %_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
+ var source = REGEXP_SOURCE(regexp);
+  if (source.length >= 3 &&
+      %_StringCharCodeAt(source, 0) == 46 &&  // '.'
+      %_StringCharCodeAt(source, 1) == 42 &&  // '*'
+      %_StringCharCodeAt(source, 2) != 63) {  // '?'
regexp = TrimRegExp(regexp);
}
// matchIndices is either null or the RegExpLastMatchInfo array.
@@ -248,7 +238,6 @@ function RegExpTest(string) {
this.lastIndex = 0;
return false;
}
- $regexpLastMatchInfoOverride = null;
return true;
}
}
@@ -257,9 +246,10 @@ function TrimRegExp(regexp) {
if (!%_ObjectEquals(regexp_key, regexp)) {
regexp_key = regexp;
regexp_val =
- new GlobalRegExp(%_SubString(regexp.source, 2, regexp.source.length),
- (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
- : regexp.multiline ? "m" : ""));
+ new GlobalRegExp(
+ %_SubString(REGEXP_SOURCE(regexp), 2, REGEXP_SOURCE(regexp).length),
+ (REGEXP_IGNORE_CASE(regexp) ? REGEXP_MULTILINE(regexp) ? "im" : "i"
+ : REGEXP_MULTILINE(regexp) ? "m" : ""));
}
return regexp_val;
}
@@ -270,24 +260,91 @@ function RegExpToString() {
throw MakeTypeError(kIncompatibleMethodReceiver,
'RegExp.prototype.toString', this);
}
- var result = '/' + this.source + '/';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- if (FLAG_harmony_unicode_regexps && this.unicode) result += 'u';
- if (FLAG_harmony_regexps && this.sticky) result += 'y';
+ var result = '/' + REGEXP_SOURCE(this) + '/';
+ if (REGEXP_GLOBAL(this)) result += 'g';
+ if (REGEXP_IGNORE_CASE(this)) result += 'i';
+ if (REGEXP_MULTILINE(this)) result += 'm';
+ if (REGEXP_UNICODE(this)) result += 'u';
+ if (REGEXP_STICKY(this)) result += 'y';
return result;
}
+// ES6 21.2.5.11.
+function RegExpSplit(string, limit) {
+ // TODO(yangguo): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@split", this);
+ }
+ var separator = this;
+ var subject = TO_STRING(string);
+
+ limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
+ var length = subject.length;
+
+ if (limit === 0) return [];
+
+ if (length === 0) {
+    if (DoRegExpExec(separator, subject, 0) !== null) return [];
+ return [subject];
+ }
+
+ var currentIndex = 0;
+ var startIndex = 0;
+ var startMatch = 0;
+ var result = new InternalArray();
+
+ outer_loop:
+ while (true) {
+ if (startIndex === length) {
+ result[result.length] = %_SubString(subject, currentIndex, length);
+ break;
+ }
+
+ var matchInfo = DoRegExpExec(separator, subject, startIndex);
+ if (matchInfo === null || length === (startMatch = matchInfo[CAPTURE0])) {
+ result[result.length] = %_SubString(subject, currentIndex, length);
+ break;
+ }
+ var endIndex = matchInfo[CAPTURE1];
+
+ // We ignore a zero-length match at the currentIndex.
+ if (startIndex === endIndex && endIndex === currentIndex) {
+ startIndex++;
+ continue;
+ }
+
+ result[result.length] = %_SubString(subject, currentIndex, startMatch);
+
+ if (result.length === limit) break;
+
+ var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
+ for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
+ var start = matchInfo[i++];
+ var end = matchInfo[i++];
+ if (end != -1) {
+ result[result.length] = %_SubString(subject, start, end);
+ } else {
+ result[result.length] = UNDEFINED;
+ }
+ if (result.length === limit) break outer_loop;
+ }
+
+ startIndex = currentIndex = endIndex;
+ }
+
+ var array_result = [];
+ %MoveArrayContents(result, array_result);
+ return array_result;
+}
+
+
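
Note: the startIndex/currentIndex bookkeeping above implements the ES6 rule
that a zero-width match at the current position is skipped instead of
producing an empty chunk. For example:

    "abc".split(/x*/)   // ["a", "b", "c"]; empty matches only advance
                        // the search position, they never split twice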
// Getters for the static properties lastMatch, lastParen, leftContext, and
// rightContext of the RegExp constructor. The properties are computed based
// on the captures array of the last successful match and the subject string
// of the last successful match.
function RegExpGetLastMatch() {
- if ($regexpLastMatchInfoOverride !== null) {
- return OVERRIDE_MATCH($regexpLastMatchInfoOverride);
- }
var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
return %_SubString(regExpSubject,
RegExpLastMatchInfo[CAPTURE0],
@@ -296,11 +353,6 @@ function RegExpGetLastMatch() {
function RegExpGetLastParen() {
- if ($regexpLastMatchInfoOverride) {
- var override = $regexpLastMatchInfoOverride;
- if (override.length <= 3) return '';
- return override[override.length - 3];
- }
var length = NUMBER_OF_CAPTURES(RegExpLastMatchInfo);
if (length <= 2) return ''; // There were no captures.
// We match the SpiderMonkey behavior: return the substring defined by the
@@ -319,14 +371,8 @@ function RegExpGetLastParen() {
function RegExpGetLeftContext() {
var start_index;
var subject;
- if (!$regexpLastMatchInfoOverride) {
- start_index = RegExpLastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT(RegExpLastMatchInfo);
- } else {
- var override = $regexpLastMatchInfoOverride;
- start_index = OVERRIDE_POS(override);
- subject = OVERRIDE_SUBJECT(override);
- }
+ start_index = RegExpLastMatchInfo[CAPTURE0];
+ subject = LAST_SUBJECT(RegExpLastMatchInfo);
return %_SubString(subject, 0, start_index);
}
@@ -334,15 +380,8 @@ function RegExpGetLeftContext() {
function RegExpGetRightContext() {
var start_index;
var subject;
- if (!$regexpLastMatchInfoOverride) {
- start_index = RegExpLastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT(RegExpLastMatchInfo);
- } else {
- var override = $regexpLastMatchInfoOverride;
- subject = OVERRIDE_SUBJECT(override);
- var match = OVERRIDE_MATCH(override);
- start_index = OVERRIDE_POS(override) + match.length;
- }
+ start_index = RegExpLastMatchInfo[CAPTURE1];
+ subject = LAST_SUBJECT(RegExpLastMatchInfo);
return %_SubString(subject, start_index, subject.length);
}
@@ -352,12 +391,6 @@ function RegExpGetRightContext() {
// called with indices from 1 to 9.
function RegExpMakeCaptureGetter(n) {
return function foo() {
- if ($regexpLastMatchInfoOverride) {
- if (n < $regexpLastMatchInfoOverride.length - 2) {
- return OVERRIDE_CAPTURE($regexpLastMatchInfoOverride, n);
- }
- return '';
- }
var index = n * 2;
if (index >= NUMBER_OF_CAPTURES(RegExpLastMatchInfo)) return '';
var matchStart = RegExpLastMatchInfo[CAPTURE(index)];
@@ -367,9 +400,54 @@ function RegExpMakeCaptureGetter(n) {
};
}
+
+// ES6 21.2.5.4.
+function RegExpGetGlobal() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.global");
+ }
+ return !!REGEXP_GLOBAL(this);
+}
+%FunctionSetName(RegExpGetGlobal, "RegExp.prototype.global");
+%SetNativeFlag(RegExpGetGlobal);
+
+
+// ES6 21.2.5.5.
+function RegExpGetIgnoreCase() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
+ }
+ return !!REGEXP_IGNORE_CASE(this);
+}
+%FunctionSetName(RegExpGetIgnoreCase, "RegExp.prototype.ignoreCase");
+%SetNativeFlag(RegExpGetIgnoreCase);
+
+
+// ES6 21.2.5.7.
+function RegExpGetMultiline() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.multiline");
+ }
+ return !!REGEXP_MULTILINE(this);
+}
+%FunctionSetName(RegExpGetMultiline, "RegExp.prototype.multiline");
+%SetNativeFlag(RegExpGetMultiline);
+
+
+// ES6 21.2.5.10.
+function RegExpGetSource() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.source");
+ }
+ return REGEXP_SOURCE(this);
+}
+%FunctionSetName(RegExpGetSource, "RegExp.prototype.source");
+%SetNativeFlag(RegExpGetSource);
+
// -------------------------------------------------------------------
%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
+%FunctionSetPrototype(GlobalRegExp, new GlobalObject());
%AddNamedProperty(
GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
%SetCode(GlobalRegExp, RegExpConstructor);
@@ -378,9 +456,15 @@ utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
"exec", RegExpExecJS,
"test", RegExpTest,
"toString", RegExpToString,
- "compile", RegExpCompileJS
+ "compile", RegExpCompileJS,
+ splitSymbol, RegExpSplit,
]);
+utils.InstallGetter(GlobalRegExp.prototype, 'global', RegExpGetGlobal);
+utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
+utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
+utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
+
// The length of compile is 1 in SpiderMonkey.
%FunctionSetLength(GlobalRegExp.prototype.compile, 1);
@@ -396,57 +480,36 @@ var RegExpSetInput = function(string) {
};
%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'input', RegExpGetInput,
- RegExpSetInput, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$_', RegExpGetInput,
- RegExpSetInput, DONT_ENUM | DONT_DELETE);
-
-// The properties multiline and $* are aliases for each other. When this
-// value is set in SpiderMonkey, the value it is set to is coerced to a
-// boolean. We mimic that behavior with a slight difference: in SpiderMonkey
-// the value of the expression 'RegExp.multiline = null' (for instance) is the
-// boolean false (i.e., the value after coercion), while in V8 it is the value
-// null (i.e., the value before coercion).
-
-// Getter and setter for multiline.
-var multiline = false;
-var RegExpGetMultiline = function() { return multiline; };
-var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
-
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'multiline', RegExpGetMultiline,
- RegExpSetMultiline, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$*', RegExpGetMultiline,
- RegExpSetMultiline,
- DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'input', RegExpGetInput, RegExpSetInput,
+ DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$_', RegExpGetInput, RegExpSetInput,
+ DONT_ENUM | DONT_DELETE);
var NoOpSetter = function(ignored) {};
// Static properties set by a successful match.
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
- NoOpSetter, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$&', RegExpGetLastMatch,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'lastParen', RegExpGetLastParen,
- NoOpSetter, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$+', RegExpGetLastParen,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'leftContext',
- RegExpGetLeftContext, NoOpSetter,
- DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$`', RegExpGetLeftContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'rightContext',
- RegExpGetRightContext, NoOpSetter,
- DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, "$'", RegExpGetRightContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$&', RegExpGetLastMatch, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'lastParen', RegExpGetLastParen,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$+', RegExpGetLastParen, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'leftContext', RegExpGetLeftContext,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$`', RegExpGetLeftContext, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'rightContext', RegExpGetRightContext,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, "$'", RegExpGetRightContext, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
for (var i = 1; i < 10; ++i) {
- %DefineAccessorPropertyUnchecked(GlobalRegExp, '$' + i,
- RegExpMakeCaptureGetter(i), NoOpSetter,
- DONT_DELETE);
+ utils.InstallGetterSetter(GlobalRegExp, '$' + i, RegExpMakeCaptureGetter(i),
+ NoOpSetter, DONT_DELETE);
}
%ToFastProperties(GlobalRegExp);
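
Note: with utils.InstallGetter, the flag properties move off RegExp instances
onto RegExp.prototype as the accessors ES6 specifies. Reads still work through
the prototype chain, but own-property lookups change observably:

    Object.getOwnPropertyDescriptor(/x/g, "global")    // now undefined
    typeof Object.getOwnPropertyDescriptor(
        RegExp.prototype, "global").get                // "function"
    /x/g.global                                        // still true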
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/js/runtime.js
index 0e82d862bd..b5e23671f4 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/js/runtime.js
@@ -11,14 +11,6 @@
// The following declarations are shared with other native JS files.
// They are all declared at this one spot to avoid redeclaration errors.
-var $NaN;
-var $nonNumberToNumber;
-var $sameValue;
-var $sameValueZero;
-var $toNumber;
-var $toPositiveInteger;
-
-var harmony_tolength = false;
(function(global, utils) {
@@ -29,6 +21,11 @@ var GlobalBoolean = global.Boolean;
var GlobalString = global.String;
var isConcatSpreadableSymbol =
utils.ImportNow("is_concat_spreadable_symbol");
+var MakeRangeError;
+
+utils.Import(function(from) {
+ MakeRangeError = from.MakeRangeError;
+});
// ----------------------------------------------------------------------------
@@ -168,52 +165,6 @@ function CONCAT_ITERABLE_TO_ARRAY(iterable) {
-------------------------------------
*/
-// ECMA-262, section 9.2, page 30
-function ToBoolean(x) {
- if (IS_BOOLEAN(x)) return x;
- if (IS_STRING(x)) return x.length != 0;
- if (x == null) return false;
- if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
- return true;
-}
-
-
-// ECMA-262, section 9.3, page 31.
-function ToNumber(x) {
- if (IS_NUMBER(x)) return x;
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return NAN;
- // Types that can't be converted to number are caught in DefaultNumber.
- return (IS_NULL(x)) ? 0 : ToNumber(DefaultNumber(x));
-}
-
-function NonNumberToNumber(x) {
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return NAN;
- // Types that can't be converted to number are caught in DefaultNumber.
- return (IS_NULL(x)) ? 0 : ToNumber(DefaultNumber(x));
-}
-
-
-// ECMA-262, section 9.8, page 35.
-function ToString(x) {
- if (IS_STRING(x)) return x;
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- // Types that can't be converted to string are caught in DefaultString.
- return (IS_NULL(x)) ? 'null' : ToString(DefaultString(x));
-}
-
-
// ES5, section 9.12
function SameValue(x, y) {
if (typeof x != typeof y) return false;
@@ -243,7 +194,7 @@ function SameValueZero(x, y) {
function ConcatIterableToArray(target, iterable) {
var index = target.length;
for (var element of iterable) {
- %AddElement(target, index++, element);
+ AddIndexedProperty(target, index++, element);
}
return target;
}
@@ -254,14 +205,18 @@ function ConcatIterableToArray(target, iterable) {
---------------------------------
*/
-// Returns if the given x is a primitive value - not an object or a
-// function.
-function IsPrimitive(x) {
- // Even though the type of null is "object", null is still
- // considered a primitive value. IS_SPEC_OBJECT handles this correctly
- // (i.e., it will return false if x is null).
- return !IS_SPEC_OBJECT(x);
+
+// This function should be called rather than %AddElement in contexts where the
+// argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
+// this is a concern at basically all callsites.
+function AddIndexedProperty(obj, index, value) {
+ if (index === TO_UINT32(index) && index !== kMaxUint32) {
+ %AddElement(obj, index, value);
+ } else {
+ %AddNamedProperty(obj, TO_STRING(index), value, NONE);
+ }
}
+%SetForceInlineFlag(AddIndexedProperty);
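
Note: under ES2015 ToLength, array-like lengths run up to 2**53 - 1, so an
index is no longer guaranteed to fit the element-index range that %AddElement
assumes; anything larger must be stored as a named property. An illustration
of the split (kMaxUint32 is 2**32 - 1):

    AddIndexedProperty(t, 4294967294, v);      // valid element index: %AddElement
    AddIndexedProperty(t, 4294967295, v);      // kMaxUint32: named key "4294967295"
    AddIndexedProperty(t, Math.pow(2, 40), v); // likewise stored as a named key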
// ES6, draft 10-14-14, section 22.1.3.1.1
@@ -269,52 +224,30 @@ function IsConcatSpreadable(O) {
if (!IS_SPEC_OBJECT(O)) return false;
var spreadable = O[isConcatSpreadableSymbol];
if (IS_UNDEFINED(spreadable)) return IS_ARRAY(O);
- return ToBoolean(spreadable);
+ return TO_BOOLEAN(spreadable);
}
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultNumber(x) {
- var valueOf = x.valueOf;
- if (IS_CALLABLE(valueOf)) {
- var v = %_Call(valueOf, x);
- if (IS_SYMBOL(v)) throw MakeTypeError(kSymbolToNumber);
- if (IS_SIMD_VALUE(x)) throw MakeTypeError(kSimdToNumber);
- if (IsPrimitive(v)) return v;
- }
- var toString = x.toString;
- if (IS_CALLABLE(toString)) {
- var s = %_Call(toString, x);
- if (IsPrimitive(s)) return s;
- }
- throw MakeTypeError(kCannotConvertToPrimitive);
+function ToPositiveInteger(x, rangeErrorIndex) {
+ var i = TO_INTEGER_MAP_MINUS_ZERO(x);
+ if (i < 0) throw MakeRangeError(rangeErrorIndex);
+ return i;
}
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultString(x) {
- if (!IS_SYMBOL_WRAPPER(x)) {
- if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToString);
- var toString = x.toString;
- if (IS_CALLABLE(toString)) {
- var s = %_Call(toString, x);
- if (IsPrimitive(s)) return s;
- }
- var valueOf = x.valueOf;
- if (IS_CALLABLE(valueOf)) {
- var v = %_Call(valueOf, x);
- if (IsPrimitive(v)) return v;
- }
- }
- throw MakeTypeError(kCannotConvertToPrimitive);
+function MaxSimple(a, b) {
+ return a > b ? a : b;
}
-function ToPositiveInteger(x, rangeErrorIndex) {
- var i = TO_INTEGER_MAP_MINUS_ZERO(x);
- if (i < 0) throw MakeRangeError(rangeErrorIndex);
- return i;
+
+function MinSimple(a, b) {
+ return a > b ? b : a;
}
+
+%SetForceInlineFlag(MaxSimple);
+%SetForceInlineFlag(MinSimple);
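
Note: MaxSimple/MinSimple let natives code take a min/max without calling the
user-patchable Math.max/Math.min, and the force-inline flag keeps them cheap.
The "simple" part is observable at the edges:

    MaxSimple(NaN, 1)   // 1   (Math.max would return NaN)
    MaxSimple(0, -0)    // -0  (Math.max would return +0)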
+
//----------------------------------------------------------------------------
// NOTE: Setting the prototype for Array must take place as early as
@@ -327,12 +260,14 @@ function ToPositiveInteger(x, rangeErrorIndex) {
// ----------------------------------------------------------------------------
// Exports
-$NaN = %GetRootNaN();
-$nonNumberToNumber = NonNumberToNumber;
-$sameValue = SameValue;
-$sameValueZero = SameValueZero;
-$toNumber = ToNumber;
-$toPositiveInteger = ToPositiveInteger;
+utils.Export(function(to) {
+ to.AddIndexedProperty = AddIndexedProperty;
+ to.MaxSimple = MaxSimple;
+ to.MinSimple = MinSimple;
+ to.SameValue = SameValue;
+ to.SameValueZero = SameValueZero;
+ to.ToPositiveInteger = ToPositiveInteger;
+});
%InstallToContext([
"apply_prepare_builtin", APPLY_PREPARE,
@@ -343,14 +278,6 @@ $toPositiveInteger = ToPositiveInteger;
%InstallToContext([
"concat_iterable_to_array", ConcatIterableToArray,
- "non_number_to_number", NonNumberToNumber,
- "to_number_fun", ToNumber,
]);
-utils.Export(function(to) {
- to.ToBoolean = ToBoolean;
- to.ToNumber = ToNumber;
- to.ToString = ToString;
-});
-
})
diff --git a/deps/v8/src/harmony-spread.js b/deps/v8/src/js/spread.js
index b271c7efe5..235c91ab79 100644
--- a/deps/v8/src/harmony-spread.js
+++ b/deps/v8/src/js/spread.js
@@ -9,6 +9,11 @@
// -------------------------------------------------------------------
// Imports
var InternalArray = utils.InternalArray;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
// -------------------------------------------------------------------
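
Note: the import dance above recurs in nearly every file of this diff. The
error-maker functions stop being globals, so each natives file declares a
binding and fills it in through utils.Import:

    var MakeTypeError;
    utils.Import(function(from) { MakeTypeError = from.MakeTypeError; });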
diff --git a/deps/v8/src/string-iterator.js b/deps/v8/src/js/string-iterator.js
index 660dc7c98b..ece207cd74 100644
--- a/deps/v8/src/string-iterator.js
+++ b/deps/v8/src/js/string-iterator.js
@@ -12,13 +12,19 @@
// Imports
var GlobalString = global.String;
+var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
var stringIteratorIteratedStringSymbol =
utils.ImportNow("string_iterator_iterated_string_symbol");
var stringIteratorNextIndexSymbol =
utils.ImportNow("string_iterator_next_index_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
// -------------------------------------------------------------------
function StringIterator() {}
@@ -80,7 +86,7 @@ function StringPrototypeIterator() {
//-------------------------------------------------------------------
-%FunctionSetPrototype(StringIterator, {__proto__: $iteratorPrototype});
+%FunctionSetPrototype(StringIterator, {__proto__: IteratorPrototype});
%FunctionSetInstanceClassName(StringIterator, 'String Iterator');
utils.InstallFunctions(StringIterator.prototype, DONT_ENUM, [
diff --git a/deps/v8/src/string.js b/deps/v8/src/js/string.js
index bd20226757..8ff5b3c008 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/js/string.js
@@ -15,18 +15,21 @@ var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
+var MakeRangeError;
+var MakeTypeError;
var RegExpExec;
var RegExpExecNoTests;
var RegExpLastMatchInfo;
-var ToNumber;
+var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
RegExpExec = from.RegExpExec;
RegExpExecNoTests = from.RegExpExecNoTests;
RegExpLastMatchInfo = from.RegExpLastMatchInfo;
- ToNumber = from.ToNumber;
});
//-------------------------------------------------------------------
@@ -118,7 +121,7 @@ function StringLastIndexOfJS(pat /* position */) { // length == 1
var patLength = pat.length;
var index = subLength - patLength;
if (%_ArgumentsLength() > 1) {
- var position = ToNumber(%_Arguments(1));
+ var position = TO_NUMBER(%_Arguments(1));
if (!NUMBER_IS_NAN(position)) {
position = TO_INTEGER(position);
if (position < 0) {
@@ -153,12 +156,8 @@ function StringMatchJS(regexp) {
var subject = TO_STRING(this);
if (IS_REGEXP(regexp)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even though
- // value is discarded.
- var lastIndex = TO_INTEGER(regexp.lastIndex);
- if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
+ if (!REGEXP_GLOBAL(regexp)) return RegExpExecNoTests(regexp, subject, 0);
var result = %StringMatch(subject, regexp, RegExpLastMatchInfo);
- if (result !== null) $regexpLastMatchInfoOverride = null;
regexp.lastIndex = 0;
return result;
}
@@ -181,11 +180,10 @@ function StringNormalizeJS() {
var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm =
- %_CallFunction(NORMALIZATION_FORMS, form, ArrayIndexOf);
+ var normalizationForm = %_Call(ArrayIndexOf, NORMALIZATION_FORMS, form);
if (normalizationForm === -1) {
throw MakeRangeError(kNormalizationForm,
- %_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
+ %_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
}
return s;
@@ -224,14 +222,10 @@ function StringReplace(search, replace) {
// ...... string replace (with $-expansion)
if (IS_REGEXP(search)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even if
- // value is discarded.
- var lastIndex = TO_INTEGER(search.lastIndex);
-
if (!IS_CALLABLE(replace)) {
replace = TO_STRING(replace);
- if (!search.global) {
+ if (!REGEXP_GLOBAL(search)) {
// Non-global regexp search, string replace.
var match = RegExpExec(search, subject, 0);
if (match == null) {
@@ -249,27 +243,11 @@ function StringReplace(search, replace) {
// Global regexp search, string replace.
search.lastIndex = 0;
- if ($regexpLastMatchInfoOverride == null) {
- return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, RegExpLastMatchInfo);
- } else {
- // We use this hack to detect whether StringReplaceRegExpWithString
- // found at least one hit. In that case we need to remove any
- // override.
- var saved_subject = RegExpLastMatchInfo[LAST_SUBJECT_INDEX];
- RegExpLastMatchInfo[LAST_SUBJECT_INDEX] = 0;
- var answer = %StringReplaceGlobalRegExpWithString(
- subject, search, replace, RegExpLastMatchInfo);
- if (%_IsSmi(RegExpLastMatchInfo[LAST_SUBJECT_INDEX])) {
- RegExpLastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
- } else {
- $regexpLastMatchInfoOverride = null;
- }
- return answer;
- }
+ return %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, RegExpLastMatchInfo);
}
- if (search.global) {
+ if (REGEXP_GLOBAL(search)) {
// Global regexp search, function replace.
return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
}
@@ -443,22 +421,16 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
// input string and some replacements that were returned from the replace
// function.
var match_start = 0;
- var override = new InternalPackedArray(null, 0, subject);
for (var i = 0; i < len; i++) {
var elem = res[i];
if (%_IsSmi(elem)) {
- // Integers represent slices of the original string. Use these to
- // get the offsets we need for the override array (so things like
- // RegExp.leftContext work during the callback function.
+ // Integers represent slices of the original string.
if (elem > 0) {
match_start = (elem >> 11) + (elem & 0x7ff);
} else {
match_start = res[++i] - elem;
}
} else {
- override[0] = elem;
- override[1] = match_start;
- $regexpLastMatchInfoOverride = override;
var func_result = replace(elem, match_start, subject);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
@@ -472,7 +444,6 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
- $regexpLastMatchInfoOverride = elem;
var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
@@ -480,7 +451,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
}
}
}
- var result = %StringBuilderConcat(res, res.length, subject);
+ var result = %StringBuilderConcat(res, len, subject);
resultArray.length = 0;
reusableReplaceArray = resultArray;
return result;
@@ -584,95 +555,38 @@ function StringSlice(start, end) {
}
-// ECMA-262 section 15.5.4.14
+// ES6 21.1.3.17.
function StringSplitJS(separator, limit) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
- var subject = TO_STRING(this);
- limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
-
- var length = subject.length;
- if (!IS_REGEXP(separator)) {
- var separator_string = TO_STRING(separator);
-
- if (limit === 0) return [];
-
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string.
- if (IS_UNDEFINED(separator)) return [subject];
-
- var separator_length = separator_string.length;
-
- // If the separator string is empty then return the elements in the subject.
- if (separator_length === 0) return %StringToArray(subject, limit);
-
- var result = %StringSplit(subject, separator_string, limit);
-
- return result;
- }
-
- if (limit === 0) return [];
-
- // Separator is a regular expression.
- return StringSplitOnRegExp(subject, separator, limit, length);
-}
-
-
-function StringSplitOnRegExp(subject, separator, limit, length) {
- if (length === 0) {
- if (RegExpExec(separator, subject, 0, 0) != null) {
- return [];
+ if (!IS_NULL_OR_UNDEFINED(separator)) {
+ var splitter = separator[splitSymbol];
+ if (!IS_UNDEFINED(splitter)) {
+ if (!IS_CALLABLE(splitter)) {
+ throw MakeTypeError(kCalledNonCallable, splitter);
+ }
+ return %_Call(splitter, separator, this, limit);
}
- return [subject];
}
- var currentIndex = 0;
- var startIndex = 0;
- var startMatch = 0;
- var result = new InternalArray();
-
- outer_loop:
- while (true) {
-
- if (startIndex === length) {
- result[result.length] = %_SubString(subject, currentIndex, length);
- break;
- }
+ var subject = TO_STRING(this);
+ limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
- var matchInfo = RegExpExec(separator, subject, startIndex);
- if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result[result.length] = %_SubString(subject, currentIndex, length);
- break;
- }
- var endIndex = matchInfo[CAPTURE1];
+ var length = subject.length;
+ var separator_string = TO_STRING(separator);
- // We ignore a zero-length match at the currentIndex.
- if (startIndex === endIndex && endIndex === currentIndex) {
- startIndex++;
- continue;
- }
+ if (limit === 0) return [];
- result[result.length] = %_SubString(subject, currentIndex, startMatch);
+ // ECMA-262 says that if separator is undefined, the result should
+ // be an array of size 1 containing the entire string.
+ if (IS_UNDEFINED(separator)) return [subject];
- if (result.length === limit) break;
+ var separator_length = separator_string.length;
- var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
- for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
- var start = matchInfo[i++];
- var end = matchInfo[i++];
- if (end != -1) {
- result[result.length] = %_SubString(subject, start, end);
- } else {
- result[result.length] = UNDEFINED;
- }
- if (result.length === limit) break outer_loop;
- }
+ // If the separator string is empty then return the elements in the subject.
+ if (separator_length === 0) return %StringToArray(subject, limit);
- startIndex = currentIndex = endIndex;
- }
- var array_result = [];
- %MoveArrayContents(result, array_result);
- return array_result;
+ return %StringSplit(subject, separator_string, limit);
}
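
Note: String.prototype.split now dispatches on the separator's @@split method
(the private splitSymbol), which is how RegExpSplit from regexp.js gets control
for regexp separators. Symbol.split itself is not exposed yet (see the
symbol.js TODO below), but once it is, any object could intercept split. A
forward-looking sketch, hypothetical for this V8 version:

    var sep = {};
    sep[Symbol.split] = function(string, limit) {
      return string.split(" ", limit);
    };
    "a b c".split(sep, 2)   // ["a", "b"]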
@@ -803,17 +717,12 @@ function StringTrimRight() {
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
- if (n == 1) {
- if (!%_IsSmi(code)) code = ToNumber(code);
- return %_StringCharFromCode(code & 0xffff);
- }
+ if (n == 1) return %_StringCharFromCode(code & 0xffff);
var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
var i;
for (i = 0; i < n; i++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- if (code < 0) code = code & 0xffff;
+ code = %_Arguments(i) & 0xffff;
if (code > 0xff) break;
%_OneByteSeqStringSetChar(i, code, one_byte);
}
@@ -821,9 +730,10 @@ function StringFromCharCode(code) {
one_byte = %TruncateString(one_byte, i);
var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
- for (var j = 0; i < n; i++, j++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ %_TwoByteSeqStringSetChar(0, code, two_byte);
+ i++;
+ for (var j = 1; i < n; i++, j++) {
+ code = %_Arguments(i) & 0xffff;
%_TwoByteSeqStringSetChar(j, code, two_byte);
}
return one_byte + two_byte;
@@ -832,7 +742,7 @@ function StringFromCharCode(code) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
- return %_CallFunction(TO_STRING(str), /"/g, "&quot;", StringReplace);
+ return %_Call(StringReplace, TO_STRING(str), /"/g, "&quot;");
}
@@ -1087,7 +997,7 @@ function StringFromCodePoint(_) { // length = 1
for (index = 0; index < length; index++) {
code = %_Arguments(index);
if (!%_IsSmi(code)) {
- code = ToNumber(code);
+ code = TO_NUMBER(code);
}
if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
throw MakeRangeError(kInvalidCodePoint, code);
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/js/symbol.js
index 1596169685..62ef0dd216 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/js/symbol.js
@@ -16,14 +16,15 @@ var GlobalSymbol = global.Symbol;
var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
var isConcatSpreadableSymbol =
utils.ImportNow("is_concat_spreadable_symbol");
-var isRegExpSymbol = utils.ImportNow("is_regexp_symbol");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
var ObjectGetOwnPropertyKeys;
var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
ObjectGetOwnPropertyKeys = from.ObjectGetOwnPropertyKeys;
});
@@ -92,8 +93,12 @@ utils.InstallConstants(GlobalSymbol, [
// TODO(rossberg): expose when implemented.
// "hasInstance", hasInstanceSymbol,
// "isConcatSpreadable", isConcatSpreadableSymbol,
- // "isRegExp", isRegExpSymbol,
"iterator", iteratorSymbol,
+ // TODO(yangguo): expose when implemented.
+ // "match", matchSymbol,
+ // "replace", replaceSymbol,
+ // "search", searchSymbol,
+ // "split, splitSymbol,
"toPrimitive", toPrimitiveSymbol,
// TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
// Move here when shipping
diff --git a/deps/v8/src/templates.js b/deps/v8/src/js/templates.js
index b273bc39e8..eee6b7d7e7 100644
--- a/deps/v8/src/templates.js
+++ b/deps/v8/src/js/templates.js
@@ -38,7 +38,7 @@ function SameCallSiteElements(rawStrings, other) {
function GetCachedCallSite(siteObj, hash) {
- var obj = %_CallFunction(callSiteCache, hash, mapGetFn);
+ var obj = %_Call(mapGetFn, callSiteCache, hash);
if (IS_UNDEFINED(obj)) return;
@@ -50,13 +50,13 @@ function GetCachedCallSite(siteObj, hash) {
function SetCachedCallSite(siteObj, hash) {
- var obj = %_CallFunction(callSiteCache, hash, mapGetFn);
+ var obj = %_Call(mapGetFn, callSiteCache, hash);
var array;
if (IS_UNDEFINED(obj)) {
array = new InternalArray(1);
array[0] = siteObj;
- %_CallFunction(callSiteCache, hash, array, mapSetFn);
+ %_Call(mapSetFn, callSiteCache, hash, array);
} else {
obj.push(siteObj);
}
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/js/typedarray.js
index b45d304514..db8dabe867 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -11,10 +11,37 @@
// -------------------------------------------------------------------
// Imports
+var ArrayFrom;
+var ArrayToString;
+var ArrayValues;
var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalDataView = global.DataView;
var GlobalObject = global.Object;
+var InnerArrayCopyWithin;
+var InnerArrayEvery;
+var InnerArrayFill;
+var InnerArrayFilter;
+var InnerArrayFind;
+var InnerArrayFindIndex;
+var InnerArrayForEach;
+var InnerArrayIndexOf;
+var InnerArrayJoin;
+var InnerArrayLastIndexOf;
+var InnerArrayMap;
+var InnerArrayReduce;
+var InnerArrayReduceRight;
+var InnerArraySome;
+var InnerArraySort;
+var InnerArrayToLocaleString;
+var InternalArray = utils.InternalArray;
+var IsNaN;
+var MakeRangeError;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
+var PackedArrayReverse;
+var ToPositiveInteger;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
@@ -37,24 +64,47 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
-var ToNumber;
-
utils.Import(function(from) {
- ToNumber = from.ToNumber;
+ ArrayFrom = from.ArrayFrom;
+ ArrayToString = from.ArrayToString;
+ ArrayValues = from.ArrayValues;
+ InnerArrayCopyWithin = from.InnerArrayCopyWithin;
+ InnerArrayEvery = from.InnerArrayEvery;
+ InnerArrayFill = from.InnerArrayFill;
+ InnerArrayFilter = from.InnerArrayFilter;
+ InnerArrayFind = from.InnerArrayFind;
+ InnerArrayFindIndex = from.InnerArrayFindIndex;
+ InnerArrayForEach = from.InnerArrayForEach;
+ InnerArrayIndexOf = from.InnerArrayIndexOf;
+ InnerArrayJoin = from.InnerArrayJoin;
+ InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
+ InnerArrayMap = from.InnerArrayMap;
+ InnerArrayReduce = from.InnerArrayReduce;
+ InnerArrayReduceRight = from.InnerArrayReduceRight;
+ InnerArraySome = from.InnerArraySome;
+ InnerArraySort = from.InnerArraySort;
+ InnerArrayToLocaleString = from.InnerArrayToLocaleString;
+ IsNaN = from.IsNaN;
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ PackedArrayReverse = from.PackedArrayReverse;
+ ToPositiveInteger = from.ToPositiveInteger;
});
-var InternalArray = utils.InternalArray;
-
// --------------- Typed Arrays ---------------------
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
if (!IS_UNDEFINED(byteOffset)) {
- byteOffset =
- $toPositiveInteger(byteOffset, kInvalidTypedArrayLength);
+ byteOffset = ToPositiveInteger(byteOffset, kInvalidTypedArrayLength);
}
if (!IS_UNDEFINED(length)) {
- length = $toPositiveInteger(length, kInvalidTypedArrayLength);
+ length = ToPositiveInteger(length, kInvalidTypedArrayLength);
}
var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
@@ -95,7 +145,7 @@ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
function NAMEConstructByLength(obj, length) {
var l = IS_UNDEFINED(length) ?
- 0 : $toPositiveInteger(length, kInvalidTypedArrayLength);
+ 0 : ToPositiveInteger(length, kInvalidTypedArrayLength);
if (l > %_MaxSmi()) {
throw MakeRangeError(kInvalidTypedArrayLength);
}
@@ -110,7 +160,7 @@ function NAMEConstructByLength(obj, length) {
function NAMEConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
- var l = $toPositiveInteger(length, kInvalidTypedArrayLength);
+ var l = ToPositiveInteger(length, kInvalidTypedArrayLength);
if (l > %_MaxSmi()) {
throw MakeRangeError(kInvalidTypedArrayLength);
@@ -161,7 +211,7 @@ function NAMEConstructor(arg1, arg2, arg3) {
NAMEConstructByLength(this, arg1);
} else {
var iteratorFn = arg1[iteratorSymbol];
- if (IS_UNDEFINED(iteratorFn) || iteratorFn === $arrayValues) {
+ if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
NAMEConstructByArrayLike(this, arg1);
} else {
NAMEConstructByIterable(this, arg1, iteratorFn);
@@ -214,15 +264,15 @@ function NAMESubArray(begin, end) {
}
if (beginInt < 0) {
- beginInt = MAX_SIMPLE(0, srcLength + beginInt);
+ beginInt = MaxSimple(0, srcLength + beginInt);
} else {
- beginInt = MIN_SIMPLE(beginInt, srcLength);
+ beginInt = MinSimple(beginInt, srcLength);
}
if (endInt < 0) {
- endInt = MAX_SIMPLE(0, srcLength + endInt);
+ endInt = MaxSimple(0, srcLength + endInt);
} else {
- endInt = MIN_SIMPLE(endInt, srcLength);
+ endInt = MinSimple(endInt, srcLength);
}
if (endInt < beginInt) {
@@ -350,14 +400,327 @@ function TypedArrayGetToStringTag() {
return name;
}
+
+function ConstructTypedArray(constructor, arg) {
+ // TODO(littledan): This is an approximation of the spec, which requires
+ // that only real TypedArray classes should be accepted (22.2.2.1.1)
+ if (!%IsConstructor(constructor) || IS_UNDEFINED(constructor.prototype) ||
+ !%HasOwnProperty(constructor.prototype, "BYTES_PER_ELEMENT")) {
+ throw MakeTypeError(kNotTypedArray);
+ }
+
+ // TODO(littledan): The spec requires that, rather than directly calling
+ // the constructor, a TypedArray is created with the proper proto and
+ // underlying size and element size, and elements are put in one by one.
+ // By contrast, this would allow subclasses to make a radically different
+ // constructor with different semantics.
+ return new constructor(arg);
+}
+
+
+function ConstructTypedArrayLike(typedArray, arg) {
+  // TODO(littledan): The spec requires that we actually use
+ // typedArray.constructor[Symbol.species] (bug v8:4093)
+ // Also, it should default to the default constructor from
+ // table 49 if typedArray.constructor doesn't exist.
+ return ConstructTypedArray(typedArray.constructor, arg);
+}
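
Note: since @@species is not implemented yet (bug v8:4093 in the TODO above),
"like" construction just funnels through the instance's own .constructor. In
effect, filter/map/slice on a typed array build their result with:

    // Sketch of what ConstructTypedArrayLike amounts to today:
    var result = new this.constructor(collectedElements);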
+
+
+function TypedArrayCopyWithin(target, start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ // TODO(littledan): Replace with a memcpy for better performance
+ return InnerArrayCopyWithin(target, start, end, this, length);
+}
+%FunctionSetLength(TypedArrayCopyWithin, 2);
+
+
+// ES6 draft 05-05-15, section 22.2.3.7
+function TypedArrayEvery(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayEvery(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArrayEvery, 1);
+
+
+// ES6 draft 08-24-14, section 22.2.3.12
+function TypedArrayForEach(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ InnerArrayForEach(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArrayForEach, 1);
+
+
+// ES6 draft 04-05-14 section 22.2.3.8
+function TypedArrayFill(value, start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFill(value, start, end, this, length);
+}
+%FunctionSetLength(TypedArrayFill, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.9
+function TypedArrayFilter(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ var array = InnerArrayFilter(predicate, thisArg, this, length);
+ return ConstructTypedArrayLike(this, array);
+}
+%FunctionSetLength(TypedArrayFilter, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.10
+function TypedArrayFind(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFind(predicate, thisArg, this, length);
+}
+%FunctionSetLength(TypedArrayFind, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.11
+function TypedArrayFindIndex(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFindIndex(predicate, thisArg, this, length);
+}
+%FunctionSetLength(TypedArrayFindIndex, 1);
+
+
+// ES6 draft 05-18-15, section 22.2.3.21
+function TypedArrayReverse() {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return PackedArrayReverse(this, length);
+}
+
+
+function TypedArrayComparefn(x, y) {
+ if (IsNaN(x) && IsNaN(y)) {
+    return 0;
+ }
+ if (IsNaN(x)) {
+ return 1;
+ }
+ if (x === 0 && x === y) {
+ if (%_IsMinusZero(x)) {
+ if (!%_IsMinusZero(y)) {
+ return -1;
+ }
+ } else if (%_IsMinusZero(y)) {
+ return 1;
+ }
+ }
+ return x - y;
+}
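
Note: this default comparator sorts numerically, orders -0 before +0, and
pushes NaN to the end, e.g.:

    new Float64Array([3, NaN, -0, 0, 1]).sort()
    // -> Float64Array [-0, 0, 1, 3, NaN]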
+
+
+// ES6 draft 05-18-15, section 22.2.3.25
+function TypedArraySort(comparefn) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ if (IS_UNDEFINED(comparefn)) {
+ comparefn = TypedArrayComparefn;
+ }
+
+ return InnerArraySort(this, length, comparefn);
+}
+
+
+// ES6 section 22.2.3.13
+function TypedArrayIndexOf(element, index) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayIndexOf(this, element, index, length);
+}
+%FunctionSetLength(TypedArrayIndexOf, 1);
+
+
+// ES6 section 22.2.3.16
+function TypedArrayLastIndexOf(element, index) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayLastIndexOf(this, element, index, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayLastIndexOf, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.18
+function TypedArrayMap(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ // TODO(littledan): Preallocate rather than making an intermediate
+ // InternalArray, for better performance.
+ var length = %_TypedArrayGetLength(this);
+ var array = InnerArrayMap(predicate, thisArg, this, length);
+ return ConstructTypedArrayLike(this, array);
+}
+%FunctionSetLength(TypedArrayMap, 1);
+
+
+// ES6 draft 05-05-15, section 22.2.3.24
+function TypedArraySome(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArraySome(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArraySome, 1);
+
+
+// ES6 section 22.2.3.27
+function TypedArrayToLocaleString() {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayToLocaleString(this, length);
+}
+
+
+// ES6 section 22.2.3.28
+function TypedArrayToString() {
+ return %_Call(ArrayToString, this);
+}
+
+
+// ES6 section 22.2.3.14
+function TypedArrayJoin(separator) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayJoin(separator, this, length);
+}
+
+
+// ES6 draft 07-15-13, section 22.2.3.19
+function TypedArrayReduce(callback, current) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayReduce(callback, current, this, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayReduce, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.19
+function TypedArrayReduceRight(callback, current) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayReduceRight(callback, current, this, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayReduceRight, 1);
+
+
+function TypedArraySlice(start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ var len = %_TypedArrayGetLength(this);
+
+ var relativeStart = TO_INTEGER(start);
+
+ var k;
+ if (relativeStart < 0) {
+ k = MaxSimple(len + relativeStart, 0);
+ } else {
+ k = MinSimple(relativeStart, len);
+ }
+
+ var relativeEnd;
+ if (IS_UNDEFINED(end)) {
+ relativeEnd = len;
+ } else {
+ relativeEnd = TO_INTEGER(end);
+ }
+
+ var final;
+ if (relativeEnd < 0) {
+ final = MaxSimple(len + relativeEnd, 0);
+ } else {
+ final = MinSimple(relativeEnd, len);
+ }
+
+ var count = MaxSimple(final - k, 0);
+ var array = ConstructTypedArrayLike(this, count);
+  // The code below is the 'then' branch; the 'else' branch specifies
+ // a memcpy. Because V8 doesn't canonicalize NaN, the difference is
+ // unobservable.
+ var n = 0;
+ while (k < final) {
+ var kValue = this[k];
+ // TODO(littledan): The spec says to throw on an error in setting;
+ // does this throw?
+ array[n] = kValue;
+ k++;
+ n++;
+ }
+ return array;
+}
+
+
+// ES6 draft 08-24-14, section 22.2.2.2
+function TypedArrayOf() {
+ var length = %_ArgumentsLength();
+ var array = new this(length);
+ for (var i = 0; i < length; i++) {
+ array[i] = %_Arguments(i);
+ }
+ return array;
+}
+
+
+function TypedArrayFrom(source, mapfn, thisArg) {
+ // TODO(littledan): Investigate if there is a receiver which could be
+ // faster to accumulate on than Array, e.g., a TypedVector.
+ var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
+ return ConstructTypedArray(this, array);
+}
+%FunctionSetLength(TypedArrayFrom, 1);
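TypedArrayOf collects its arguments directly, while TypedArrayFrom reuses ArrayFrom and then converts, so iterables and the optional mapfn come for free; both are installed as statics on each constructor by the macro below. Expected results, sketched under those semantics:

  // Uint8Array.of(1, 2, 256)  -> Uint8Array [1, 2, 0]  (256 wraps to the element type)
  // Int8Array.from([1, 2, 3], function(x) { return x * 2; })
  //                           -> Int8Array [2, 4, 6]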
+
// -------------------------------------------------------------------
+// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%SetCode(GlobalNAME, NAMEConstructor);
%FunctionSetPrototype(GlobalNAME, new GlobalObject());
%AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
+
+ utils.InstallFunctions(GlobalNAME, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+ "from", TypedArrayFrom,
+ "of", TypedArrayOf
+ ]);
+
%AddNamedProperty(GlobalNAME.prototype,
"constructor", global.NAME, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype,
@@ -374,7 +737,26 @@ macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
TypedArrayGetToStringTag);
utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
"subarray", NAMESubArray,
- "set", TypedArraySet
+ "set", TypedArraySet,
+ "copyWithin", TypedArrayCopyWithin,
+ "every", TypedArrayEvery,
+ "fill", TypedArrayFill,
+ "filter", TypedArrayFilter,
+ "find", TypedArrayFind,
+ "findIndex", TypedArrayFindIndex,
+ "indexOf", TypedArrayIndexOf,
+ "join", TypedArrayJoin,
+ "lastIndexOf", TypedArrayLastIndexOf,
+ "forEach", TypedArrayForEach,
+ "map", TypedArrayMap,
+ "reduce", TypedArrayReduce,
+ "reduceRight", TypedArrayReduceRight,
+ "reverse", TypedArrayReverse,
+ "slice", TypedArraySlice,
+ "some", TypedArraySome,
+ "sort", TypedArraySort,
+ "toString", TypedArrayToString,
+ "toLocaleString", TypedArrayToLocaleString
]);
endmacro
@@ -387,10 +769,10 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
// TODO(binji): support SharedArrayBuffers?
if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
if (!IS_UNDEFINED(byteOffset)) {
- byteOffset = $toPositiveInteger(byteOffset, kInvalidDataViewOffset);
+ byteOffset = ToPositiveInteger(byteOffset, kInvalidDataViewOffset);
}
if (!IS_UNDEFINED(byteLength)) {
- byteLength = TO_INTEGER(byteLength);
+ byteLength = TO_INTEGER(byteLength);
}
var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
@@ -452,7 +834,7 @@ function DataViewGetTYPENAMEJS(offset, little_endian) {
'DataView.getTYPENAME', this);
}
if (%_ArgumentsLength() < 1) throw MakeTypeError(kInvalidArgument);
- offset = $toPositiveInteger(offset, kInvalidDataViewAccessorOffset);
+ offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
return %DataViewGetTYPENAME(this, offset, !!little_endian);
}
@@ -462,8 +844,8 @@ function DataViewSetTYPENAMEJS(offset, value, little_endian) {
'DataView.setTYPENAME', this);
}
if (%_ArgumentsLength() < 2) throw MakeTypeError(kInvalidArgument);
- offset = $toPositiveInteger(offset, kInvalidDataViewAccessorOffset);
- %DataViewSetTYPENAME(this, offset, TO_NUMBER_INLINE(value), !!little_endian);
+ offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
+ %DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
}
endmacro
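The constructor and every getTYPENAME/setTYPENAME accessor now funnel their offset through ToPositiveInteger, which throws for values that do not coerce to a non-negative integer, while stored values go through TO_NUMBER. Illustrative behavior:

  var dv = new DataView(new ArrayBuffer(8));
  dv.setUint8(0, '7');                     // TO_NUMBER('7') -> 7
  // dv.getUint8(-1)                       -> throws RangeError
  // new DataView(new ArrayBuffer(8), -1)  -> throws RangeError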
diff --git a/deps/v8/src/uri.js b/deps/v8/src/js/uri.js
index bdb83d1431..712d7e60f3 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/js/uri.js
@@ -17,6 +17,12 @@
var GlobalObject = global.Object;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
+var MakeURIError;
+
+utils.Import(function(from) {
+ MakeURIError = from.MakeURIError;
+});
+
// -------------------------------------------------------------------
// Define internal helper functions.
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/js/v8natives.js
index 37e6f1bcce..f6b394c2e7 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -17,17 +17,29 @@ var GlobalNumber = global.Number;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeRangeError;
+var MakeSyntaxError;
+var MakeTypeError;
var MathAbs;
+var NaN = %GetRootNaN();
+var ObserveBeginPerformSplice;
+var ObserveEndPerformSplice;
+var ObserveEnqueueSpliceRecord;
var ProxyDelegateCallAndConstruct;
var ProxyDerivedHasOwnTrap;
var ProxyDerivedKeysTrap;
+var SameValue = utils.ImportNow("SameValue");
var StringIndexOf;
-var ToBoolean = utils.ImportNow("ToBoolean");
-var ToNumber = utils.ImportNow("ToNumber");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
+ MakeRangeError = from.MakeRangeError;
+ MakeSyntaxError = from.MakeSyntaxError;
+ MakeTypeError = from.MakeTypeError;
MathAbs = from.MathAbs;
+ ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
+ ObserveEndPerformSplice = from.ObserveEndPerformSplice;
+ ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
StringIndexOf = from.StringIndexOf;
});
@@ -43,14 +55,14 @@ utils.ImportFromExperimental(function(from) {
// ECMA 262 - 15.1.4
function GlobalIsNaN(number) {
- number = TO_NUMBER_INLINE(number);
+ number = TO_NUMBER(number);
return NUMBER_IS_NAN(number);
}
// ECMA 262 - 15.1.5
function GlobalIsFinite(number) {
- number = TO_NUMBER_INLINE(number);
+ number = TO_NUMBER(number);
return NUMBER_IS_FINITE(number);
}
@@ -77,7 +89,7 @@ function GlobalParseInt(string, radix) {
string = TO_STRING(string);
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return NAN;
+ return NaN;
}
}
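A radix of 0 keeps auto-detection; anything else outside 2..36 yields the imported root NaN (replacing the old NAN macro). For example:

  // parseInt('ff', 16)  -> 255
  // parseInt('10', 37)  -> NaN  (radix out of range)
  // parseInt('0x10', 0) -> 16   (radix 0 auto-detects the 0x prefix)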
@@ -105,7 +117,7 @@ function GlobalEval(x) {
var f = %CompileString(x, false);
if (!IS_FUNCTION(f)) return f;
- return %_CallFunction(global_proxy, f);
+ return %_Call(f, global_proxy);
}
@@ -116,7 +128,7 @@ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
utils.InstallConstants(global, [
// ECMA 262 - 15.1.1.1.
- "NaN", NAN,
+ "NaN", NaN,
// ECMA-262 - 15.1.1.2.
"Infinity", INFINITY,
// ECMA-262 - 15.1.1.2.
@@ -349,11 +361,11 @@ function ToPropertyDescriptor(obj) {
var desc = new PropertyDescriptor();
if ("enumerable" in obj) {
- desc.setEnumerable(ToBoolean(obj.enumerable));
+ desc.setEnumerable(TO_BOOLEAN(obj.enumerable));
}
if ("configurable" in obj) {
- desc.setConfigurable(ToBoolean(obj.configurable));
+ desc.setConfigurable(TO_BOOLEAN(obj.configurable));
}
if ("value" in obj) {
@@ -361,7 +373,7 @@ function ToPropertyDescriptor(obj) {
}
if ("writable" in obj) {
- desc.setWritable(ToBoolean(obj.writable));
+ desc.setWritable(TO_BOOLEAN(obj.writable));
}
if ("get" in obj) {
@@ -542,17 +554,17 @@ function GetTrap(handler, name, defaultTrap) {
function CallTrap0(handler, name, defaultTrap) {
- return %_CallFunction(handler, GetTrap(handler, name, defaultTrap));
+ return %_Call(GetTrap(handler, name, defaultTrap), handler);
}
function CallTrap1(handler, name, defaultTrap, x) {
- return %_CallFunction(handler, x, GetTrap(handler, name, defaultTrap));
+ return %_Call(GetTrap(handler, name, defaultTrap), handler, x);
}
function CallTrap2(handler, name, defaultTrap, x, y) {
- return %_CallFunction(handler, x, y, GetTrap(handler, name, defaultTrap));
+ return %_Call(GetTrap(handler, name, defaultTrap), handler, x, y);
}
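The %_CallFunction to %_Call migration throughout this patch swaps the calling convention: the callee moves from the last argument slot to the first, and the receiver follows it. As a rough JS-level equivalence (the intrinsics themselves are internal):

  // old: %_CallFunction(receiver, x, y, fn)
  // new: %_Call(fn, receiver, x, y)
  // both behave like:
  //   fn.call(receiver, x, y)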
@@ -615,7 +627,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
var handler = %GetHandler(obj);
var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
- if (!ToBoolean(result)) {
+ if (!result) {
if (should_throw) {
throw MakeTypeError(kProxyHandlerReturned,
handler, "false", "defineProperty");
@@ -648,17 +660,17 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
if ((IsGenericDescriptor(desc) ||
IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
(!desc.hasEnumerable() ||
- $sameValue(desc.isEnumerable(), current.isEnumerable())) &&
+ SameValue(desc.isEnumerable(), current.isEnumerable())) &&
(!desc.hasConfigurable() ||
- $sameValue(desc.isConfigurable(), current.isConfigurable())) &&
+ SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() ||
- $sameValue(desc.isWritable(), current.isWritable())) &&
+ SameValue(desc.isWritable(), current.isWritable())) &&
(!desc.hasValue() ||
- $sameValue(desc.getValue(), current.getValue())) &&
+ SameValue(desc.getValue(), current.getValue())) &&
(!desc.hasGetter() ||
- $sameValue(desc.getGet(), current.getGet())) &&
+ SameValue(desc.getGet(), current.getGet())) &&
(!desc.hasSetter() ||
- $sameValue(desc.getSet(), current.getSet()))) {
+ SameValue(desc.getSet(), current.getSet()))) {
return true;
}
if (!current.isConfigurable()) {
@@ -697,7 +709,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
}
if (!currentIsWritable && desc.hasValue() &&
- !$sameValue(desc.getValue(), current.getValue())) {
+ !SameValue(desc.getValue(), current.getValue())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
@@ -708,14 +720,14 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
// Step 11
if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
if (desc.hasSetter() &&
- !$sameValue(desc.getSet(), current.getSet())) {
+ !SameValue(desc.getSet(), current.getSet())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
return false;
}
}
- if (desc.hasGetter() && !$sameValue(desc.getGet(),current.getGet())) {
+ if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
@@ -808,14 +820,14 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
var length = obj.length;
if (index >= length && %IsObserved(obj)) {
emit_splice = true;
- $observeBeginPerformSplice(obj);
+ ObserveBeginPerformSplice(obj);
}
var length_desc = GetOwnPropertyJS(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
!DefineObjectProperty(obj, p, desc, true)) {
if (emit_splice)
- $observeEndPerformSplice(obj);
+ ObserveEndPerformSplice(obj);
if (should_throw) {
throw MakeTypeError(kDefineDisallowed, p);
} else {
@@ -826,8 +838,8 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
obj.length = index + 1;
}
if (emit_splice) {
- $observeEndPerformSplice(obj);
- $observeEnqueueSpliceRecord(obj, length, [], index + 1 - length);
+ ObserveEndPerformSplice(obj);
+ ObserveEnqueueSpliceRecord(obj, length, [], index + 1 - length);
}
return true;
}
@@ -854,17 +866,6 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
}
-function DefineOwnPropertyFromAPI(obj, p, value, desc) {
- return DefineOwnProperty(obj, p, ToPropertyDescriptor({
- value: value,
- writable: desc[0],
- enumerable: desc[1],
- configurable: desc[2]
- }),
- false);
-}
-
-
// ES6 section 19.1.2.9
function ObjectGetPrototypeOf(obj) {
return %_GetPrototype(TO_OBJECT(obj));
@@ -1026,42 +1027,47 @@ function ObjectCreate(proto, properties) {
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
- }
- var name = TO_NAME(p);
- if (%_IsJSProxy(obj)) {
- // Clone the attributes object for protection.
- // TODO(rossberg): not spec'ed yet, so not sure if this should involve
- // non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = { __proto__: null };
- for (var a in attributes) {
- attributesClone[a] = attributes[a];
+ // The new pure-C++ implementation doesn't support Proxies yet, nor Object.observe.
+ // TODO(jkummerow): Implement missing features and remove fallback path.
+ if (%_IsJSProxy(obj) || %IsObserved(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
}
- DefineProxyProperty(obj, name, attributesClone, true);
- // The following would implement the spec as in the current proposal,
- // but after recent comments on es-discuss, is most likely obsolete.
- /*
- var defineObj = FromGenericPropertyDescriptor(desc);
- var names = ObjectGetOwnPropertyNames(attributes);
- var standardNames =
- {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
- for (var i = 0; i < names.length; i++) {
- var N = names[i];
- if (!(%HasOwnProperty(standardNames, N))) {
- var attr = GetOwnPropertyJS(attributes, N);
- DefineOwnProperty(descObj, N, attr, true);
+ var name = TO_NAME(p);
+ if (%_IsJSProxy(obj)) {
+ // Clone the attributes object for protection.
+ // TODO(rossberg): not spec'ed yet, so not sure if this should involve
+ // non-own properties as it does (or non-enumerable ones, as it doesn't?).
+ var attributesClone = { __proto__: null };
+ for (var a in attributes) {
+ attributesClone[a] = attributes[a];
}
+ DefineProxyProperty(obj, name, attributesClone, true);
+ // The following would implement the spec as in the current proposal,
+ // but after recent comments on es-discuss, is most likely obsolete.
+ /*
+ var defineObj = FromGenericPropertyDescriptor(desc);
+ var names = ObjectGetOwnPropertyNames(attributes);
+ var standardNames =
+ {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
+ for (var i = 0; i < names.length; i++) {
+ var N = names[i];
+ if (!(%HasOwnProperty(standardNames, N))) {
+ var attr = GetOwnPropertyJS(attributes, N);
+ DefineOwnProperty(descObj, N, attr, true);
+ }
+ }
+ // This is really confusing the types, but it is what the proxies spec
+ // currently requires:
+ desc = descObj;
+ */
+ } else {
+ var desc = ToPropertyDescriptor(attributes);
+ DefineOwnProperty(obj, name, desc, true);
}
- // This is really confusing the types, but it is what the proxies spec
- // currently requires:
- desc = descObj;
- */
- } else {
- var desc = ToPropertyDescriptor(attributes);
- DefineOwnProperty(obj, name, desc, true);
+ return obj;
}
- return obj;
+ return %ObjectDefineProperty(obj, p, attributes);
}
@@ -1089,19 +1095,24 @@ function GetOwnEnumerablePropertyNames(object) {
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
- }
- var props = TO_OBJECT(properties);
- var names = GetOwnEnumerablePropertyNames(props);
- var descriptors = new InternalArray();
- for (var i = 0; i < names.length; i++) {
- descriptors.push(ToPropertyDescriptor(props[names[i]]));
- }
- for (var i = 0; i < names.length; i++) {
- DefineOwnProperty(obj, names[i], descriptors[i], true);
+ // The new pure-C++ implementation doesn't support Proxies yet, nor Object.observe.
+ // TODO(jkummerow): Implement missing features and remove fallback path.
+ if (%_IsJSProxy(obj) || %_IsJSProxy(properties) || %IsObserved(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
+ }
+ var props = TO_OBJECT(properties);
+ var names = GetOwnEnumerablePropertyNames(props);
+ var descriptors = new InternalArray();
+ for (var i = 0; i < names.length; i++) {
+ descriptors.push(ToPropertyDescriptor(props[names[i]]));
+ }
+ for (var i = 0; i < names.length; i++) {
+ DefineOwnProperty(obj, names[i], descriptors[i], true);
+ }
+ return obj;
}
- return obj;
+ return %ObjectDefineProperties(obj, properties);
}
@@ -1254,12 +1265,6 @@ function ObjectIsExtensible(obj) {
}
-// ECMA-262, Edition 6, section 19.1.2.10
-function ObjectIs(obj1, obj2) {
- return $sameValue(obj1, obj2);
-}
-
-
// ECMA-262, Edition 6, section 19.1.2.1
function ObjectAssign(target, sources) {
// TODO(bmeurer): Move this to toplevel.
@@ -1306,14 +1311,13 @@ function ObjectSetProto(proto) {
}
+// ECMA-262, Edition 6, section 19.1.1.1
function ObjectConstructor(x) {
- if (%_IsConstructCall()) {
- if (x == null) return this;
- return TO_OBJECT(x);
- } else {
- if (x == null) return { };
- return TO_OBJECT(x);
+ if (GlobalObject != new.target && !IS_UNDEFINED(new.target)) {
+ return this;
}
+ if (IS_NULL(x) || IS_UNDEFINED(x)) return {};
+ return TO_OBJECT(x);
}
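The constructor is rewritten on top of new.target: a subclass construction returns the already-allocated this, while plain calls and direct construction coerce the argument. Expected behavior under these semantics (illustrative only):

  // Object()          -> {}                       (x undefined)
  // Object(42)        -> Number wrapper, via TO_OBJECT
  // new Object(null)  -> {}
  // For `class C extends Object {}`, `new C()` takes the early-return
  // branch, because new.target is C rather than Object.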
@@ -1355,7 +1359,7 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
// getOwnPropertySymbols is added in symbol.js.
- "is", ObjectIs,
+ "is", SameValue, // ECMA-262, Edition 6, section 19.1.2.10
"isExtensible", ObjectIsExtensible,
"isFrozen", ObjectIsFrozen,
"isSealed", ObjectIsSealed,
@@ -1373,9 +1377,9 @@ function BooleanConstructor(x) {
// TODO(bmeurer): Move this to toplevel.
"use strict";
if (%_IsConstructCall()) {
- %_SetValueOf(this, ToBoolean(x));
+ %_SetValueOf(this, TO_BOOLEAN(x));
} else {
- return ToBoolean(x);
+ return TO_BOOLEAN(x);
}
}
@@ -1423,7 +1427,7 @@ utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
function NumberConstructor(x) {
// TODO(bmeurer): Move this to toplevel.
"use strict";
- var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
+ var value = %_ArgumentsLength() == 0 ? 0 : TO_NUMBER(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
@@ -1459,7 +1463,7 @@ function NumberToStringJS(radix) {
// ECMA-262 section 15.7.4.3
function NumberToLocaleString() {
- return %_CallFunction(this, NumberToStringJS);
+ return %_Call(NumberToStringJS, this);
}
@@ -1596,7 +1600,7 @@ utils.InstallConstants(GlobalNumber, [
// ECMA-262 section 15.7.3.2.
"MIN_VALUE", 5e-324,
// ECMA-262 section 15.7.3.3.
- "NaN", NAN,
+ "NaN", NaN,
// ECMA-262 section 15.7.3.4.
"NEGATIVE_INFINITY", -INFINITY,
// ECMA-262 section 15.7.3.5.
@@ -1763,7 +1767,7 @@ function NewFunctionString(args, function_token) {
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (%_CallFunction(p, ')', StringIndexOf) != -1) {
+ if (%_Call(StringIndexOf, p, ')') != -1) {
throw MakeSyntaxError(kParenthesisInArgString);
}
// If the formal parameters include an unbalanced block comment, the
@@ -1781,9 +1785,11 @@ function FunctionConstructor(arg1) { // length == 1
var global_proxy = %GlobalProxy(FunctionConstructor);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
- var f = %_CallFunction(global_proxy, %CompileString(source, true));
- %FunctionMarkNameShouldPrintAsAnonymous(f);
- return f;
+ var func = %_Call(%CompileString(source, true), global_proxy);
+ // Set the name-should-print-as-anonymous flag on the SharedFunctionInfo and
+ // ensure that |func| uses the correct initial map from |new.target| if
+ // it's available.
+ return %CompleteFunctionConstruction(func, GlobalFunction, new.target);
}
@@ -1810,7 +1816,7 @@ function GetIterator(obj, method) {
if (!IS_CALLABLE(method)) {
throw MakeTypeError(kNotIterable, obj);
}
- var iterator = %_CallFunction(obj, method);
+ var iterator = %_Call(method, obj);
if (!IS_SPEC_OBJECT(iterator)) {
throw MakeTypeError(kNotAnIterator, iterator);
}
@@ -1844,7 +1850,6 @@ utils.Export(function(to) {
"global_eval_fun", GlobalEval,
"object_value_of", ObjectValueOf,
"object_to_string", ObjectToString,
- "object_define_own_property", DefineOwnPropertyFromAPI,
"object_get_own_property_descriptor", ObjectGetOwnPropertyDescriptor,
"to_complete_property_descriptor", ToCompletePropertyDescriptor,
]);
diff --git a/deps/v8/src/weak-collection.js b/deps/v8/src/js/weak-collection.js
index 1c60a2f47a..c4568f97f4 100644
--- a/deps/v8/src/weak-collection.js
+++ b/deps/v8/src/js/weak-collection.js
@@ -8,11 +8,23 @@
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
+var GetExistingHash;
+var GetHash;
var GlobalObject = global.Object;
var GlobalWeakMap = global.WeakMap;
var GlobalWeakSet = global.WeakSet;
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ GetExistingHash = from.GetExistingHash;
+ GetHash = from.GetHash;
+ MakeTypeError = from.MakeTypeError;
+});
+
// -------------------------------------------------------------------
// Harmony WeakMap
@@ -44,7 +56,7 @@ function WeakMapGet(key) {
'WeakMap.prototype.get', this);
}
if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- var hash = $getExistingHash(key);
+ var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return UNDEFINED;
return %WeakCollectionGet(this, key, hash);
}
@@ -56,7 +68,7 @@ function WeakMapSet(key, value) {
'WeakMap.prototype.set', this);
}
if (!IS_SPEC_OBJECT(key)) throw MakeTypeError(kInvalidWeakMapKey);
- return %WeakCollectionSet(this, key, value, $getHash(key));
+ return %WeakCollectionSet(this, key, value, GetHash(key));
}
@@ -66,7 +78,7 @@ function WeakMapHas(key) {
'WeakMap.prototype.has', this);
}
if (!IS_SPEC_OBJECT(key)) return false;
- var hash = $getExistingHash(key);
+ var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionHas(this, key, hash);
}
@@ -78,7 +90,7 @@ function WeakMapDelete(key) {
'WeakMap.prototype.delete', this);
}
if (!IS_SPEC_OBJECT(key)) return false;
- var hash = $getExistingHash(key);
+ var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionDelete(this, key, hash);
}
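get/has/delete now short-circuit through GetExistingHash: an object that has never received an identity hash cannot be present in any weak collection, so no table lookup happens. Only set installs a hash via GetHash; the WeakSet methods below follow the same pattern. Illustrative behavior:

  var wm = new WeakMap();
  var key = {};
  // wm.get(key) -> undefined, wm.has(key) -> false, wm.delete(key) -> false
  wm.set(key, 1);  // GetHash installs the identity hash
  // wm.get(key) -> 1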
@@ -130,7 +142,7 @@ function WeakSetAdd(value) {
'WeakSet.prototype.add', this);
}
if (!IS_SPEC_OBJECT(value)) throw MakeTypeError(kInvalidWeakSetValue);
- return %WeakCollectionSet(this, value, true, $getHash(value));
+ return %WeakCollectionSet(this, value, true, GetHash(value));
}
@@ -140,7 +152,7 @@ function WeakSetHas(value) {
'WeakSet.prototype.has', this);
}
if (!IS_SPEC_OBJECT(value)) return false;
- var hash = $getExistingHash(value);
+ var hash = GetExistingHash(value);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionHas(this, value, hash);
}
@@ -152,7 +164,7 @@ function WeakSetDelete(value) {
'WeakSet.prototype.delete', this);
}
if (!IS_SPEC_OBJECT(value)) return false;
- var hash = $getExistingHash(value);
+ var hash = GetExistingHash(value);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionDelete(this, value, hash);
}
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index cac4979859..21889530c3 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -845,6 +845,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
return result;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_JSON_PARSER_H_
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index fa4946dad7..8bcef34c79 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -223,6 +223,7 @@ MaybeHandle<Object> BasicJsonStringifier::StringifyString(
SerializeStringUnchecked_(object->GetFlatContent().ToOneByteVector(),
&no_extend);
no_extend.Append('\"');
+ return no_extend.Finalize();
} else {
result = isolate->factory()
->NewRawTwoByteString(worst_case_length)
@@ -233,8 +234,8 @@ MaybeHandle<Object> BasicJsonStringifier::StringifyString(
SerializeStringUnchecked_(object->GetFlatContent().ToUC16Vector(),
&no_extend);
no_extend.Append('\"');
+ return no_extend.Finalize();
}
- return result;
}
@@ -524,7 +525,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
HandleScope handle_scope(isolate_);
Result stack_push = StackPush(object);
if (stack_push != SUCCESS) return stack_push;
- DCHECK(!object->IsJSGlobalProxy() && !object->IsGlobalObject());
+ DCHECK(!object->IsJSGlobalProxy() && !object->IsJSGlobalObject());
builder_.AppendCharacter('{');
bool comma = false;
@@ -681,6 +682,7 @@ void BasicJsonStringifier::SerializeString(Handle<String> object) {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_JSON_STRINGIFIER_H_
diff --git a/deps/v8/src/key-accumulator.cc b/deps/v8/src/key-accumulator.cc
new file mode 100644
index 0000000000..91b014aacd
--- /dev/null
+++ b/deps/v8/src/key-accumulator.cc
@@ -0,0 +1,263 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/key-accumulator.h"
+
+#include "src/elements.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+KeyAccumulator::~KeyAccumulator() {
+ for (size_t i = 0; i < elements_.size(); i++) {
+ delete elements_[i];
+ }
+}
+
+
+Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
+ if (length_ == 0) {
+ return isolate_->factory()->empty_fixed_array();
+ }
+ // Make sure we have all the lengths collected.
+ NextPrototype();
+
+ // Assemble the result array by first adding the element keys and then the
+ // property keys. We use the total number of String + Symbol keys per level in
+ // |level_lengths_| and the available element keys in the corresponding bucket
+ // in |elements_| to deduce the number of keys to take from the
+ // |string_properties_| and |symbol_properties_| sets.
+ Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
+ int insertion_index = 0;
+ int string_properties_index = 0;
+ int symbol_properties_index = 0;
+ // String and Symbol lengths always come in pairs:
+ size_t max_level = level_lengths_.size() / 2;
+ for (size_t level = 0; level < max_level; level++) {
+ int num_string_properties = level_lengths_[level * 2];
+ int num_symbol_properties = level_lengths_[level * 2 + 1];
+ if (num_string_properties < 0) {
+ // If |num_string_properties| is negative, the current level contains
+ // properties from a proxy, hence we skip the integer keys in |elements_|
+ // since proxies define the complete ordering.
+ num_string_properties = -num_string_properties;
+ } else if (level < elements_.size()) {
+ // Add the element indices for this prototype level.
+ std::vector<uint32_t>* elements = elements_[level];
+ int num_elements = static_cast<int>(elements->size());
+ for (int i = 0; i < num_elements; i++) {
+ Handle<Object> key;
+ if (convert == KEEP_NUMBERS) {
+ key = isolate_->factory()->NewNumberFromUint(elements->at(i));
+ } else {
+ key = isolate_->factory()->Uint32ToString(elements->at(i));
+ }
+ result->set(insertion_index, *key);
+ insertion_index++;
+ }
+ }
+ // Add the string property keys for this prototype level.
+ for (int i = 0; i < num_string_properties; i++) {
+ Object* key = string_properties_->KeyAt(string_properties_index);
+ result->set(insertion_index, key);
+ insertion_index++;
+ string_properties_index++;
+ }
+ // Add the symbol property keys for this prototype level.
+ for (int i = 0; i < num_symbol_properties; i++) {
+ Object* key = symbol_properties_->KeyAt(symbol_properties_index);
+ result->set(insertion_index, key);
+ insertion_index++;
+ symbol_properties_index++;
+ }
+ }
+
+ DCHECK_EQ(insertion_index, length_);
+ return result;
+}
+
+
+namespace {
+
+bool AccumulatorHasKey(std::vector<uint32_t>* sub_elements, uint32_t key) {
+ return std::binary_search(sub_elements->begin(), sub_elements->end(), key);
+}
+
+} // namespace
+
+bool KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
+ return AddKey(handle(key, isolate_), convert);
+}
+
+
+bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
+ if (key->IsSymbol()) {
+ if (filter_ == SKIP_SYMBOLS) return false;
+ return AddSymbolKey(key);
+ }
+ // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
+ DCHECK_LE(0, level_string_length_);
+ // In some cases (e.g. proxies) we might be handed String-converted integer
+ // indices, which should be added to the elements list instead of the
+ // properties. For proxies we have to convert as well, but also respect the
+ // original order; therefore we add a converted key to both sides.
+ if (convert == CONVERT_TO_ARRAY_INDEX || convert == PROXY_MAGIC) {
+ uint32_t index = 0;
+ int prev_length = length_;
+ int prev_proto = level_string_length_;
+ if ((key->IsString() && Handle<String>::cast(key)->AsArrayIndex(&index)) ||
+ key->ToArrayIndex(&index)) {
+ bool key_was_added = AddIntegerKey(index);
+ if (convert == CONVERT_TO_ARRAY_INDEX) return key_was_added;
+ if (convert == PROXY_MAGIC) {
+ // If we had an array index (number) and it wasn't added, the key
+ // already existed before, hence we cannot add it to the properties
+ // keys as it would lead to duplicate entries.
+ if (!key_was_added) {
+ return false;
+ }
+ length_ = prev_length;
+ level_string_length_ = prev_proto;
+ }
+ }
+ }
+ return AddStringKey(key, convert);
+}
+
+
+bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
+
+
+bool KeyAccumulator::AddIntegerKey(uint32_t key) {
+ // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
+ // We mark proxy-levels with a negative length.
+ DCHECK_LE(0, level_string_length_);
+ // Binary search over all but the last level. The last one might not be
+ // sorted yet.
+ for (size_t i = 1; i < elements_.size(); i++) {
+ if (AccumulatorHasKey(elements_[i - 1], key)) return false;
+ }
+ elements_.back()->push_back(key);
+ length_++;
+ return true;
+}
+
+
+bool KeyAccumulator::AddStringKey(Handle<Object> key,
+ AddKeyConversion convert) {
+ if (string_properties_.is_null()) {
+ string_properties_ = OrderedHashSet::Allocate(isolate_, 16);
+ }
+ // TODO(cbruni): remove this conversion once we throw the correct TypeError
+ // for non-string/symbol elements returned by proxies
+ if (convert == PROXY_MAGIC && key->IsNumber()) {
+ key = isolate_->factory()->NumberToString(key);
+ }
+ int prev_size = string_properties_->NumberOfElements();
+ string_properties_ = OrderedHashSet::Add(string_properties_, key);
+ if (prev_size < string_properties_->NumberOfElements()) {
+ length_++;
+ level_string_length_++;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
+ if (symbol_properties_.is_null()) {
+ symbol_properties_ = OrderedHashSet::Allocate(isolate_, 16);
+ }
+ int prev_size = symbol_properties_->NumberOfElements();
+ symbol_properties_ = OrderedHashSet::Add(symbol_properties_, key);
+ if (prev_size < symbol_properties_->NumberOfElements()) {
+ length_++;
+ level_symbol_length_++;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+void KeyAccumulator::AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert) {
+ int add_length = array->length();
+ if (add_length == 0) return;
+ for (int i = 0; i < add_length; i++) {
+ Handle<Object> current(array->get(i), isolate_);
+ AddKey(current, convert);
+ }
+}
+
+
+void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
+ AddKeyConversion convert) {
+ DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
+ ElementsAccessor* accessor = array_like->GetElementsAccessor();
+ accessor->AddElementsToKeyAccumulator(array_like, this, convert);
+}
+
+
+void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
+ // Proxies define a complete list of keys with no distinction between
+ // elements and properties, which breaks the normal assumption for the
+ // KeyAccumulator.
+ AddKeys(array_like, PROXY_MAGIC);
+ // Invert the current length to indicate a present proxy, so we can ignore
+ // element keys for this level. Otherwise we would not fully respect the order
+ // given by the proxy.
+ level_string_length_ = -level_string_length_;
+}
+
+
+void KeyAccumulator::AddElementKeysFromInterceptor(
+ Handle<JSObject> array_like) {
+ AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
+ // The interceptor might introduce duplicates for the current level, since
+ // these keys get added after the object's normal element keys.
+ SortCurrentElementsListRemoveDuplicates();
+}
+
+
+void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
+ // Sort and remove duplicates from the current elements level and adjust
+ // the lengths accordingly.
+ auto last_level = elements_.back();
+ size_t nof_removed_keys = last_level->size();
+ std::sort(last_level->begin(), last_level->end());
+ last_level->erase(std::unique(last_level->begin(), last_level->end()),
+ last_level->end());
+ // Adjust total length by the number of removed duplicates.
+ nof_removed_keys -= last_level->size();
+ length_ -= static_cast<int>(nof_removed_keys);
+}
+
+
+void KeyAccumulator::SortCurrentElementsList() {
+ if (elements_.empty()) return;
+ auto element_keys = elements_.back();
+ std::sort(element_keys->begin(), element_keys->end());
+}
+
+
+void KeyAccumulator::NextPrototype() {
+ // Store the lengths of the level just finished; on the first call there is
+ // no previous level to store yet.
+ if (!elements_.empty()) {
+ level_lengths_.push_back(level_string_length_);
+ level_lengths_.push_back(level_symbol_length_);
+ }
+ elements_.push_back(new std::vector<uint32_t>());
+ level_string_length_ = 0;
+ level_symbol_length_ = 0;
+}
+
+
+} // namespace internal
+} // namespace v8
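The ordering KeyAccumulator assembles is what key enumeration exposes to JS: per prototype level, ascending integer indices first, then string keys in insertion order, with symbol keys collected separately. For example:

  var o = {};
  o.b = 1; o[1] = 2; o.a = 3; o[0] = 4;
  // Object.keys(o)      -> ['0', '1', 'b', 'a']
  // Reflect.ownKeys(o)  -> ['0', '1', 'b', 'a'], plus any symbols at the end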
diff --git a/deps/v8/src/key-accumulator.h b/deps/v8/src/key-accumulator.h
new file mode 100644
index 0000000000..21b68433ec
--- /dev/null
+++ b/deps/v8/src/key-accumulator.h
@@ -0,0 +1,92 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_KEY_ACCUMULATOR_H_
+#define V8_KEY_ACCUMULATOR_H_
+
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
+
+// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
+// GetKeys needs to sort keys per prototype level, first showing the integer
+// indices from elements, then the strings from the properties. However, this
+// does not apply to proxies which are in full control of how the keys are
+// sorted.
+//
+// For performance reasons the KeyAccumulator internally separates integer keys
+// in |elements_| into sorted lists per prototype level. String keys are
+// collected in |string_properties_|, a single OrderedHashSet (similarly for
+// Symbols in |symbol_properties_|). To separate the keys per level later when
+// assembling the final list, |level_lengths_| keeps track of the number of
+// String and Symbol keys per level.
+//
+// Only unique keys are kept by the KeyAccumulator; strings are stored in a
+// HashSet for inexpensive lookups. Integer keys are kept in sorted lists which
+// are more compact and allow for reasonably fast includes check.
+class KeyAccumulator final BASE_EMBEDDED {
+ public:
+ explicit KeyAccumulator(Isolate* isolate,
+ KeyFilter filter = KeyFilter::SKIP_SYMBOLS)
+ : isolate_(isolate), filter_(filter) {}
+ ~KeyAccumulator();
+
+ bool AddKey(uint32_t key);
+ bool AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
+ bool AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeys(Handle<JSObject> array,
+ AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeysFromProxy(Handle<JSObject> array);
+ void AddElementKeysFromInterceptor(Handle<JSObject> array);
+ // Jump to the next level, pushing the current level lengths to
+ // |level_lengths_| and adding a new list to |elements_|.
+ void NextPrototype();
+ // Sort the integer indices in the last list in |elements_|
+ void SortCurrentElementsList();
+ Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+ int length() { return length_; }
+
+ private:
+ bool AddIntegerKey(uint32_t key);
+ bool AddStringKey(Handle<Object> key, AddKeyConversion convert);
+ bool AddSymbolKey(Handle<Object> array);
+ void SortCurrentElementsListRemoveDuplicates();
+
+ Isolate* isolate_;
+ KeyFilter filter_;
+ // |elements_| contains the sorted element keys (indices) per level.
+ std::vector<std::vector<uint32_t>*> elements_;
+ // |level_lengths_| contains the number of String and Symbol keys per level,
+ // stored in pairs. Negative values mark counts for a level with keys from a
+ // proxy.
+ std::vector<int> level_lengths_;
+ // |string_properties_| contains the unique String property keys for all
+ // levels in insertion order per level.
+ Handle<OrderedHashSet> string_properties_;
+ // |symbol_properties_| contains the unique Symbol property keys for all
+ // levels in insertion order per level.
+ Handle<OrderedHashSet> symbol_properties_;
+ // |length_| keeps track of the total number of element and property keys.
+ int length_ = 0;
+ // |level_string_length_| keeps track of the number of String keys in the
+ // current level.
+ int level_string_length_ = 0;
+ // |level_symbol_length_| keeps track of the number of Symbol keys in the
+ // current level.
+ int level_symbol_length_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
+};
+
+
+} // namespace internal
+} // namespace v8
+
+
+#endif // V8_KEY_ACCUMULATOR_H_
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 3771064c8f..3f150658e7 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -62,8 +62,8 @@ LayoutDescriptor* LayoutDescriptor::SetRawData(int field_index) {
LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
- int layout_word_index;
- int layout_bit_index;
+ int layout_word_index = 0;
+ int layout_bit_index = 0;
if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
CHECK(false);
@@ -250,7 +250,7 @@ bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
return layout_descriptor_->IsTagged(field_index);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LAYOUT_DESCRIPTOR_INL_H_
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 11d8d35f26..5a80e73f1f 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -153,7 +153,7 @@ class LayoutDescriptorHelper {
int header_size_;
LayoutDescriptor* layout_descriptor_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LAYOUT_DESCRIPTOR_H_
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index ddceab5457..383d8ce1e0 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -168,4 +168,5 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
return base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 94ef9c5055..b452fdd345 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -34,15 +34,13 @@ class DefaultPlatform : public Platform {
bool PumpMessageLoop(v8::Isolate* isolate);
// v8::Platform implementation.
- virtual void CallOnBackgroundThread(
- Task* task, ExpectedRuntime expected_runtime) override;
- virtual void CallOnForegroundThread(v8::Isolate* isolate,
- Task* task) override;
- virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
- double delay_in_seconds) override;
- virtual void CallIdleOnForegroundThread(Isolate* isolate,
- IdleTask* task) override;
- virtual bool IdleTasksEnabled(Isolate* isolate) override;
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override;
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;
+ void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) override;
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override;
+ bool IdleTasksEnabled(Isolate* isolate) override;
double MonotonicallyIncreasingTime() override;
private:
@@ -68,7 +66,8 @@ class DefaultPlatform : public Platform {
};
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
#endif // V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
diff --git a/deps/v8/src/libplatform/task-queue.cc b/deps/v8/src/libplatform/task-queue.cc
index 7a9071f362..0a630ed3c3 100644
--- a/deps/v8/src/libplatform/task-queue.cc
+++ b/deps/v8/src/libplatform/task-queue.cc
@@ -53,4 +53,5 @@ void TaskQueue::Terminate() {
process_queue_semaphore_.Signal();
}
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
index eb9d6987e9..efe9e07e06 100644
--- a/deps/v8/src/libplatform/task-queue.h
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -41,7 +41,8 @@ class TaskQueue {
DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
#endif // V8_LIBPLATFORM_TASK_QUEUE_H_
diff --git a/deps/v8/src/libplatform/worker-thread.cc b/deps/v8/src/libplatform/worker-thread.cc
index 99637151e2..a8e714a896 100644
--- a/deps/v8/src/libplatform/worker-thread.cc
+++ b/deps/v8/src/libplatform/worker-thread.cc
@@ -28,4 +28,5 @@ void WorkerThread::Run() {
}
}
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/worker-thread.h b/deps/v8/src/libplatform/worker-thread.h
index 730e039ca1..6a55a6bc89 100644
--- a/deps/v8/src/libplatform/worker-thread.h
+++ b/deps/v8/src/libplatform/worker-thread.h
@@ -32,7 +32,8 @@ class WorkerThread : public base::Thread {
DISALLOW_COPY_AND_ASSIGN(WorkerThread);
};
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
#endif // V8_LIBPLATFORM_WORKER_THREAD_H_
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 5a247d5fd7..9a2d11f96a 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -286,6 +286,7 @@ int SortedListBSearch(const List<T>& list, T elem) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LIST_INL_H_
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index d935f764b8..8b8a5dd1ed 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -229,7 +229,8 @@ template <typename T>
int SortedListBSearch(const List<T>& list, T elem);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LIST_H_
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index 520d05c4d3..70c74bc33f 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -38,6 +38,7 @@ void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
}
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOG_INL_H_
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 67143078a9..7621668552 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -5,6 +5,8 @@
#ifndef V8_LOG_UTILS_H_
#define V8_LOG_UTILS_H_
+#include <stdio.h>
+
#include <cstdarg>
#include "src/allocation.h"
@@ -136,6 +138,7 @@ class Log {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOG_UTILS_H_
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index ea69fb4bee..a10d9621d3 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1497,7 +1497,11 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
+#if USES_FUNCTION_DESCRIPTORS
+ msg.AppendAddress(*FUNCTION_ENTRYPOINT_ADDRESS(sample->external_callback));
+#else
msg.AppendAddress(sample->external_callback);
+#endif
} else {
msg.Append(",0,");
msg.AppendAddress(sample->tos);
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 33c1b29d96..064115b3aa 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -526,7 +526,8 @@ class CodeEventLogger : public CodeEventListener {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOG_H_
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
deleted file mode 100644
index 3df2194d3f..0000000000
--- a/deps/v8/src/lookup-inl.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LOOKUP_INL_H_
-#define V8_LOOKUP_INL_H_
-
-#include "src/lookup.h"
-
-#include "src/elements.h"
-
-namespace v8 {
-namespace internal {
-
-
-JSReceiver* LookupIterator::NextHolder(Map* map) {
- DisallowHeapAllocation no_gc;
- if (!map->prototype()->IsJSReceiver()) return NULL;
-
- JSReceiver* next = JSReceiver::cast(map->prototype());
- DCHECK(!next->map()->IsGlobalObjectMap() ||
- next->map()->is_hidden_prototype());
-
- if (!check_prototype_chain() &&
- !(check_hidden() && next->map()->is_hidden_prototype()) &&
- // Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
- // when not checking other hidden prototypes.
- !map->IsJSGlobalProxyMap()) {
- return NULL;
- }
-
- return next;
-}
-
-
-LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
- JSReceiver* const holder) {
- STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
- DisallowHeapAllocation no_gc;
- if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
- return LookupNonMaskingInterceptorInHolder(map, holder);
- }
- switch (state_) {
- case NOT_FOUND:
- if (map->IsJSProxyMap()) return JSPROXY;
- if (map->is_access_check_needed() &&
- (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
- return ACCESS_CHECK;
- }
- // Fall through.
- case ACCESS_CHECK:
- if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
- IsIntegerIndexedExotic(holder)) {
- return INTEGER_INDEXED_EXOTIC;
- }
- if (check_interceptor() && HasInterceptor(map) &&
- !SkipInterceptor(JSObject::cast(holder))) {
- return INTERCEPTOR;
- }
- // Fall through.
- case INTERCEPTOR:
- if (IsElement()) {
- // TODO(verwaest): Optimize.
- if (holder->IsStringObjectWithCharacterAt(index_)) {
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
- PropertyCellType::kNoCell);
- } else {
- JSObject* js_object = JSObject::cast(holder);
- if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
- return NOT_FOUND;
- }
-
- ElementsAccessor* accessor = js_object->GetElementsAccessor();
- FixedArrayBase* backing_store = js_object->elements();
- number_ =
- accessor->GetEntryForIndex(js_object, backing_store, index_);
- if (number_ == kMaxUInt32) return NOT_FOUND;
- property_details_ = accessor->GetDetails(backing_store, number_);
- }
- } else if (!map->is_dictionary_map()) {
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(*name_, map);
- if (number == DescriptorArray::kNotFound) return NOT_FOUND;
- number_ = static_cast<uint32_t>(number);
- property_details_ = descriptors->GetDetails(number_);
- } else if (map->IsGlobalObjectMap()) {
- GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
- int number = dict->FindEntry(name_);
- if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
- number_ = static_cast<uint32_t>(number);
- DCHECK(dict->ValueAt(number_)->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
- if (cell->value()->IsTheHole()) return NOT_FOUND;
- property_details_ = cell->property_details();
- } else {
- NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
- int number = dict->FindEntry(name_);
- if (number == NameDictionary::kNotFound) return NOT_FOUND;
- number_ = static_cast<uint32_t>(number);
- property_details_ = dict->DetailsAt(number_);
- }
- has_property_ = true;
- switch (property_details_.kind()) {
- case v8::internal::kData:
- return DATA;
- case v8::internal::kAccessor:
- return ACCESSOR;
- }
- case ACCESSOR:
- case DATA:
- return NOT_FOUND;
- case INTEGER_INDEXED_EXOTIC:
- case JSPROXY:
- case TRANSITION:
- UNREACHABLE();
- }
- UNREACHABLE();
- return state_;
-}
-
-
-LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
- Map* const map, JSReceiver* const holder) {
- switch (state_) {
- case NOT_FOUND:
- if (check_interceptor() && HasInterceptor(map) &&
- !SkipInterceptor(JSObject::cast(holder))) {
- return INTERCEPTOR;
- }
- // Fall through.
- default:
- return NOT_FOUND;
- }
- UNREACHABLE();
- return state_;
-}
-}
-} // namespace v8::internal
-
-#endif // V8_LOOKUP_INL_H_
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 809c35e4a5..f60563b19e 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -6,13 +6,45 @@
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
+#include "src/elements.h"
#include "src/isolate-inl.h"
-#include "src/lookup-inl.h"
namespace v8 {
namespace internal {
+// static
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> key,
+ bool* success,
+ Configuration configuration) {
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index)) {
+ *success = true;
+ return LookupIterator(isolate, receiver, index, configuration);
+ }
+
+ Handle<Name> name;
+ *success = Object::ToName(isolate, key).ToHandle(&name);
+ if (!*success) {
+ DCHECK(isolate->has_pending_exception());
+ // Return an unusable dummy.
+ return LookupIterator(receiver, isolate->factory()->empty_string());
+ }
+
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it(isolate, receiver, index, configuration);
+ // Here we try to avoid having to rebuild the string later
+ // by storing it on the indexed LookupIterator.
+ it.name_ = name;
+ return it;
+ }
+
+ return LookupIterator(receiver, name, configuration);
+}
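PropertyOrElement normalizes an arbitrary key the way property access does: array indices become element lookups, everything else goes through ToName, and a string that spells a canonical index is treated as an element while the string stays cached on the iterator. The JS-visible consequence (illustrative):

  var o = {};
  o[5] = 'x';
  // o['5'] === o[5]  -> true   ('5' canonicalizes to index 5)
  o['05'] = 'y';       // '05' is not a canonical index; stays a string key
  // Object.keys(o)   -> ['5', '05']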
+
+
void LookupIterator::Next() {
DCHECK_NE(JSPROXY, state_);
DCHECK_NE(TRANSITION, state_);
@@ -48,13 +80,13 @@ void LookupIterator::Next() {
}
-void LookupIterator::RestartLookupForNonMaskingInterceptors() {
- interceptor_state_ = InterceptorState::kProcessNonMasking;
+void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
state_ = NOT_FOUND;
+ interceptor_state_ = interceptor_state;
property_details_ = PropertyDetails::Empty();
- number_ = DescriptorArray::kNotFound;
holder_ = initial_holder_;
holder_map_ = handle(holder_->map(), isolate_);
+ number_ = DescriptorArray::kNotFound;
Next();
}
@@ -102,7 +134,8 @@ Handle<JSObject> LookupIterator::GetStoreTarget() const {
bool LookupIterator::HasAccess() const {
DCHECK_EQ(ACCESS_CHECK, state_);
- return isolate_->MayAccess(GetHolder<JSObject>());
+ return isolate_->MayAccess(handle(isolate_->context()),
+ GetHolder<JSObject>());
}
@@ -212,11 +245,11 @@ void LookupIterator::PrepareTransitionToDataProperty(
state_ = TRANSITION;
transition_ = transition;
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalObject()) {
// Install a property cell.
InternalizeName();
- auto cell = GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject>::cast(receiver), name());
+ auto cell = JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject>::cast(receiver), name());
DCHECK(cell->value()->IsTheHole());
transition_ = cell;
} else if (!transition->is_dictionary_map()) {
@@ -230,7 +263,7 @@ void LookupIterator::ApplyTransitionToDataProperty() {
DCHECK_EQ(TRANSITION, state_);
Handle<JSObject> receiver = GetStoreTarget();
- if (receiver->IsGlobalObject()) return;
+ if (receiver->IsJSGlobalObject()) return;
holder_ = receiver;
holder_map_ = transition_map();
JSObject::MigrateToMap(receiver, holder_map_);
@@ -394,7 +427,7 @@ Handle<Object> LookupIterator::FetchValue() const {
ElementsAccessor* accessor = holder->GetElementsAccessor();
return accessor->Get(handle(holder->elements()), number_);
- } else if (holder_map_->IsGlobalObjectMap()) {
+ } else if (holder_map_->IsJSGlobalObjectMap()) {
result = holder->global_dictionary()->ValueAt(number_);
DCHECK(result->IsPropertyCell());
result = PropertyCell::cast(result)->value();
@@ -453,7 +486,7 @@ Handle<HeapType> LookupIterator::GetFieldType() const {
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
Handle<JSObject> holder = GetHolder<JSObject>();
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(holder);
Object* value = global->global_dictionary()->ValueAt(dictionary_entry());
DCHECK(value->IsPropertyCell());
return handle(PropertyCell::cast(value));
@@ -479,7 +512,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
if (IsElement()) {
ElementsAccessor* accessor = holder->GetElementsAccessor();
accessor->Set(holder->elements(), number_, *value);
- } else if (holder->IsGlobalObject()) {
+ } else if (holder->IsJSGlobalObject()) {
Handle<GlobalDictionary> property_dictionary =
handle(holder->global_dictionary());
PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
@@ -561,5 +594,131 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
}
return interceptor_state_ == InterceptorState::kProcessNonMasking;
}
+
+
+JSReceiver* LookupIterator::NextHolder(Map* map) {
+ DisallowHeapAllocation no_gc;
+ if (!map->prototype()->IsJSReceiver()) return NULL;
+
+ JSReceiver* next = JSReceiver::cast(map->prototype());
+ DCHECK(!next->map()->IsJSGlobalObjectMap() ||
+ next->map()->is_hidden_prototype());
+
+ if (!check_prototype_chain() &&
+ !(check_hidden() && next->map()->is_hidden_prototype()) &&
+ // Always look up behind the JSGlobalProxy into the JSGlobalObject, even
+ // when not checking other hidden prototypes.
+ !map->IsJSGlobalProxyMap()) {
+ return NULL;
+ }
+
+ return next;
+}
+
+
+LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
+ JSReceiver* const holder) {
+ STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
+ DisallowHeapAllocation no_gc;
+ if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
+ return LookupNonMaskingInterceptorInHolder(map, holder);
+ }
+ switch (state_) {
+ case NOT_FOUND:
+ if (map->IsJSProxyMap()) return JSPROXY;
+ if (map->is_access_check_needed() &&
+ (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
+ return ACCESS_CHECK;
+ }
+ // Fall through.
+ case ACCESS_CHECK:
+ if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
+ IsIntegerIndexedExotic(holder)) {
+ return INTEGER_INDEXED_EXOTIC;
+ }
+ if (check_interceptor() && HasInterceptor(map) &&
+ !SkipInterceptor(JSObject::cast(holder))) {
+ return INTERCEPTOR;
+ }
+ // Fall through.
+ case INTERCEPTOR:
+ if (IsElement()) {
+ // TODO(verwaest): Optimize.
+ if (holder->IsStringObjectWithCharacterAt(index_)) {
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kNoCell);
+ } else {
+ JSObject* js_object = JSObject::cast(holder);
+ if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
+ return NOT_FOUND;
+ }
+
+ ElementsAccessor* accessor = js_object->GetElementsAccessor();
+ FixedArrayBase* backing_store = js_object->elements();
+ number_ =
+ accessor->GetEntryForIndex(js_object, backing_store, index_);
+ if (number_ == kMaxUInt32) return NOT_FOUND;
+ property_details_ = accessor->GetDetails(backing_store, number_);
+ }
+ } else if (!map->is_dictionary_map()) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number = descriptors->SearchWithCache(*name_, map);
+ if (number == DescriptorArray::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = descriptors->GetDetails(number_);
+ } else if (map->IsJSGlobalObjectMap()) {
+ GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ DCHECK(dict->ValueAt(number_)->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
+ if (cell->value()->IsTheHole()) return NOT_FOUND;
+ property_details_ = cell->property_details();
+ } else {
+ NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == NameDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = dict->DetailsAt(number_);
+ }
+ has_property_ = true;
+ switch (property_details_.kind()) {
+ case v8::internal::kData:
+ return DATA;
+ case v8::internal::kAccessor:
+ return ACCESSOR;
+ }
+ case ACCESSOR:
+ case DATA:
+ return NOT_FOUND;
+ case INTEGER_INDEXED_EXOTIC:
+ case JSPROXY:
+ case TRANSITION:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return state_;
+}
+
+
+LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
+ Map* const map, JSReceiver* const holder) {
+ switch (state_) {
+ case NOT_FOUND:
+ if (check_interceptor() && HasInterceptor(map) &&
+ !SkipInterceptor(JSObject::cast(holder))) {
+ return INTERCEPTOR;
+ }
+ // Fall through.
+ default:
+ return NOT_FOUND;
+ }
+ UNREACHABLE();
+ return state_;
+}
+
} // namespace internal
} // namespace v8
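
The LookupInHolder code above advances a small state machine whose switch cases deliberately fall through, so that resuming from any state re-runs only the checks that have not fired yet. A minimal standalone sketch of that pattern follows, using toy stand-in states and a toy Holder rather than V8's actual types:

// A compressed model of the fall-through lookup state machine above.
// State names mirror V8's, but Holder and its checks are toy stand-ins.
#include <cstdio>

enum State { NOT_FOUND, ACCESS_CHECK, INTERCEPTOR, DATA };

struct Holder {
  bool needs_access_check;
  bool has_interceptor;
  bool has_property;
};

// Resuming from state `s` runs only the remaining checks; the case labels
// fall through on purpose, exactly as in LookupInHolder.
State LookupInHolder(State s, const Holder& h) {
  switch (s) {
    case NOT_FOUND:
      if (h.needs_access_check) return ACCESS_CHECK;
      // Fall through.
    case ACCESS_CHECK:
      if (h.has_interceptor) return INTERCEPTOR;
      // Fall through.
    case INTERCEPTOR:
      if (h.has_property) return DATA;
      return NOT_FOUND;
    case DATA:
      return NOT_FOUND;  // Property already reported; nothing further here.
  }
  return NOT_FOUND;
}

int main() {
  Holder h{true, true, true};
  State s = LookupInHolder(NOT_FOUND, h);  // -> ACCESS_CHECK
  s = LookupInHolder(s, h);                // -> INTERCEPTOR
  s = LookupInHolder(s, h);                // -> DATA
  std::printf("final state: %d\n", s);     // prints 3
  return 0;
}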
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 3888ed6240..5396619852 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -158,6 +158,12 @@ class LookupIterator final BASE_EMBEDDED {
return it;
}
+ static LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
+ bool* success, Configuration configuration = DEFAULT);
+
+ void Restart() { RestartInternal(InterceptorState::kUninitialized); }
+
Isolate* isolate() const { return isolate_; }
State state() const { return state_; }
@@ -256,7 +262,10 @@ class LookupIterator final BASE_EMBEDDED {
MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
inline State LookupInHolder(Map* map, JSReceiver* holder);
- void RestartLookupForNonMaskingInterceptors();
+ void RestartLookupForNonMaskingInterceptors() {
+ RestartInternal(InterceptorState::kProcessNonMasking);
+ }
+ void RestartInternal(InterceptorState interceptor_state);
State LookupNonMaskingInterceptorInHolder(Map* map, JSReceiver* holder);
Handle<Object> FetchValue() const;
void ReloadPropertyInformation();
@@ -316,6 +325,7 @@ class LookupIterator final BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOOKUP_H_
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index c865a5fb0c..fd2aa7c314 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -278,6 +278,7 @@ class AllocationUtils {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 640c2dff4e..27ee8e4334 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -84,8 +84,8 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
Handle<Object> argument(message->argument(), isolate);
Handle<Object> args[] = {argument};
MaybeHandle<Object> maybe_stringified = Execution::TryCall(
- isolate->to_detail_string_fun(), isolate->factory()->undefined_value(),
- arraysize(args), args);
+ isolate, isolate->to_detail_string_fun(),
+ isolate->factory()->undefined_value(), arraysize(args), args);
Handle<Object> stringified;
if (!maybe_stringified.ToHandle(&stringified)) {
stringified = isolate->factory()->NewStringFromAsciiChecked("exception");
@@ -144,10 +144,13 @@ base::SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
: isolate_(isolate) {
+ Handle<Object> maybe_function = JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_function_symbol());
+ if (!maybe_function->IsJSFunction()) return;
+
+ fun_ = Handle<JSFunction>::cast(maybe_function);
receiver_ = JSObject::GetDataProperty(
call_site_obj, isolate->factory()->call_site_receiver_symbol());
- fun_ = Handle<JSFunction>::cast(JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_function_symbol()));
pos_ = Handle<Smi>::cast(JSObject::GetDataProperty(
call_site_obj,
isolate->factory()->call_site_position_symbol()))
@@ -316,7 +319,7 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
Handle<JSFunction> fun = isolate->no_side_effect_to_string_fun();
MaybeHandle<Object> maybe_result =
- Execution::TryCall(fun, factory->undefined_value(), 1, &arg);
+ Execution::TryCall(isolate, fun, factory->undefined_value(), 1, &arg);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
@@ -400,10 +403,6 @@ MaybeHandle<String> ErrorToStringHelper::Stringify(Isolate* isolate,
Handle<String> name_string = isolate->factory()->name_string();
LookupIterator internal_error_lookup(
error, internal_key, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- LookupIterator message_lookup(
- error, message_string, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- LookupIterator name_lookup(error, name_string,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
// Find out whether an internally created error object is on the prototype
// chain. If the name property is found on a holder prior to the internally
@@ -412,24 +411,26 @@ MaybeHandle<String> ErrorToStringHelper::Stringify(Isolate* isolate,
// Similar for the message property. If the message property shadows the
// internally created error object, use that message property. Otherwise
// use empty string as message.
- if (internal_error_lookup.IsFound()) {
- if (!ShadowsInternalError(isolate, &name_lookup, &internal_error_lookup)) {
- Handle<JSObject> holder = internal_error_lookup.GetHolder<JSObject>();
- name = Handle<String>(holder->constructor_name());
- }
- if (!ShadowsInternalError(isolate, &message_lookup,
- &internal_error_lookup)) {
- message = isolate->factory()->empty_string();
- }
- }
- if (name.is_null()) {
+ LookupIterator name_lookup(error, name_string,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (internal_error_lookup.IsFound() &&
+ !ShadowsInternalError(isolate, &name_lookup, &internal_error_lookup)) {
+ Handle<JSObject> holder = internal_error_lookup.GetHolder<JSObject>();
+ name = Handle<String>(holder->constructor_name());
+ } else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, name,
GetStringifiedProperty(isolate, &name_lookup,
isolate->factory()->Error_string()),
String);
}
- if (message.is_null()) {
+
+ LookupIterator message_lookup(
+ error, message_string, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (internal_error_lookup.IsFound() &&
+ !ShadowsInternalError(isolate, &message_lookup, &internal_error_lookup)) {
+ message = isolate->factory()->empty_string();
+ } else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, message,
GetStringifiedProperty(isolate, &message_lookup,
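
The CallSite constructor change above replaces an unconditional Handle cast with an early return, leaving fun_ empty when the looked-up property is not a function; the IsValid() accessor added in messages.h lets callers detect that. A small sketch of this deferred-validity pattern under toy types, where GetFunctionProperty stands in for JSObject::GetDataProperty:

#include <cstdio>
#include <memory>
#include <string>

struct JSFunction { std::string name; };

// Stand-in for the data-property lookup: the result may or may not
// actually be a function.
std::shared_ptr<JSFunction> GetFunctionProperty(bool present) {
  return present ? std::make_shared<JSFunction>(JSFunction{"f"}) : nullptr;
}

class CallSite {
 public:
  explicit CallSite(bool property_present) {
    auto maybe_function = GetFunctionProperty(property_present);
    // As in the patched constructor: verify before storing, instead of
    // casting unconditionally and misbehaving on a non-function value.
    if (!maybe_function) return;
    fun_ = std::move(maybe_function);
  }
  // Callers probe this before using any fun_-dependent accessor.
  bool IsValid() const { return fun_ != nullptr; }

 private:
  std::shared_ptr<JSFunction> fun_;
};

int main() {
  CallSite good(true), bad(false);
  std::printf("%d %d\n", good.IsValid(), bad.IsValid());  // prints "1 0"
  return 0;
}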
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index bb78f3d0a5..6d3f797822 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -62,6 +62,8 @@ class CallSite {
bool IsEval();
bool IsConstructor();
+ bool IsValid() { return !fun_.is_null(); }
+
private:
Isolate* isolate_;
Handle<Object> receiver_;
@@ -92,8 +94,9 @@ class CallSite {
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
- T(CannotPreventExtExternalArray, \
- "Cannot prevent extension of an object with external array elements") \
+ T(CannotPreventExt, "Cannot prevent extensions") \
+ T(CannotFreezeArrayBufferView, \
+ "Cannot freeze array buffer views with elements") \
T(CircularStructure, "Converting circular structure to JSON") \
T(ConstAssign, "Assignment to constant variable.") \
T(ConstructorNonCallable, \
@@ -105,6 +108,7 @@ class CallSite {
T(DateType, "this is not a Date object.") \
T(DebuggerFrame, "Debugger: Invalid frame index.") \
T(DebuggerType, "Debugger: Parameters have wrong types.") \
+ T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
T(DefineDisallowed, "Cannot define property:%, object is not extensible.") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
T(ExtendsValueGenerator, \
@@ -113,8 +117,6 @@ class CallSite {
"Class extends value % is not a function or null") \
T(FirstArgumentNotRegExp, \
"First argument to % must not be a regular expression") \
- T(FlagsGetterNonObject, \
- "RegExp.prototype.flags getter called on non-object %") \
T(FunctionBind, "Bind must be called on a function") \
T(GeneratorRunning, "Generator is already running") \
T(IllegalInvocation, "Illegal invocation") \
@@ -199,6 +201,8 @@ class CallSite {
T(ReduceNoInitial, "Reduce of empty array with no initial value") \
T(RegExpFlags, \
"Cannot supply flags when constructing one RegExp from another") \
+ T(RegExpNonObject, "% getter called on non-object %") \
+ T(RegExpNonRegExp, "% getter called on non-RegExp object") \
T(ReinitializeIntl, "Trying to re-initialize % object.") \
T(ResolvedOptionsCalledOnNonObject, \
"resolvedOptions method called on a non-object or on a object that is " \
@@ -213,7 +217,9 @@ class CallSite {
T(StrictPoisonPill, \
"'caller', 'callee', and 'arguments' properties may not be accessed on " \
"strict mode functions or the arguments objects for calls to them") \
- T(StrictReadOnlyProperty, "Cannot assign to read only property '%' of %") \
+ T(StrictReadOnlyProperty, \
+ "Cannot assign to read only property '%' of % '%'") \
+ T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
T(StrongArity, \
"In strong mode, calling a function with too few arguments is deprecated") \
T(StrongDeleteProperty, \
@@ -232,8 +238,8 @@ class CallSite {
T(SimdToNumber, "Cannot convert a SIMD value to a number") \
T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
T(ValueAndAccessor, \
- "Invalid property. A property cannot both have accessors and be " \
- "writable or have a value, %") \
+ "Invalid property descriptor. Cannot both specify accessors and a value " \
+ "or writable attribute, %") \
T(VarRedeclaration, "Identifier '%' has already been declared") \
T(WithExpression, "% has no properties") \
T(WrongArgs, "%: Arguments list has wrong type") \
@@ -250,6 +256,7 @@ class CallSite {
T(DateRange, "Provided date is not in valid range.") \
T(ExpectedLocation, "Expected Area/Location for time zone, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
+ T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
T(InvalidArrayLength, "Invalid array length") \
T(InvalidCodePoint, "Invalid code point %") \
T(InvalidCountValue, "Invalid count value") \
@@ -267,6 +274,7 @@ class CallSite {
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
T(InvalidTypedArrayLength, "Invalid typed array length") \
T(InvalidTypedArrayOffset, "Start offset is too large:") \
+ T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
T(LocaleMatcher, "Illegal value for localeMatcher:%") \
T(NormalizationForm, "The normalization form should be one of %.") \
T(NumberFormatRange, "% argument must be between 0 and 20") \
@@ -319,6 +327,9 @@ class CallSite {
T(NoCatchOrFinally, "Missing catch or finally after try") \
T(NotIsvar, "builtin %%IS_VAR: not a variable") \
T(ParamAfterRest, "Rest parameter must be last formal parameter") \
+ T(PushPastSafeLength, \
+ "Pushing % elements on an array-like of length % " \
+ "is disallowed, as the total surpasses 2**53-1") \
T(BadSetterRestParameter, \
"Setter function argument must not be a rest parameter") \
T(ParamDupe, "Duplicate parameter name not allowed in this context") \
@@ -490,6 +501,7 @@ class ErrorToStringHelper {
List<Handle<JSObject> > visited_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MESSAGES_H_
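
The T(...) entries above pair a template name with a message whose '%' placeholders are filled positionally from the caller's arguments. The following is only an illustration of that substitution under that assumption, not V8's actual MessageTemplate::FormatMessage:

#include <cassert>
#include <string>
#include <vector>

// Replace each '%' in the template with the next argument, in order.
std::string FormatTemplate(const std::string& tmpl,
                           const std::vector<std::string>& args) {
  std::string out;
  size_t next_arg = 0;
  for (char c : tmpl) {
    if (c == '%' && next_arg < args.size()) {
      out += args[next_arg++];  // Positional substitution.
    } else {
      out += c;
    }
  }
  return out;
}

int main() {
  // E.g. T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'")
  assert(FormatTemplate("Cannot create property '%' on % '%'",
                        {"x", "string", "abc"}) ==
         "Cannot create property 'x' on string 'abc'");
  return 0;
}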
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index f4bddf5461..0719055eff 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -84,48 +84,10 @@ bool Operand::is_reg() const {
}
-int Register::NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumRegisters() {
- return FPURegister::kMaxNumRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableRegisters() {
- return FPURegister::kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
-}
-
-
-int FPURegister::ToAllocationIndex(FPURegister reg) {
- DCHECK(reg.code() % 2 == 0);
- DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
- DCHECK(reg.is_valid());
- DCHECK(!reg.is(kDoubleRegZero));
- DCHECK(!reg.is(kLithiumScratchDouble));
- return (reg.code() / 2);
-}
-
-
// -----------------------------------------------------------------------------
// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
- if (IsCodeTarget(rmode_)) {
- uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
- uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
-
- if (scope1 != scope2) {
- Assembler::JumpToJumpRegister(pc_);
- }
- }
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
@@ -475,16 +437,29 @@ void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
}
-void Assembler::emit(Instr x) {
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
+ if (IsPrevInstrCompactBranch()) {
+ if (Instruction::IsForbiddenAfterBranchInstr(x)) {
+ // Nop instruction to precede a CTI in the forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+ }
+ ClearCompactBranchState();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
+ EmittedCompactBranchInstruction();
+ }
CheckTrampolinePoolQuick();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
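
The new emit() logic above handles MIPS r6 "forbidden slots": a compact branch has no delay slot, and the instruction slot that follows it must not hold another control-transfer instruction (CTI), so the assembler pads with a nop when one would land there. A toy model of just that bookkeeping, with a fake CTI predicate standing in for Instruction::IsForbiddenAfterBranchInstr:

#include <cstdint>
#include <vector>

using Instr = uint32_t;
constexpr Instr kNop = 0;  // Real encoding: SPECIAL | SLL (sll zero,zero,0).

struct ToyAssembler {
  std::vector<Instr> buffer;
  bool prev_was_compact_branch = false;

  // Fake predicate: pretend the top bit marks a control-transfer instr.
  static bool IsForbiddenAfterBranch(Instr x) { return (x >> 31) != 0; }

  void emit(Instr x, bool is_compact_branch = false) {
    if (prev_was_compact_branch && IsForbiddenAfterBranch(x)) {
      buffer.push_back(kNop);  // Pad the forbidden slot with a nop.
    }
    prev_was_compact_branch = is_compact_branch;
    buffer.push_back(x);
  }
};

int main() {
  ToyAssembler a;
  a.emit(0x80000001u, /*is_compact_branch=*/true);  // A compact branch.
  a.emit(0x80000002u);  // Another CTI: a nop is inserted before it.
  return a.buffer.size() == 3 ? 0 : 1;  // branch, nop, CTI.
}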
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 7fa4d5d66a..3860fe4e19 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -64,28 +64,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
-const char* DoubleRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
-}
-
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
@@ -250,31 +228,31 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (kPointerSize & kImm16Mask); // NOLINT
+const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (kPointerSize & kImm16Mask); // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (-kPointerSize & kImm16Mask); // NOLINT
+const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (-kPointerSize & kImm16Mask); // NOLINT
// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern =
+ SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern =
+ LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kLwRegFpOffsetPattern =
+ LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kSwRegFpOffsetPattern =
+ SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
-const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -307,6 +285,10 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ ClearCompactBranchState();
+ }
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -319,6 +301,10 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ ClearCompactBranchState();
+ }
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -334,21 +320,21 @@ void Assembler::CodeTargetAlign() {
Register Assembler::GetRtReg(Instr instr) {
Register rt;
- rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
return rt;
}
Register Assembler::GetRsReg(Instr instr) {
Register rs;
- rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
return rs;
}
Register Assembler::GetRdReg(Instr instr) {
Register rd;
- rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
return rd;
}
@@ -475,19 +461,38 @@ bool Assembler::IsBranch(Instr instr) {
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
// Checks if the instruction is a branch.
- return opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL ||
+ bool isBranch =
+ opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
+ opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
(opcode == COP1 && rs_field == BC1NEZ);
+ if (!isBranch && IsMipsArchVariant(kMips32r6)) {
+ // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
+ // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
+ isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
+ opcode == BALC ||
+ (opcode == POP66 && rs_field != 0) || // BEQZC
+ (opcode == POP76 && rs_field != 0); // BNEZC
+ }
+ return isBranch;
+}
+
+
+bool Assembler::IsBc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a BC or BALC.
+ return opcode == BC || opcode == BALC;
+}
+
+
+bool Assembler::IsBzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is BEQZC or BNEZC.
+ return (opcode == POP66 && GetRsField(instr) != 0) ||
+ (opcode == POP76 && GetRsField(instr) != 0);
}
@@ -507,6 +512,34 @@ bool Assembler::IsBne(Instr instr) {
}
+bool Assembler::IsBeqzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP66 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBnezc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP76 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBeqc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP10 && rs != 0 && rs < rt; // rt != 0 is implied by rs < rt.
+}
+
+
+bool Assembler::IsBnec(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP30 && rs != 0 && rs < rt; // rt != 0 is implied by rs < rt.
+}
+
+
bool Assembler::IsJump(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
@@ -592,7 +625,7 @@ int32_t Assembler::GetBranchOffset(Instr instr) {
bool Assembler::IsLw(Instr instr) {
- return ((instr & kOpcodeMask) == LW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
@@ -614,7 +647,7 @@ Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
bool Assembler::IsSw(Instr instr) {
- return ((instr & kOpcodeMask) == SW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
@@ -640,6 +673,36 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (Assembler::IsBc(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBzc(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ }
+ }
+ return Assembler::OffsetSize::kOffset16;
+}
+
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ int bits = OffsetSizeInBits(instr);
+ const int32_t mask = (1 << bits) - 1;
+ bits = 32 - bits;
+
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ int32_t imm = ((instr & mask) << bits) >> (bits - 2);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + Assembler::kBranchPCOffset + imm;
+ }
+}
+
+
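AddBranchOffset above decodes an n-bit signed, word-scaled branch field in one expression: shifting the field up to the sign bit and arithmetic-shifting back by two fewer bits both sign-extends it and multiplies it by 4 (the instruction size). A standalone check of the kOffset16 case, assuming two's-complement arithmetic right shifts just as the code itself does:

#include <cassert>
#include <cstdint>

// Decode a 16-bit branch field the way AddBranchOffset does for kOffset16.
int32_t DecodeOffset16(uint32_t instr) {
  const int bits = 16;                     // Width of the offset field.
  const uint32_t mask = (1u << bits) - 1;
  const int shift = 32 - bits;             // Distance to the sign bit.
  // Left-shift to the top, arithmetic right-shift back by (shift - 2):
  // sign-extends the field and multiplies by 4 in a single step.
  return (static_cast<int32_t>(instr & mask) << shift) >> (shift - 2);
}

int main() {
  assert(DecodeOffset16(0x00000001) == 4);   // +1 word -> +4 bytes.
  assert(DecodeOffset16(0x0000FFFF) == -4);  // -1 word -> -4 bytes.
  return 0;
}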
int Assembler::target_at(int pos, bool is_internal) {
Instr instr = instr_at(pos);
if (is_internal) {
@@ -663,18 +726,9 @@ int Assembler::target_at(int pos, bool is_internal) {
}
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsLui(instr));
- // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
- // the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
-
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- return pos + kBranchPCOffset + imm18;
- }
- } else if (IsLui(instr)) {
+ return AddBranchOffset(pos, instr);
+ } else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
@@ -690,10 +744,23 @@ int Assembler::target_at(int pos, bool is_internal) {
DCHECK(pos > delta);
return pos - delta;
}
- } else {
- UNREACHABLE();
- return 0;
}
+ return 0;
+}
+
+
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
+ DCHECK((imm & 3) == 0);
+ imm >>= 2;
+
+ const int32_t mask = (1 << bits) - 1;
+ instr &= ~mask;
+ DCHECK(is_intn(imm, bits));
+
+ return instr | (imm & mask);
}
@@ -716,15 +783,9 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
DCHECK(IsBranch(instr) || IsLui(instr));
if (IsBranch(instr)) {
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- DCHECK((imm18 & 3) == 0);
-
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- DCHECK(is_int16(imm16));
-
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
- } else if (IsLui(instr)) {
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+ } else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
@@ -738,8 +799,6 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
- } else {
- UNREACHABLE();
}
}
@@ -788,20 +847,23 @@ void Assembler::bind_to(Label* L, int pos) {
Instr instr = instr_at(fixup_pos);
if (is_internal) {
target_at_put(fixup_pos, pos, is_internal);
- } else if (!is_internal && IsBranch(instr)) {
- if (dist > kMaxBranchOffset) {
- if (trampoline_pos == kInvalidSlotPos) {
- trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
}
- CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos, false);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ target_at_put(fixup_pos, pos, false);
}
- target_at_put(fixup_pos, pos, false);
- } else {
- target_at_put(fixup_pos, pos, false);
}
}
L->bind_to(pos);
@@ -832,10 +894,47 @@ void Assembler::next(Label* L, bool is_internal) {
bool Assembler::is_near(Label* L) {
- if (L->is_bound()) {
- return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+}
+
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ if (L == nullptr || !L->is_bound()) return true;
+ return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
+}
+
+
+bool Assembler::is_near_branch(Label* L) {
+ DCHECK(L->is_bound());
+ return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
+}
+
+
+int Assembler::BranchOffset(Instr instr) {
+ // For pre-R6, and for all other R6 branches, the offset is 16 bits.
+ int bits = OffsetSize::kOffset16;
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ uint32_t opcode = GetOpcodeField(instr);
+ switch (opcode) {
+ // Checks BC or BALC.
+ case BC:
+ case BALC:
+ bits = OffsetSize::kOffset26;
+ break;
+
+ // Checks BEQZC or BNEZC.
+ case POP66:
+ case POP76:
+ if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
+ break;
+ default:
+ break;
+ }
}
- return false;
+
+ return (1 << (bits + 2 - 1)) - 1;
}
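
The value returned above is the byte reach of a branch whose field holds a signed word offset of the given width: (1 << (bits + 2 - 1)) - 1, i.e. roughly ±2^(bits+1) bytes. A quick check of the three field sizes the r6 code distinguishes:

#include <cassert>

// Same formula as Assembler::BranchOffset() above.
int MaxBranchOffsetBytes(int field_bits) {
  return (1 << (field_bits + 2 - 1)) - 1;
}

int main() {
  assert(MaxBranchOffsetBytes(16) == 131071);     // ~128 KB: b, beq, ...
  assert(MaxBranchOffsetBytes(21) == 4194303);    // ~4 MB: beqzc, bnezc.
  assert(MaxBranchOffsetBytes(26) == 134217727);  // ~128 MB: bc, balc.
  return 0;
}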
@@ -926,49 +1025,56 @@ void Assembler::GenInstrRegister(Opcode opcode,
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- FPURegister ft,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch) {
+ DCHECK(rs.is_valid() && (is_int21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t j) {
- DCHECK(rs.is_valid() && (is_uint21(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (j & kImm21Mask);
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
+ uint32_t offset21) {
+ DCHECK(rs.is_valid() && (is_uint21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
emit(instr);
}
-void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26) {
+void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch) {
DCHECK(is_int26(offset26));
Instr instr = opcode | (offset26 & kImm26Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
@@ -1021,99 +1127,18 @@ uint32_t Assembler::jump_address(Label* L) {
}
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - pc_offset();
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
+ int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos();
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
} else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
@@ -1122,9 +1147,9 @@ int32_t Assembler::branch_offset21_compact(Label* L,
}
}
- int32_t offset = target_pos - pc_offset();
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
+ DCHECK(is_intn(offset, bits + 2));
DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFe00000) == 0); // Offset is 21bit width.
return offset;
}
@@ -1171,14 +1196,14 @@ void Assembler::bal(int16_t offset) {
void Assembler::bc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrImmediate(BC, offset);
+ GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::balc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(BALC, offset);
+ GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1199,7 +1224,7 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, rt, rt, offset);
+ GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1208,7 +1233,7 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZ, rs, rt, offset);
+ GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1217,7 +1242,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZL, rs, rt, offset);
+ GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1240,7 +1265,8 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
@@ -1254,14 +1280,15 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, rt, rt, offset);
+ DCHECK(!rt.is(zero_reg));
+ GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1270,16 +1297,16 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZ, rs, rt, offset);
+ GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!rs.is(zero_reg));
+ DCHECK(!rt.is(zero_reg));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZL, rs, rt, offset);
+ GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1310,7 +1337,7 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1318,86 +1345,105 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezall(Register rs, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(ADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(ADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(DADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(DADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP66 | (rs.code() << kRsShift) | (offset & kImm21Mask);
- emit(instr);
+ GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP76 | (rs.code() << kRsShift) | offset;
- emit(instr);
+ GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
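
The beqc/bnec changes above stop asserting rs.code() < rt.code() and instead canonicalize: the r6 encodings want the lower register number in the rs slot, and equality comparison is symmetric, so swapping the operands preserves meaning. A sketch of that canonicalization over a bare register-code stand-in:

#include <cassert>
#include <utility>

struct Reg { int code; };  // Bare stand-in for v8::internal::Register.

// Return (rs, rt) in encodable order: BEQC/BNEC require rs.code < rt.code.
std::pair<Reg, Reg> CanonicalizeEqualityOperands(Reg rs, Reg rt) {
  assert(rs.code != rt.code && rs.code != 0 && rt.code != 0);
  if (rs.code < rt.code) return {rs, rt};
  return {rt, rs};  // beqc a,b == beqc b,a: swapping is safe.
}

int main() {
  std::pair<Reg, Reg> p = CanonicalizeEqualityOperands(Reg{7}, Reg{4});
  assert(p.first.code == 4 && p.second.code == 7);
  return 0;
}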
@@ -1409,7 +1455,9 @@ void Assembler::j(int32_t target) {
(kImm26Bits + kImmFieldShift)) == 0;
DCHECK(in_range && ((target & 3) == 0));
#endif
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrJump(J, (target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1435,8 +1483,10 @@ void Assembler::jal(int32_t target) {
(kImm26Bits + kImmFieldShift)) == 0;
DCHECK(in_range && ((target & 3) == 0));
#endif
+ BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, (target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1451,9 +1501,7 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::jic(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- Instr instr = POP66 | (JIC << kRsShift) | (rt.code() << kRtShift) |
- (offset & kImm16Mask);
- emit(instr);
+ GenInstrImmediate(POP66, zero_reg, rt, offset);
}
@@ -1784,7 +1832,7 @@ void Assembler::aui(Register rs, Register rt, int32_t j) {
void Assembler::addiupc(Register rs, int32_t imm19) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid() && is_int19(imm19));
- int32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
+ uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -1792,23 +1840,23 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
void Assembler::lwpc(Register rs, int32_t offset19) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid() && is_int19(offset19));
- int32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
+ uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::auipc(Register rs, int16_t imm16) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::aluipc(Register rs, int16_t imm16) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -1942,14 +1990,14 @@ void Assembler::movn(Register rd, Register rs, Register rt) {
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 1;
+ rt.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 0;
+ rt.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
@@ -2233,7 +2281,7 @@ void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(IsMipsArchVariant(kMips32r2));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
@@ -2241,7 +2289,7 @@ void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(IsMipsArchVariant(kMips32r2));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -2249,7 +2297,7 @@ void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(IsMipsArchVariant(kMips32r2));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
@@ -2257,7 +2305,7 @@ void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(IsMipsArchVariant(kMips32r2));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -2693,7 +2741,6 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
}
-// Debugging.
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta) {
Instr instr = instr_at(pc);
@@ -2887,8 +2934,12 @@ void Assembler::CheckTrampolinePool() {
// First we emit jump (2 instructions), then we emit trampoline pool.
{ BlockTrampolinePoolScope block_trampoline_pool(this);
Label after_pool;
- b(&after_pool);
- nop();
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc(&after_pool);
+ } else {
+ b(&after_pool);
+ nop();
+ }
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
@@ -2975,130 +3026,12 @@ void Assembler::set_target_address_at(Address pc,
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
*(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
- // The following code is an optimization for the common case of Call()
- // or Jump() which is load to register, and jump through register:
- // li(t9, address); jalr(t9) (or jr(t9)).
- // If the destination address is in the same 256 MB page as the call, it
- // is faster to do a direct jal, or j, rather than jump thru register, since
- // that lets the cpu pipeline prefetch the target address. However each
- // time the address above is patched, we have to patch the direct jal/j
- // instruction, as well as possibly revert to jalr/jr if we now cross a
- // 256 MB page. Note that with the jal/j instructions, we do not need to
- // load the register, but that code is left, since it makes it easy to
- // revert this process. A further optimization could try replacing the
- // li sequence with nops.
- // This optimization can only be applied if the rt-code from instr2 is the
- // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
- // mips return. Occasionally this lands after an li().
-
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field =
- static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
- bool patched_jump = false;
-
-#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
- // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
- // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
- // apply this workaround for all cores so we don't have to identify the core.
- if (in_range) {
- // The 24k core E156 bug has some very specific requirements, we only check
- // the most simple one: if the address of the delay slot instruction is in
- // the first or last 32 KB of the 256 MB segment.
- uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
- uint32_t ipc_segment_addr = ipc & segment_mask;
- if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
- in_range = false;
- }
-#endif
-
- if (IsJalr(instr3)) {
- // Try to convert JALR to JAL.
- if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p + 2) = JAL | target_field;
- patched_jump = true;
- }
- } else if (IsJr(instr3)) {
- // Try to convert JR to J, skip returns (jr ra).
- bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
- if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p + 2) = J | target_field;
- patched_jump = true;
- }
- } else if (IsJal(instr3)) {
- if (in_range) {
- // We are patching an already converted JAL.
- *(p + 2) = JAL | target_field;
- } else {
- // Patch JAL, but out of range, revert to JALR.
- // JALR rs reg is the rt reg specified in the ORI instruction.
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
- }
- patched_jump = true;
- } else if (IsJ(instr3)) {
- if (in_range) {
- // We are patching an already converted J (jump).
- *(p + 2) = J | target_field;
- } else {
- // Trying patch J, but out of range, just go back to JR.
- // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- if (IsMipsArchVariant(kMips32r6)) {
- *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
- } else {
- *(p + 2) = SPECIAL | rs_field | JR;
- }
- }
- patched_jump = true;
- }
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ CpuFeatures::FlushICache(pc, 2 * sizeof(int32_t));
}
}
-
-void Assembler::JumpToJumpRegister(Address pc) {
- // Address pc points to lui/ori instructions.
- // Jump to label may follow at pc + 2 * kInstrSize.
- uint32_t* p = reinterpret_cast<uint32_t*>(pc);
-#ifdef DEBUG
- Instr instr1 = instr_at(pc);
-#endif
- Instr instr2 = instr_at(pc + 1 * kInstrSize);
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- bool patched = false;
-
- if (IsJal(instr3)) {
- DCHECK(GetOpcodeField(instr1) == LUI);
- DCHECK(GetOpcodeField(instr2) == ORI);
-
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
- patched = true;
- } else if (IsJ(instr3)) {
- DCHECK(GetOpcodeField(instr1) == LUI);
- DCHECK(GetOpcodeField(instr2) == ORI);
-
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- if (IsMipsArchVariant(kMips32r6)) {
- *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
- } else {
- *(p + 2) = SPECIAL | rs_field | JR;
- }
- patched = true;
- }
-
- if (patched) {
- CpuFeatures::FlushICache(pc + 2, sizeof(Address));
- }
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index c47f6d3abe..682c6602da 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -41,12 +41,33 @@
#include <set>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/mips/constants-mips.h"
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
+ V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
+ V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+// clang-format on
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -72,13 +93,19 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
-// Core register.
struct Register {
- static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
- static const int kSizeInBytes = 4;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kNumRegisters = Code::kAfterLast;
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kMantissaOffset = 0;
static const int kExponentOffset = 4;
@@ -89,117 +116,37 @@ struct Register {
#error Unknown endianness
#endif
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
- reg.is(from_code(kCpRegister)));
- return reg.is(from_code(kCpRegister)) ?
- kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
- reg.code() - 2; // zero_reg and 'at' are skipped.
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return index == kMaxNumAllocatableRegisters - 1 ?
- from_code(kCpRegister) : // Last index is always the 'cp' register.
- from_code(index + 2); // zero_reg and 'at' are skipped.
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "v0",
- "v1",
- "a0",
- "a1",
- "a2",
- "a3",
- "t0",
- "t1",
- "t2",
- "t3",
- "t4",
- "t5",
- "t6",
- "s7",
- };
- return names[index];
- }
static Register from_code(int code) {
- Register r = { code };
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
return r;
}
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-#define REGISTER(N, C) \
- const int kRegister_ ## N ## _Code = C; \
- const Register N = { C }
-
-REGISTER(no_reg, -1);
-// Always zero.
-REGISTER(zero_reg, 0);
-// at: Reserved for synthetic instructions.
-REGISTER(at, 1);
-// v0, v1: Used when returning multiple values from subroutines.
-REGISTER(v0, 2);
-REGISTER(v1, 3);
-// a0 - a4: Used to pass non-FP parameters.
-REGISTER(a0, 4);
-REGISTER(a1, 5);
-REGISTER(a2, 6);
-REGISTER(a3, 7);
-// t0 - t9: Can be used without reservation, act as temporary registers and are
-// allowed to be destroyed by subroutines.
-REGISTER(t0, 8);
-REGISTER(t1, 9);
-REGISTER(t2, 10);
-REGISTER(t3, 11);
-REGISTER(t4, 12);
-REGISTER(t5, 13);
-REGISTER(t6, 14);
-REGISTER(t7, 15);
-// s0 - s7: Subroutine register variables. Subroutines that write to these
-// registers must restore their values before exiting so that the caller can
-// expect the values to be preserved.
-REGISTER(s0, 16);
-REGISTER(s1, 17);
-REGISTER(s2, 18);
-REGISTER(s3, 19);
-REGISTER(s4, 20);
-REGISTER(s5, 21);
-REGISTER(s6, 22);
-REGISTER(s7, 23);
-REGISTER(t8, 24);
-REGISTER(t9, 25);
-// k0, k1: Reserved for system calls and interrupt handlers.
-REGISTER(k0, 26);
-REGISTER(k1, 27);
-// gp: Reserved.
-REGISTER(gp, 28);
-// sp: Stack pointer.
-REGISTER(sp, 29);
-// fp: Frame pointer.
-REGISTER(fp, 30);
-// ra: Return address pointer.
-REGISTER(ra, 31);
-
-#undef REGISTER
+// s7: context register
+// s3: lithium scratch
+// s4: lithium scratch2
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
int ToNumber(Register reg);
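
The register definitions above now follow the X-macro pattern: one list macro (GENERAL_REGISTERS) expands into the enum codes, the named constants, and the name table, so the three can never drift out of sync. A toy reduction of the same pattern over a four-register subset:

#include <cstdio>

#define TOY_REGISTERS(V) V(zero_reg) V(at) V(v0) V(v1)

struct ToyRegister {
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    TOY_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
    kAfterLast
  };
  int reg_code;
  const char* ToString() const {
    // The same list macro generates the matching name table.
    static const char* const names[] = {
#define REGISTER_NAME(R) #R,
        TOY_REGISTERS(REGISTER_NAME)
#undef REGISTER_NAME
    };
    return names[reg_code];
  }
};

// And again for the named constants, mirroring DECLARE_REGISTER above.
#define DECLARE_REGISTER(R) const ToyRegister R = {ToyRegister::kCode_##R};
TOY_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER

int main() {
  std::printf("%s has code %d\n", v0.ToString(), v0.reg_code);  // v0, 2
  return 0;
}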
@@ -207,75 +154,70 @@ int ToNumber(Register reg);
Register ToRegister(int num);
// Coprocessor register.
-struct FPURegister {
- static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
-
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0.
- // f28: 0.0
- // f30: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
- kNumReservedRegisters;
+ static const int kMaxNumRegisters = Code::kAfterLast;
inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
-
- // TODO(turbofan): Proper support for float32.
- inline static int NumAllocatableAliasedRegisters();
-
- inline static int ToAllocationIndex(FPURegister reg);
- static const char* AllocationIndexToString(int index);
-
- static FPURegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index * 2);
- }
- static FPURegister from_code(int code) {
- FPURegister r = { code };
- return r;
- }
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- FPURegister low() const {
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ DoubleRegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code;
DCHECK(reg.is_valid());
return reg;
}
- FPURegister high() const {
+ DoubleRegister high() const {
// Find high reg of a Double-reg pair, which is reg + 1.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_ + 1;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code + 1;
DCHECK(reg.is_valid());
return reg;
}
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// f28: 0.0
+// f30: scratch register.
+
// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
@@ -285,43 +227,43 @@ struct FPURegister {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister DoubleRegister;
-typedef FPURegister FloatRegister;
-
-const FPURegister no_freg = { -1 };
-
-const FPURegister f0 = { 0 }; // Return value in hard float mode.
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
+typedef DoubleRegister FPURegister;
+typedef DoubleRegister FloatRegister;
+
+const DoubleRegister no_freg = {-1};
+
+const DoubleRegister f0 = {0}; // Return value in hard float mode.
+const DoubleRegister f1 = {1};
+const DoubleRegister f2 = {2};
+const DoubleRegister f3 = {3};
+const DoubleRegister f4 = {4};
+const DoubleRegister f5 = {5};
+const DoubleRegister f6 = {6};
+const DoubleRegister f7 = {7};
+const DoubleRegister f8 = {8};
+const DoubleRegister f9 = {9};
+const DoubleRegister f10 = {10};
+const DoubleRegister f11 = {11};
+const DoubleRegister f12 = {12}; // Arg 0 in hard float mode.
+const DoubleRegister f13 = {13};
+const DoubleRegister f14 = {14}; // Arg 1 in hard float mode.
+const DoubleRegister f15 = {15};
+const DoubleRegister f16 = {16};
+const DoubleRegister f17 = {17};
+const DoubleRegister f18 = {18};
+const DoubleRegister f19 = {19};
+const DoubleRegister f20 = {20};
+const DoubleRegister f21 = {21};
+const DoubleRegister f22 = {22};
+const DoubleRegister f23 = {23};
+const DoubleRegister f24 = {24};
+const DoubleRegister f25 = {25};
+const DoubleRegister f26 = {26};
+const DoubleRegister f27 = {27};
+const DoubleRegister f28 = {28};
+const DoubleRegister f29 = {29};
+const DoubleRegister f30 = {30};
+const DoubleRegister f31 = {31};
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -341,22 +283,22 @@ const FPURegister f31 = { 31 };
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
- bool is_valid() const { return code_ == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return reg_code == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
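
Under the O32 ABI a 64-bit double occupies an even/odd pair of 32-bit FPU registers, which is what the low()/high() accessors above encode: low() is the even-numbered register itself and high() is its code plus one. A minimal sketch of just that pairing rule, with assert standing in for DCHECK:

    #include <cassert>
    #include <cstdio>

    // Simplified stand-in for DoubleRegister's pair accessors.
    struct DoubleReg {
      int reg_code;
      DoubleReg low() const {   // Even half of the pair: the register itself.
        assert(reg_code % 2 == 0);
        return {reg_code};
      }
      DoubleReg high() const {  // Odd half of the pair: code + 1.
        assert(reg_code % 2 == 0);
        return {reg_code + 1};
      }
    };

    int main() {
      DoubleReg f12 = {12};  // Arg 0 in hard-float mode.
      std::printf("double pair: f%d/f%d\n",
                  f12.low().reg_code, f12.high().reg_code);  // f12/f13
      return 0;
    }
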
@@ -466,27 +408,46 @@ class Assembler : public AssemblerBase {
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
// Determines if Label is bound and near enough so that a branch instruction
// can be used to reach it, instead of a jump instruction.
bool is_near(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_branch(Label* L);
+ inline bool is_near_pre_r6(Label* L) {
+ DCHECK(!IsMipsArchVariant(kMips32r6));
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+ }
+ inline bool is_near_r6(Label* L) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
+ }
+
+ int BranchOffset(Instr instr);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
- int32_t branch_offset(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
- int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t o = branch_offset(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
- }
- int32_t shifted_branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t o = branch_offset_compact(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
}
uint32_t jump_address(Label* L);
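
The rewrite collapses the old per-width branch_offset functions into one branch_offset_helper parameterized by OffsetSize; callers then shift the byte offset right by two because the hardware field stores instruction counts, not bytes. A simplified model of the arithmetic, assuming the label is already bound (the real helper also links unbound labels):

    #include <cassert>
    #include <cstdint>

    enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };

    // Byte offset from 'pc' to a bound label at 'pos'; it must be instruction
    // aligned and fit in a signed (bits + 2)-bit byte range, because the
    // hardware field stores offset >> 2 in 'bits' signed bits.
    int32_t branch_offset_helper(int32_t pos, int32_t pc, OffsetSize bits) {
      int32_t offset = pos - pc;
      assert((offset & 3) == 0);
      assert(offset >= -(1 << (bits + 1)) && offset < (1 << (bits + 1)));
      return offset;
    }

    int main() {
      // A forward branch of 8 instructions encodes field value 8.
      int32_t field = branch_offset_helper(64, 32, kOffset16) >> 2;
      return field == 8 ? 0 : 1;
    }
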
@@ -526,8 +487,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- static void JumpToJumpRegister(Address pc);
-
static void QuietNaN(HeapObject* nan);
// This sets the branch destination (which gets loaded at the call address).
@@ -629,111 +588,111 @@ class Assembler : public AssemblerBase {
// --------Branch-and-jump-instructions----------
// We don't use the likely variants of these instructions.
void b(int16_t offset);
- void b(Label* L) { b(branch_offset(L, false)>>2); }
+ inline void b(Label* L) { b(shifted_branch_offset(L)); }
void bal(int16_t offset);
- void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+ inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
void bc(int32_t offset);
- void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+ inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
void balc(int32_t offset);
- void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
+ inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
void beq(Register rs, Register rt, int16_t offset);
- void beq(Register rs, Register rt, Label* L) {
- beq(rs, rt, branch_offset(L, false) >> 2);
+ inline void beq(Register rs, Register rt, Label* L) {
+ beq(rs, rt, shifted_branch_offset(L));
}
void bgez(Register rs, int16_t offset);
void bgezc(Register rt, int16_t offset);
- void bgezc(Register rt, Label* L) {
- bgezc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezc(Register rt, Label* L) {
+ bgezc(rt, shifted_branch_offset(L));
}
void bgeuc(Register rs, Register rt, int16_t offset);
- void bgeuc(Register rs, Register rt, Label* L) {
- bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, shifted_branch_offset(L));
}
void bgec(Register rs, Register rt, int16_t offset);
- void bgec(Register rs, Register rt, Label* L) {
- bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, shifted_branch_offset(L));
}
void bgezal(Register rs, int16_t offset);
void bgezalc(Register rt, int16_t offset);
- void bgezalc(Register rt, Label* L) {
- bgezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, shifted_branch_offset(L));
}
void bgezall(Register rs, int16_t offset);
- void bgezall(Register rs, Label* L) {
- bgezall(rs, branch_offset(L, false)>>2);
+ inline void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L) >> 2);
}
void bgtz(Register rs, int16_t offset);
void bgtzc(Register rt, int16_t offset);
- void bgtzc(Register rt, Label* L) {
- bgtzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, shifted_branch_offset(L));
}
void blez(Register rs, int16_t offset);
void blezc(Register rt, int16_t offset);
- void blezc(Register rt, Label* L) {
- blezc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezc(Register rt, Label* L) {
+ blezc(rt, shifted_branch_offset(L));
}
void bltz(Register rs, int16_t offset);
void bltzc(Register rt, int16_t offset);
- void bltzc(Register rt, Label* L) {
- bltzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzc(Register rt, Label* L) {
+ bltzc(rt, shifted_branch_offset(L));
}
void bltuc(Register rs, Register rt, int16_t offset);
- void bltuc(Register rs, Register rt, Label* L) {
- bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, shifted_branch_offset(L));
}
void bltc(Register rs, Register rt, int16_t offset);
- void bltc(Register rs, Register rt, Label* L) {
- bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, shifted_branch_offset(L));
}
void bltzal(Register rs, int16_t offset);
void blezalc(Register rt, int16_t offset);
- void blezalc(Register rt, Label* L) {
- blezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezalc(Register rt, Label* L) {
+ blezalc(rt, shifted_branch_offset(L));
}
void bltzalc(Register rt, int16_t offset);
- void bltzalc(Register rt, Label* L) {
- bltzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, shifted_branch_offset(L));
}
void bgtzalc(Register rt, int16_t offset);
- void bgtzalc(Register rt, Label* L) {
- bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, shifted_branch_offset(L));
}
void beqzalc(Register rt, int16_t offset);
- void beqzalc(Register rt, Label* L) {
- beqzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, shifted_branch_offset(L));
}
void beqc(Register rs, Register rt, int16_t offset);
- void beqc(Register rs, Register rt, Label* L) {
- beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, shifted_branch_offset(L));
}
void beqzc(Register rs, int32_t offset);
- void beqzc(Register rs, Label* L) {
- beqzc(rs, branch_offset21_compact(L, false)>>2);
+ inline void beqzc(Register rs, Label* L) {
+ beqzc(rs, shifted_branch_offset21(L));
}
void bnezalc(Register rt, int16_t offset);
- void bnezalc(Register rt, Label* L) {
- bnezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, shifted_branch_offset(L));
}
void bnec(Register rs, Register rt, int16_t offset);
- void bnec(Register rs, Register rt, Label* L) {
- bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, shifted_branch_offset(L));
}
void bnezc(Register rt, int32_t offset);
- void bnezc(Register rt, Label* L) {
- bnezc(rt, branch_offset21_compact(L, false)>>2);
+ inline void bnezc(Register rt, Label* L) {
+ bnezc(rt, shifted_branch_offset21(L));
}
void bne(Register rs, Register rt, int16_t offset);
- void bne(Register rs, Register rt, Label* L) {
- bne(rs, rt, branch_offset(L, false)>>2);
+ inline void bne(Register rs, Register rt, Label* L) {
+ bne(rs, rt, shifted_branch_offset(L));
}
void bovc(Register rs, Register rt, int16_t offset);
- void bovc(Register rs, Register rt, Label* L) {
- bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, shifted_branch_offset(L));
}
void bnvc(Register rs, Register rt, int16_t offset);
- void bnvc(Register rs, Register rt, Label* L) {
- bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, shifted_branch_offset(L));
}
// Never use the int16_t b(l)cond version with a branch offset
@@ -867,10 +826,10 @@ class Assembler : public AssemblerBase {
void movz_s(FPURegister fd, FPURegister fs, Register rt);
void movz_d(FPURegister fd, FPURegister fs, Register rt);
- void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movn_s(FPURegister fd, FPURegister fs, Register rt);
void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
@@ -978,12 +937,12 @@ class Assembler : public AssemblerBase {
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
- void bc1eqz(Label* L, FPURegister ft) {
- bc1eqz(branch_offset(L, false)>>2, ft);
+ inline void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(shifted_branch_offset(L), ft);
}
void bc1nez(int16_t offset, FPURegister ft);
- void bc1nez(Label* L, FPURegister ft) {
- bc1nez(branch_offset(L, false)>>2, ft);
+ inline void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(shifted_branch_offset(L), ft);
}
// Conditions and branches for non MIPSr6.
@@ -993,9 +952,13 @@ class Assembler : public AssemblerBase {
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+ inline void bc1f(Label* L, uint16_t cc = 0) {
+ bc1f(shifted_branch_offset(L), cc);
+ }
void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+ inline void bc1t(Label* L, uint16_t cc = 0) {
+ bc1t(shifted_branch_offset(L), cc);
+ }
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
@@ -1114,8 +1077,14 @@ class Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsBc(Instr instr);
+ static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
+ static bool IsBeqzc(Instr instr);
+ static bool IsBnezc(Instr instr);
+ static bool IsBeqc(Instr instr);
+ static bool IsBnec(Instr instr);
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
@@ -1174,6 +1143,8 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
+ bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1237,6 +1208,8 @@ class Assembler : public AssemblerBase {
return block_buffer_growth_;
}
+ inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+
private:
inline static void set_target_internal_reference_encoded_at(Address pc,
Address target);
@@ -1279,11 +1252,14 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Readable constants for compact branch handling in emit()
+ enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
+
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
- inline void emit(Instr x);
- inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ inline void emit(Instr x,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -1334,21 +1310,22 @@ class Assembler : public AssemblerBase {
FPUControlRegister fs,
SecondaryField func = NULLSF);
-
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register r1,
- FPURegister r2,
- int32_t j);
- void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
- void GenInstrImmediate(Opcode opcode, int32_t offset26);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, Register rt, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, SecondaryField SF, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register r1, FPURegister r2, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
+ void GenInstrImmediate(
+ Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
void GenInstrJump(Opcode opcode,
@@ -1423,12 +1400,17 @@ class Assembler : public AssemblerBase {
bool trampoline_emitted_;
static const int kTrampolineSlotsSize = 4 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
// Internal reference positions, required for unbounded internal reference
// labels.
std::set<int> internal_reference_positions_;
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
Trampoline trampoline_;
bool internal_trampoline_exception_;
@@ -1450,6 +1432,7 @@ class EnsureSpace BASE_EMBEDDED {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_MIPS_H_
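
The prev_instr_compact_branch_ flag added above exists because MIPSr6 compact branches have a forbidden slot: the instruction immediately following one must not itself be a branch. A toy emitter showing the shape of that bookkeeping, with IsBranchLike left as a stub predicate rather than a real decoder:

    #include <cassert>
    #include <cstdint>

    enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };

    class TinyEmitter {
     public:
      void emit(uint32_t instr,
                CompactBranchType is_compact_branch = CompactBranchType::NO) {
        // The slot after a compact branch must not hold another branch; a real
        // assembler would pad with a nop instead of asserting.
        assert(!(prev_instr_compact_branch_ && IsBranchLike(instr)));
        buffer_[n_++] = instr;
        prev_instr_compact_branch_ =
            (is_compact_branch == CompactBranchType::COMPACT_BRANCH);
      }

     private:
      static bool IsBranchLike(uint32_t) { return false; }  // Stub predicate.
      uint32_t buffer_[64];
      int n_ = 0;
      bool prev_instr_compact_branch_ = false;
    };

    int main() {
      TinyEmitter e;
      e.emit(0x20000000, CompactBranchType::COMPACT_BRANCH);  // e.g. a POP10 op.
      e.emit(0x00000000);                                     // nop in the slot.
      return 0;
    }
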
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index f4da194579..08f8e65359 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -23,8 +23,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
- // -- a1 : called function (only guaranteed when
- // -- extra_args requires it)
+ // (only guaranteed when the called function
+ // is not marked as DontAdaptArguments)
+ // -- a1 : called function
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -49,8 +50,22 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects a0 to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But a0 is only valid
+ // if the called function is marked as DontAdaptArguments, otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ Label argc, done_argc;
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(a2);
+ __ Branch(&argc, eq, a2,
+ Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ Addu(a0, a2, num_extra_args + 1);
+ __ jmp(&done_argc);
+ __ bind(&argc);
__ Addu(a0, a0, num_extra_args + 1);
+ __ bind(&done_argc);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
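
The adaptor now trusts a0 only for functions whose formal parameter count is the DontAdaptArguments sentinel; otherwise it reloads the count from the SharedFunctionInfo. The decision, restated as plain C++ with an assumed sentinel value of -1 purely for illustration:

    #include <cstdio>

    // Sentinel assumed to be -1 here; V8 uses a distinguished formal parameter
    // count to mark DontAdaptArguments functions.
    const int kDontAdaptArgumentsSentinel = -1;

    // a0 (the actual count) is trusted only for DontAdaptArguments functions;
    // otherwise the formal parameter count from the SharedFunctionInfo wins.
    int EffectiveArgc(int actual_argc, int formal_param_count,
                      int num_extra_args) {
      int base = (formal_param_count == kDontAdaptArgumentsSentinel)
                     ? actual_argc
                     : formal_param_count;
      return base + num_extra_args + 1;  // +1 for the receiver.
    }

    int main() {
      std::printf("%d\n", EffectiveArgc(3, kDontAdaptArgumentsSentinel, 1));  // 5
      std::printf("%d\n", EffectiveArgc(3, 2, 1));                            // 4
      return 0;
    }
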
@@ -62,8 +77,7 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ lw(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ lw(result,
MemOperand(result,
@@ -78,8 +92,7 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
__ lw(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
__ lw(result,
MemOperand(result,
@@ -212,6 +225,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a3 : original constructor
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -245,10 +259,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(a1);
+ __ Push(a1, a3);
__ CallStub(&stub);
__ Move(a0, v0);
- __ Pop(a1);
+ __ Pop(a1, a3);
}
__ bind(&done_convert);
}
@@ -258,10 +272,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the first argument
// -- a1 : constructor function
+ // -- a3 : original constructor
// -- ra : return address
// -----------------------------------
- Label allocate, done_allocate;
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ Branch(&rt_call, ne, a1, Operand(a3));
+
__ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -285,6 +304,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(a0, a1);
}
__ jmp(&done_allocate);
+
+ // Fallback to the runtime to create new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a1, a3); // constructor function, original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(a0, a1);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
}
}
@@ -366,16 +396,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lw(a2, MemOperand(a2));
__ Branch(&rt_call, ne, a2, Operand(zero_reg));
- // Fall back to runtime if the original constructor and function differ.
- __ Branch(&rt_call, ne, a1, Operand(a3));
+ // Verify that the original constructor is a JSFunction.
+ __ GetObjectType(a3, t1, t0);
+ __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
// Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // a3: original constructor
+ __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(a2, &rt_call);
__ GetObjectType(a2, t5, t4);
__ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&rt_call, ne, a1, Operand(t1));
+
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc). In which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
@@ -397,7 +433,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Operand(Map::kSlackTrackingCounterEnd));
__ sw(t0, bit_field3); // In delay slot.
- __ Push(a1, a2, a1); // a1 = Constructor.
+ __ Push(a1, a2, a2); // a2 = Initial map.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(a1, a2);
@@ -495,7 +531,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a3: original constructor
__ bind(&rt_call);
- __ Push(a1, a3); // arguments 2-3 / 1-2
+ __ Push(a1, a3); // constructor function, original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(t4, v0);
@@ -892,28 +928,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
+ __ push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -963,6 +987,67 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ Addu(a3, a0, Operand(1)); // Add one for receiver.
+ __ sll(a3, a3, kPointerSizeLog2);
+ __ Subu(a3, a2, Operand(a3));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ lw(t0, MemOperand(a2));
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ push(t0);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(a3));
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : original constructor
+ // -- a1 : constructor to call
+ // -- a2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ Subu(t0, a2, Operand(t0));
+
+ // Push a slot for the receiver.
+ __ push(zero_reg);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ lw(t1, MemOperand(a2));
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ push(t1);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(t0));
+
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
+
+
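
Both interpreter push builtins walk memory downward from the first argument's address, pushing one pointer-sized slot per iteration until they pass the precomputed end address. A host-side sketch of the same loop over plain ints, modeled on the construct variant (which pushes the receiver slot separately):

    #include <cstdio>
    #include <vector>

    // 'first' points at the first argument; the remaining arguments sit at
    // successively lower addresses, as in the a2/t0 loop above.
    std::vector<int> PushArgs(const int* first, int count) {
      std::vector<int> stack;
      const int* end = first - count;       // Computed once, like t0.
      for (const int* p = first; p > end; --p) {
        stack.push_back(*p);                // __ push(t1) per iteration.
      }
      return stack;
    }

    int main() {
      int slots[4] = {40, 30, 20, 10};      // Highest address holds arg 0.
      std::vector<int> s = PushArgs(&slots[3], 3);
      for (int v : s) std::printf("%d ", v);  // 10 20 30
      std::printf("\n");
      return 0;
    }
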
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1490,72 +1575,84 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(a1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
__ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
- __ lw(a3, MemOperand(at));
-
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -- a2 : the shared function info.
- // -- a3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(a3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ GetObjectType(a3, t0, t0);
- __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
- __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a3, MemOperand(at));
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a3, v0);
+ __ Pop(a0, a1);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ }
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ Branch(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ Push(a0, a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a3, v0);
- __ Pop(a0, a1);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- }
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ sw(a3, MemOperand(at));
@@ -1576,11 +1673,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount actual(a0);
ParameterCount expected(a2);
__ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -1590,8 +1694,8 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
__ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
// 1. Call to function proxy.
@@ -1613,7 +1717,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ sw(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1710,35 +1816,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- a1 : the target to call (can be any Object).
-
- // Find the address of the last argument.
- __ Addu(a3, a0, Operand(1)); // Add one for receiver.
- __ sll(a3, a3, kPointerSizeLog2);
- __ Subu(a3, a2, Operand(a3));
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ lw(t0, MemOperand(a2));
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ push(t0);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(a3));
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
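
Generate_CallFunction is now specialized by ConvertReceiverMode, so call sites that statically know whether the receiver is null/undefined can skip the dynamic checks. The decision tree for a sloppy-mode, non-native callee, restated as a small function (strict and native callees bypass conversion entirely):

    #include <cstdio>

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    // Receiver fix-up for a sloppy-mode, non-native callee.
    const char* ReceiverAction(ConvertReceiverMode mode, bool is_null_or_undef,
                               bool is_js_receiver) {
      if (mode == ConvertReceiverMode::kNullOrUndefined)
        return "patch to global proxy";  // Known statically: no checks emitted.
      if (is_js_receiver) return "keep as-is";
      if (mode != ConvertReceiverMode::kNotNullOrUndefined && is_null_or_undef)
        return "patch to global proxy";
      return "wrap with ToObject";       // The ToObjectStub slow path.
    }

    int main() {
      std::printf("%s\n",
                  ReceiverAction(ConvertReceiverMode::kAny, true, false));
      return 0;
    }
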
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index b12cb718ab..0b536504c2 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -1066,13 +1066,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ //
+ // If argv_in_register():
+ // a2: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Compute the argv pointer in a callee-saved register.
- __ sll(s1, a0, kPointerSizeLog2);
- __ Addu(s1, sp, s1);
- __ Subu(s1, s1, kPointerSize);
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
+ __ Addu(s1, sp, s1);
+ __ Subu(s1, s1, kPointerSize);
+ }
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1153,8 +1161,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- // s0: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // s0: still holds argc (callee-saved).
+ argc = s0;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -1684,7 +1699,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+ __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -1888,7 +1903,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments boilerplate from the current native context.
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+ __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(
t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
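
When argv is not already passed in a2, CEntryStub derives it from the stack pointer: argv = sp + argc * kPointerSize - kPointerSize, the address of the highest-addressed argument slot. The same arithmetic as a small C++ helper:

    #include <cstdint>
    #include <cstdio>

    const int kPointerSizeLog2 = 2;  // 32-bit MIPS: 4-byte stack slots.

    // Mirrors the sll/Addu/Subu sequence in CEntryStub::Generate above.
    uintptr_t ComputeArgv(uintptr_t sp, uint32_t argc) {
      return sp + (argc << kPointerSizeLog2) - (1u << kPointerSizeLog2);
    }

    int main() {
      // Three 4-byte arguments above sp: argv lands at sp + 8.
      std::printf("%#lx\n",
                  static_cast<unsigned long>(ComputeArgv(0x1000, 3)));  // 0x1008
      return 0;
    }
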
@@ -2487,100 +2502,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
-
- // Do not transform the receiver for strict mode functions.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- // Do not transform the receiver for native (Compilerhints already in a3).
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
- __ Branch(cont, ne, at, Operand(zero_reg));
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ li(a0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(a1);
- }
- __ Branch(USE_DELAY_SLOT, cont);
- __ sw(v0, MemOperand(sp, argc * kPointerSize));
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // a1 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
- }
-
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ lw(a3, MemOperand(sp, argc * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, t0, t0);
- __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
@@ -2664,9 +2585,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2703,34 +2622,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ lw(a3, MemOperand(sp, argc * kPointerSize));
-
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, t0, t0);
- __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ li(a0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&slow_start, eq, t0, Operand(at));
+ __ Branch(&call, eq, t0, Operand(at));
// Verify that t0 contains an AllocationSite
__ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
@@ -2765,7 +2665,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(a2, with_types_offset));
__ lw(t0, FieldMemOperand(a2, generic_offset));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &slow_start);
+ __ Branch(USE_DELAY_SLOT, &call);
__ sw(t0, FieldMemOperand(a2, generic_offset)); // In delay slot.
__ bind(&uninitialized);
@@ -2805,23 +2705,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(a1);
}
- __ Branch(&have_js_function);
+ __ Branch(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
- __ Branch(&have_js_function);
+ __ Branch(&call);
}
@@ -2979,7 +2870,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
__ Move(result_, v0);
call_helper.AfterCall(masm);
@@ -3299,6 +3190,23 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in a0.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(a0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
+ __ mov(a0, zero_reg);
+ __ bind(&positive_smi);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_smi);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
+
+
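
The ToLength fast path above handles smis inline: a negative smi is clamped to zero and a non-negative smi is returned unchanged; anything else tail-calls Runtime::kToLength. The smi fast path in plain C++ (the 2^53 - 1 cap of full ES6 ToLength cannot fire for a 31-bit smi, so the clamp alone suffices):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Smi fast path of ToLengthStub: clamp negatives to 0, pass the rest through.
    int32_t ToLengthSmi(int32_t smi_value) {
      return std::max(smi_value, 0);
    }

    int main() {
      std::printf("%d %d\n", ToLengthSmi(-7), ToLengthSmi(42));  // 0 42
      return 0;
    }
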
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in a0.
Label is_number;
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 67228e0170..9009ec2692 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -343,6 +343,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index f79ad4e41c..22784fcf53 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -46,6 +46,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index ff8a79f1b2..6ca430a157 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -126,24 +126,28 @@ int FPURegisters::Number(const char* name) {
// -----------------------------------------------------------------------------
// Instructions.
-bool Instruction::IsForbiddenInBranchDelay() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
+ Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+ switch (opcode) {
case J:
case JAL:
case BEQ:
case BNE:
- case BLEZ:
- case BGTZ:
+ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
+ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
case BEQL:
case BNEL:
- case BLEZL:
- case BGTZL:
+ case BLEZL: // POP26 bgezc, blezc, bgec/blec
+ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
case BC:
case BALC:
+ case POP10: // beqzalc, bovc, beqc
+ case POP30: // bnezalc, bnvc, bnec
+ case POP66: // beqzc, jic
+ case POP76: // bnezc, jialc
return true;
case REGIMM:
- switch (RtFieldRaw()) {
+ switch (instr & kRtFieldMask) {
case BLTZ:
case BGEZ:
case BLTZAL:
@@ -154,7 +158,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
}
break;
case SPECIAL:
- switch (FunctionFieldRaw()) {
+ switch (instr & kFunctionFieldMask) {
case JR:
case JALR:
return true;
@@ -162,6 +166,17 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return false;
}
break;
+ case COP1:
+ switch (instr & kRsFieldMask) {
+ case BC1:
+ case BC1EQZ:
+ case BC1NEZ:
+ return true;
+ break;
+ default:
+ return false;
+ }
+ break;
default:
return false;
}
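
Making IsForbiddenAfterBranchInstr a static predicate on the raw instruction word means classification is pure bit masking, with no Instruction object involved. A cut-down illustration of that style of field test, covering only J and JAL:

    #include <cstdint>
    #include <cstdio>

    const int kOpcodeShift = 26;
    const uint32_t kOpcodeMask = ((1u << 6) - 1) << kOpcodeShift;

    // Classify by masking the raw word, as the static predicate above does.
    bool IsJumpOpcode(uint32_t instr) {
      uint32_t opcode = instr & kOpcodeMask;
      return opcode == (2u << kOpcodeShift) ||   // J
             opcode == (3u << kOpcodeShift);     // JAL
    }

    int main() {
      uint32_t j_instr = (2u << kOpcodeShift) | 0x123;  // J with some target.
      std::printf("%d\n", int(IsJumpOpcode(j_instr)));  // 1
      return 0;
    }
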
@@ -169,8 +184,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
bool Instruction::IsLinkingInstruction() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+ switch (OpcodeFieldRaw()) {
case JAL:
return true;
case POP76:
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index fcbda80191..b0c2ebbdf8 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -298,311 +298,319 @@ const int kFBtrueBits = 1;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
-const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift;
const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift;
const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift;
-const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
-const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
-const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
-const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
-const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
-const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
-const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
+const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
+const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-const int kHiMask = 0xffff << 16;
-const int kLoMask = 0xffff;
-const int kSignMask = 0x80000000;
-const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int kHiMask = 0xffff << 16;
+const int kLoMask = 0xffff;
+const int kSignMask = 0x80000000;
+const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
-
- BEQC = ((2 << 3) + 0) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- BC = ((6 << 3) + 2) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
- POP66 = ((6 << 3) + 6) << kOpcodeShift,
-
- PREF = ((6 << 3) + 3) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- BALC = ((7 << 3) + 2) << kOpcodeShift,
- PCREL = ((7 << 3) + 3) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
- POP76 = ((7 << 3) + 6) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
+enum Opcode : uint32_t {
+ SPECIAL = 0U << kOpcodeShift,
+ REGIMM = 1U << kOpcodeShift,
+
+ J = ((0U << 3) + 2) << kOpcodeShift,
+ JAL = ((0U << 3) + 3) << kOpcodeShift,
+ BEQ = ((0U << 3) + 4) << kOpcodeShift,
+ BNE = ((0U << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0U << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0U << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1U << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1U << 3) + 1) << kOpcodeShift,
+ SLTI = ((1U << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1U << 3) + 3) << kOpcodeShift,
+ ANDI = ((1U << 3) + 4) << kOpcodeShift,
+ ORI = ((1U << 3) + 5) << kOpcodeShift,
+ XORI = ((1U << 3) + 6) << kOpcodeShift,
+ LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+
+ BEQC = ((2U << 3) + 0) << kOpcodeShift,
+ COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2U << 3) + 4) << kOpcodeShift,
+ BNEL = ((2U << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2U << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2U << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC.
+ SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift,
+
+ LB = ((4U << 3) + 0) << kOpcodeShift,
+ LH = ((4U << 3) + 1) << kOpcodeShift,
+ LWL = ((4U << 3) + 2) << kOpcodeShift,
+ LW = ((4U << 3) + 3) << kOpcodeShift,
+ LBU = ((4U << 3) + 4) << kOpcodeShift,
+ LHU = ((4U << 3) + 5) << kOpcodeShift,
+ LWR = ((4U << 3) + 6) << kOpcodeShift,
+ SB = ((5U << 3) + 0) << kOpcodeShift,
+ SH = ((5U << 3) + 1) << kOpcodeShift,
+ SWL = ((5U << 3) + 2) << kOpcodeShift,
+ SW = ((5U << 3) + 3) << kOpcodeShift,
+ SWR = ((5U << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6U << 3) + 1) << kOpcodeShift,
+ BC = ((6U << 3) + 2) << kOpcodeShift,
+ LDC1 = ((6U << 3) + 5) << kOpcodeShift,
+ POP66 = ((6U << 3) + 6) << kOpcodeShift, // beqzc, jic
+
+ PREF = ((6U << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7U << 3) + 1) << kOpcodeShift,
+ BALC = ((7U << 3) + 2) << kOpcodeShift,
+ PCREL = ((7U << 3) + 3) << kOpcodeShift,
+ SDC1 = ((7U << 3) + 5) << kOpcodeShift,
+ POP76 = ((7U << 3) + 6) << kOpcodeShift, // bnezc, jialc
+
+ COP1X = ((1U << 4) + 3) << kOpcodeShift,
+
+ // New r6 instructions.
+ POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc
+ POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc
+ POP10 = ADDI, // beqzalc, bovc, beqc
+ POP26 = BLEZL, // bgezc, blezc, bgec/blec
+ POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc
+ POP30 = DADDI, // bnezalc, bnvc, bnec
};
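
Every Opcode initializer follows the same scheme: ((row << 3) + col) << kOpcodeShift encodes the instruction's row and column in the MIPS32 opcode map, now spelled with unsigned literals because the high rows would overflow a signed shift. A tiny sketch of the encoding:

    #include <cstdint>
    #include <cstdio>

    const int kOpcodeShift = 26;

    // Table position -> encoded opcode, matching the enum initializers. The
    // unsigned row literal matters: ((7 << 3) + 6) << 26 overflows a signed
    // int, which is why the patch adds the U suffixes.
    constexpr uint32_t Op(uint32_t row, uint32_t col) {
      return ((row << 3) + col) << kOpcodeShift;
    }

    int main() {
      std::printf("BEQ   = 0x%08x\n", unsigned(Op(0, 4)));  // 0x10000000
      std::printf("POP76 = 0x%08x\n", unsigned(Op(7, 6)));  // 0xf8000000
      return 0;
    }
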
-enum SecondaryField {
+enum SecondaryField : uint32_t {
// SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- CLZ_R6 = ((2 << 3) + 0),
- CLO_R6 = ((2 << 3) + 1),
- MFLO = ((2 << 3) + 2),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- SELEQZ_S = ((6 << 3) + 5),
- TNE = ((6 << 3) + 6),
- SELNEZ_S = ((6 << 3) + 7),
+ SLL = ((0U << 3) + 0),
+ MOVCI = ((0U << 3) + 1),
+ SRL = ((0U << 3) + 2),
+ SRA = ((0U << 3) + 3),
+ SLLV = ((0U << 3) + 4),
+ SRLV = ((0U << 3) + 6),
+ SRAV = ((0U << 3) + 7),
+
+ JR = ((1U << 3) + 0),
+ JALR = ((1U << 3) + 1),
+ MOVZ = ((1U << 3) + 2),
+ MOVN = ((1U << 3) + 3),
+ BREAK = ((1U << 3) + 5),
+
+ MFHI = ((2U << 3) + 0),
+ CLZ_R6 = ((2U << 3) + 0),
+ CLO_R6 = ((2U << 3) + 1),
+ MFLO = ((2U << 3) + 2),
+
+ MULT = ((3U << 3) + 0),
+ MULTU = ((3U << 3) + 1),
+ DIV = ((3U << 3) + 2),
+ DIVU = ((3U << 3) + 3),
+
+ ADD = ((4U << 3) + 0),
+ ADDU = ((4U << 3) + 1),
+ SUB = ((4U << 3) + 2),
+ SUBU = ((4U << 3) + 3),
+ AND = ((4U << 3) + 4),
+ OR = ((4U << 3) + 5),
+ XOR = ((4U << 3) + 6),
+ NOR = ((4U << 3) + 7),
+
+ SLT = ((5U << 3) + 2),
+ SLTU = ((5U << 3) + 3),
+
+ TGE = ((6U << 3) + 0),
+ TGEU = ((6U << 3) + 1),
+ TLT = ((6U << 3) + 2),
+ TLTU = ((6U << 3) + 3),
+ TEQ = ((6U << 3) + 4),
+ SELEQZ_S = ((6U << 3) + 5),
+ TNE = ((6U << 3) + 6),
+ SELNEZ_S = ((6U << 3) + 7),
// Multiply integers in r6.
- MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
- MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
- RINT = ((3 << 3) + 2),
+ MUL_MUH = ((3U << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U.
+ RINT = ((3U << 3) + 2),
- MUL_OP = ((0 << 3) + 2),
- MUH_OP = ((0 << 3) + 3),
- DIV_OP = ((0 << 3) + 2),
- MOD_OP = ((0 << 3) + 3),
+ MUL_OP = ((0U << 3) + 2),
+ MUH_OP = ((0U << 3) + 3),
+ DIV_OP = ((0U << 3) + 2),
+ MOD_OP = ((0U << 3) + 3),
- DIV_MOD = ((3 << 3) + 2),
- DIV_MOD_U = ((3 << 3) + 3),
+ DIV_MOD = ((3U << 3) + 2),
+ DIV_MOD_U = ((3U << 3) + 3),
// SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
+ MUL = ((0U << 3) + 2),
+ CLZ = ((4U << 3) + 0),
+ CLO = ((4U << 3) + 1),
// SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- INS = ((0 << 3) + 4),
- BSHFL = ((4 << 3) + 0),
+ EXT = ((0U << 3) + 0),
+ INS = ((0U << 3) + 4),
+ BSHFL = ((4U << 3) + 0),
// SPECIAL3 Encoding of sa Field.
- BITSWAP = ((0 << 3) + 0),
- ALIGN = ((0 << 3) + 2),
- WSBH = ((0 << 3) + 2),
- SEB = ((2 << 3) + 0),
- SEH = ((3 << 3) + 0),
+ BITSWAP = ((0U << 3) + 0),
+ ALIGN = ((0U << 3) + 2),
+ WSBH = ((0U << 3) + 2),
+ SEB = ((2U << 3) + 0),
+ SEH = ((3U << 3) + 0),
// REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
- BGEZALL = ((2 << 3) + 3) << 16,
+ BLTZ = ((0U << 3) + 0) << 16,
+ BGEZ = ((0U << 3) + 1) << 16,
+ BLTZAL = ((2U << 3) + 0) << 16,
+ BGEZAL = ((2U << 3) + 1) << 16,
+ BGEZALL = ((2U << 3) + 3) << 16,
// COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
+ MFC1 = ((0U << 3) + 0) << 21,
+ CFC1 = ((0U << 3) + 2) << 21,
+ MFHC1 = ((0U << 3) + 3) << 21,
+ MTC1 = ((0U << 3) + 4) << 21,
+ CTC1 = ((0U << 3) + 6) << 21,
+ MTHC1 = ((0U << 3) + 7) << 21,
+ BC1 = ((1U << 3) + 0) << 21,
+ S = ((2U << 3) + 0) << 21,
+ D = ((2U << 3) + 1) << 21,
+ W = ((2U << 3) + 4) << 21,
+ L = ((2U << 3) + 5) << 21,
+ PS = ((2U << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
- ADD_S = ((0 << 3) + 0),
- SUB_S = ((0 << 3) + 1),
- MUL_S = ((0 << 3) + 2),
- DIV_S = ((0 << 3) + 3),
- ABS_S = ((0 << 3) + 5),
- SQRT_S = ((0 << 3) + 4),
- MOV_S = ((0 << 3) + 6),
- NEG_S = ((0 << 3) + 7),
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- RECIP_S = ((2 << 3) + 5),
- RSQRT_S = ((2 << 3) + 6),
- CLASS_S = ((3 << 3) + 3),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
+ ADD_S = ((0U << 3) + 0),
+ SUB_S = ((0U << 3) + 1),
+ MUL_S = ((0U << 3) + 2),
+ DIV_S = ((0U << 3) + 3),
+ ABS_S = ((0U << 3) + 5),
+ SQRT_S = ((0U << 3) + 4),
+ MOV_S = ((0U << 3) + 6),
+ NEG_S = ((0U << 3) + 7),
+ ROUND_L_S = ((1U << 3) + 0),
+ TRUNC_L_S = ((1U << 3) + 1),
+ CEIL_L_S = ((1U << 3) + 2),
+ FLOOR_L_S = ((1U << 3) + 3),
+ ROUND_W_S = ((1U << 3) + 4),
+ TRUNC_W_S = ((1U << 3) + 5),
+ CEIL_W_S = ((1U << 3) + 6),
+ FLOOR_W_S = ((1U << 3) + 7),
+ RECIP_S = ((2U << 3) + 5),
+ RSQRT_S = ((2U << 3) + 6),
+ CLASS_S = ((3U << 3) + 3),
+ CVT_D_S = ((4U << 3) + 1),
+ CVT_W_S = ((4U << 3) + 4),
+ CVT_L_S = ((4U << 3) + 5),
+ CVT_PS_S = ((4U << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- RECIP_D = ((2 << 3) + 5),
- RSQRT_D = ((2 << 3) + 6),
- CLASS_D = ((3 << 3) + 3),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
+ ADD_D = ((0U << 3) + 0),
+ SUB_D = ((0U << 3) + 1),
+ MUL_D = ((0U << 3) + 2),
+ DIV_D = ((0U << 3) + 3),
+ SQRT_D = ((0U << 3) + 4),
+ ABS_D = ((0U << 3) + 5),
+ MOV_D = ((0U << 3) + 6),
+ NEG_D = ((0U << 3) + 7),
+ ROUND_L_D = ((1U << 3) + 0),
+ TRUNC_L_D = ((1U << 3) + 1),
+ CEIL_L_D = ((1U << 3) + 2),
+ FLOOR_L_D = ((1U << 3) + 3),
+ ROUND_W_D = ((1U << 3) + 4),
+ TRUNC_W_D = ((1U << 3) + 5),
+ CEIL_W_D = ((1U << 3) + 6),
+ FLOOR_W_D = ((1U << 3) + 7),
+ RECIP_D = ((2U << 3) + 5),
+ RSQRT_D = ((2U << 3) + 6),
+ CLASS_D = ((3U << 3) + 3),
+ MIN = ((3U << 3) + 4),
+ MINA = ((3U << 3) + 5),
+ MAX = ((3U << 3) + 6),
+ MAXA = ((3U << 3) + 7),
+ CVT_S_D = ((4U << 3) + 0),
+ CVT_W_D = ((4U << 3) + 4),
+ CVT_L_D = ((4U << 3) + 5),
+ C_F_D = ((6U << 3) + 0),
+ C_UN_D = ((6U << 3) + 1),
+ C_EQ_D = ((6U << 3) + 2),
+ C_UEQ_D = ((6U << 3) + 3),
+ C_OLT_D = ((6U << 3) + 4),
+ C_ULT_D = ((6U << 3) + 5),
+ C_OLE_D = ((6U << 3) + 6),
+ C_ULE_D = ((6U << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- BC1EQZ = ((2 << 2) + 1) << 21,
- BC1NEZ = ((3 << 2) + 1) << 21,
+ CVT_S_W = ((4U << 3) + 0),
+ CVT_D_W = ((4U << 3) + 1),
+ CVT_S_L = ((4U << 3) + 0),
+ CVT_D_L = ((4U << 3) + 1),
+ BC1EQZ = ((2U << 2) + 1) << 21,
+ BC1NEZ = ((3U << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
- CMP_AF = ((0 << 3) + 0),
- CMP_UN = ((0 << 3) + 1),
- CMP_EQ = ((0 << 3) + 2),
- CMP_UEQ = ((0 << 3) + 3),
- CMP_LT = ((0 << 3) + 4),
- CMP_ULT = ((0 << 3) + 5),
- CMP_LE = ((0 << 3) + 6),
- CMP_ULE = ((0 << 3) + 7),
- CMP_SAF = ((1 << 3) + 0),
- CMP_SUN = ((1 << 3) + 1),
- CMP_SEQ = ((1 << 3) + 2),
- CMP_SUEQ = ((1 << 3) + 3),
- CMP_SSLT = ((1 << 3) + 4),
- CMP_SSULT = ((1 << 3) + 5),
- CMP_SLE = ((1 << 3) + 6),
- CMP_SULE = ((1 << 3) + 7),
+ CMP_AF = ((0U << 3) + 0),
+ CMP_UN = ((0U << 3) + 1),
+ CMP_EQ = ((0U << 3) + 2),
+ CMP_UEQ = ((0U << 3) + 3),
+ CMP_LT = ((0U << 3) + 4),
+ CMP_ULT = ((0U << 3) + 5),
+ CMP_LE = ((0U << 3) + 6),
+ CMP_ULE = ((0U << 3) + 7),
+ CMP_SAF = ((1U << 3) + 0),
+ CMP_SUN = ((1U << 3) + 1),
+ CMP_SEQ = ((1U << 3) + 2),
+ CMP_SUEQ = ((1U << 3) + 3),
+ CMP_SSLT = ((1U << 3) + 4),
+ CMP_SSULT = ((1U << 3) + 5),
+ CMP_SLE = ((1U << 3) + 6),
+ CMP_SULE = ((1U << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
- CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
- CMP_OR = ((2 << 3) + 1),
- CMP_UNE = ((2 << 3) + 2),
- CMP_NE = ((2 << 3) + 3),
- CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
- CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
- CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
- CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
- CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
- CMP_SOR = ((3 << 3) + 1),
- CMP_SUNE = ((3 << 3) + 2),
- CMP_SNE = ((3 << 3) + 3),
- CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
- CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
- CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
- CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
-
- SEL = ((2 << 3) + 0),
- MOVZ_C = ((2 << 3) + 2),
- MOVN_C = ((2 << 3) + 3),
- SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
- MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
- SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+ CMP_AT = ((2U << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2U << 3) + 1),
+ CMP_UNE = ((2U << 3) + 2),
+ CMP_NE = ((2U << 3) + 3),
+ CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3U << 3) + 1),
+ CMP_SUNE = ((3U << 3) + 2),
+ CMP_SNE = ((3U << 3) + 3),
+ CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2U << 3) + 0),
+ MOVZ_C = ((2U << 3) + 2),
+ MOVN_C = ((2U << 3) + 3),
+ SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers.
+ MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
+ SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
+ MADD_D = ((4U << 3) + 1),
// PCREL Encoding of rt Field.
- ADDIUPC = ((0 << 2) + 0),
- LWPC = ((0 << 2) + 1),
- AUIPC = ((3 << 3) + 6),
- ALUIPC = ((3 << 3) + 7),
+ ADDIUPC = ((0U << 2) + 0),
+ LWPC = ((0U << 2) + 1),
+ AUIPC = ((3U << 3) + 6),
+ ALUIPC = ((3U << 3) + 7),
// POP66 Encoding of rs Field.
- JIC = ((0 << 5) + 0),
+ JIC = ((0U << 5) + 0),
// POP76 Encoding of rs Field.
- JIALC = ((0 << 5) + 0),
+ JIALC = ((0U << 5) + 0),
- NULLSF = 0
+ NULLSF = 0U
};
@@ -820,6 +828,10 @@ const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
+static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
+ return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
+}
+
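Turning the old OpcodeToBitNumber macro into a constexpr function (see the hunk below) lets kOpcodeImmediateTypeMask be constexpr as well. The idea: every 6-bit major opcode (0..63) maps to a unique bit of a uint64_t, so testing membership in an opcode class is a single AND. An illustrative sketch, assuming kOpcodeShift == 26 and hand-encoded BEQ/BNE opcode values:

#include <cstdint>

constexpr uint32_t kOpcodeShift = 26;

constexpr uint64_t OpcodeToBitNumber(uint32_t opcode) {
  return 1ULL << (opcode >> kOpcodeShift);  // opcode >> 26 is in 0..63
}

// BEQ is major opcode 4, BNE is 5 (hand-encoded here for the example).
constexpr uint32_t kBEQ = 4U << kOpcodeShift;
constexpr uint32_t kBNE = 5U << kOpcodeShift;
constexpr uint64_t kBranchMask =
    OpcodeToBitNumber(kBEQ) | OpcodeToBitNumber(kBNE);

static_assert((kBranchMask & OpcodeToBitNumber(kBEQ)) != 0,
              "class membership test is a single AND");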
class Instruction {
public:
@@ -848,7 +860,7 @@ class Instruction {
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
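The 2 -> 2U change matters at the boundary: when hi - lo == 31, 2 << 31 is signed overflow (undefined behavior), whereas 2U << 31 wraps to 0 and the subsequent - 1 yields the intended all-ones mask. A worked example, illustrative only:

#include <cassert>
#include <cstdint>

// Mirrors Instruction::Bits for a raw 32-bit word.
uint32_t Bits(uint32_t w, int hi, int lo) {
  return (w >> lo) & ((2U << (hi - lo)) - 1);
}

int main() {
  assert(Bits(0xABCD1234u, 15, 0) == 0x1234u);      // mask 0x0000FFFF
  assert(Bits(0xABCD1234u, 31, 16) == 0xABCDu);     // mask applied after shift
  assert(Bits(0xABCD1234u, 31, 0) == 0xABCD1234u);  // (2U << 31) - 1 == 0xFFFFFFFF
  return 0;
}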
// Instruction type.
@@ -862,10 +874,7 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
-#define OpcodeToBitNumber(opcode) \
- (1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift))
-
- static const uint64_t kOpcodeImmediateTypeMask =
+ static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) |
@@ -1032,6 +1041,11 @@ class Instruction {
}
}
+ inline int32_t ImmValue(int bits) const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(bits - 1, 0);
+ }
+
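ImmValue generalizes the fixed-width accessors that follow: ImmValue(16) reads the same field as Imm16Value. A quick check against a hand-assembled instruction (illustrative only; the encoding is standard MIPS addiu):

#include <cassert>
#include <cstdint>

uint32_t Bits(uint32_t w, int hi, int lo) {
  return (w >> lo) & ((2U << (hi - lo)) - 1);
}
uint32_t ImmValue(uint32_t w, int bits) { return Bits(w, bits - 1, 0); }

int main() {
  // addiu v0, v0, 0x123: opcode 9, rs = rt = 2, imm16 = 0x0123.
  uint32_t instr = (9U << 26) | (2U << 21) | (2U << 16) | 0x0123;
  assert(ImmValue(instr, 16) == 0x0123u);
  return 0;
}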
inline int32_t Imm16Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
@@ -1058,8 +1072,18 @@ class Instruction {
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
- // Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay() const;
+ static bool IsForbiddenAfterBranchInstr(Instr instr);
+
+ // Say if the instruction should not be used in a branch delay slot or
+ // immediately after a compact branch.
+ inline bool IsForbiddenAfterBranch() const {
+ return IsForbiddenAfterBranchInstr(InstructionBits());
+ }
+
+ inline bool IsForbiddenInBranchDelay() const {
+ return IsForbiddenAfterBranch();
+ }
+
// Say if the instruction 'links'. e.g. jal, bal.
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
@@ -1178,6 +1202,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
#undef OpcodeToBitNumber
#undef FunctionFieldToBitNumber
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 974692495a..8ea1b0bb3e 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -88,7 +89,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -139,14 +140,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
- const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
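The save loop now walks the allocatable double registers through RegisterConfiguration and keys each stack offset by the register's architectural code rather than by its allocation index, so the offsets stay correct even when the allocatable set is sparse or reordered. A schematic of the pattern (Config and the codes array are stand-ins, not the real V8 API):

#include <cstdio>

// Stand-in for RegisterConfiguration: an allocatable set given by codes.
struct Config {
  int num_allocatable;
  const int* codes;  // architectural codes, possibly non-contiguous
};

int main() {
  const int kDoubleSize = 8;
  const int codes[] = {0, 2, 4, 6};  // hypothetical sparse allocatable set
  Config config = {4, codes};
  for (int i = 0; i < config.num_allocatable; ++i) {
    int code = config.codes[i];
    // Offset is keyed by code, not by loop index i, matching the diff.
    printf("f%d saved at sp + %d\n", code, code * kDoubleSize);
  }
  return 0;
}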
@@ -215,9 +218,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
@@ -284,9 +288,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index f24ec436f0..5502f4170c 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1377,12 +1377,12 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() == instr->RsValue()) &&
(instr->RtValue() != 0)) {
- Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
- Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1419,7 +1419,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2");
} else {
@@ -1435,9 +1435,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
case POP76:
if (instr->RsValue() == JIALC) {
- Format(instr, "jialc 'rt, 'imm16x");
+ Format(instr, "jialc 'rt, 'imm16s");
} else {
- Format(instr, "bnezc 'rs, 'imm21x -> 'imm21p4s2");
+ Format(instr, "bnezc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
// ------------- Arithmetic instructions.
@@ -1445,25 +1445,33 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (!IsMipsArchVariant(kMips32r6)) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
- // Check if BOVC or BEQC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BOVC, BEQZALC or BEQC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ if (rs_reg == 0) {
+ Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
case DADDI:
if (IsMipsArchVariant(kMips32r6)) {
- // Check if BNVC or BNEC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BNVC, BNEZALC or BNEC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ if (rs_reg == 0) {
+ Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
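Both hunks above implement the same r6 disambiguation: under opcode ADDI (POP10) or DADDI (POP30), rs >= rt selects the overflow-branch form, rs == 0 (which implies rs < rt here) selects the branch-and-link form, and the remaining case is the compare-and-branch form. A standalone sketch of that predicate, illustrative only:

#include <cstdio>

// Decode the r6 POP10 branch family from the rs/rt register fields.
const char* DecodePop10(int rs, int rt) {
  if (rs >= rt) return "bovc";    // overflow branch (also covers rs == rt)
  if (rs == 0) return "beqzalc";  // rs < rt with rs == 0: branch-and-link
  return "beqc";                  // rs < rt, both non-zero: compare branch
}

int main() {
  printf("%s\n", DecodePop10(3, 3));  // bovc
  printf("%s\n", DecodePop10(0, 5));  // beqzalc
  printf("%s\n", DecodePop10(2, 5));  // beqc
  return 0;
}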
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 0452ece222..849dea2841 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -169,6 +169,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index b85b1cbf4d..2fe3554b68 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -78,14 +78,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2};
@@ -108,6 +100,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return a0; }
@@ -229,6 +225,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -392,16 +395,39 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a0, // argument count (including receiver)
+ a0, // argument count (not including receiver)
a2, // address of first argument
a1 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (not including receiver)
+ a3, // original constructor
+ a1, // constructor to call
+ a2 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (argc)
+ a2, // address of first argument (argv)
+ a1 // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index e4cf09798b..4a5a386fa0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -13,6 +13,7 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips/macro-assembler-mips.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -24,8 +25,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
has_frame_(false),
has_double_zero_reg_set_(false) {
if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -146,7 +147,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
+ int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -435,7 +436,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1440,7 +1441,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(UN, D, cmp1, cmp2);
bc1f(&skip);
nop();
- Jr(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
c(UN, D, cmp1, cmp2);
@@ -1458,7 +1459,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
- Jr(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
@@ -1477,7 +1478,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- Jr(target, bd);
+ BranchLong(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
@@ -1937,28 +1938,30 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+ DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchShort(L, bdslot);
} else {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
} else {
BranchShort(L, bdslot);
}
@@ -1970,17 +1973,15 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
}
}
} else {
@@ -1989,10 +1990,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
@@ -2011,7 +2012,10 @@ void MacroAssembler::Branch(Label* L,
}
-void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset);
// Emit a nop in the branch delay slot if required.
@@ -2020,549 +2024,543 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
}
-void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- DCHECK(!rs.is(zero_reg));
- Register r2 = no_reg;
- Register scratch = at;
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ bc(offset);
+}
- if (rt.is_reg()) {
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
- // rt.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- r2 = rt.rm_;
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison.
- case greater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- bne(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
- } else {
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- beq(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchShortHelperR6(offset, nullptr);
} else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- if (rt.imm32_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
- }
- break;
- case ne:
- if (rt.imm32_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
- }
- break;
- // Signed comparison.
- case greater:
- if (rt.imm32_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm32_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm32_ == 0) {
- bltz(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm32_ == 0) {
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm32_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm32_ == 0) {
- b(offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm32_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm32_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(is_int16(offset));
+ BranchShortHelper(offset, nullptr, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BranchShortHelper(0, L, bdslot);
+ }
+}
- b(shifted_branch_offset(L, false));
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm().is(zero_reg);
+ } else {
+ return rt.immediate() == 0;
+ }
}
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+ if (L) {
+ offset = branch_offset_helper(L, bits) >> 2;
+ } else {
+ DCHECK(is_intn(offset, bits));
+ }
+ return offset;
+}
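GetOffset either resolves a label through branch_offset_helper or range-checks a raw offset. The >> 2 converts a byte distance into the instruction-count offset that MIPS branch encodings use, since every instruction is 4 bytes. A worked example, illustrative only:

#include <cassert>

int main() {
  // A branch target 64 bytes ahead encodes as 16 instructions ahead.
  int byte_distance = 64;
  int encoded_offset = byte_distance >> 2;  // MIPS instructions are 4 bytes
  assert(encoded_offset == 16);
  return 0;
}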
- int32_t offset = 0;
+
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+ Register scratch) {
Register r2 = no_reg;
- Register scratch = at;
if (rt.is_reg()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ return r2;
+}
+
+
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
break;
case eq:
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre R6 beq is used here to make the code patchable. Otherwise bc
+ // should be used, which has no condition field and so is not patchable.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beq(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqc(rs, scratch, offset);
+ }
break;
case ne:
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre R6 bne is used here to make the code patchable. Otherwise we
+ // should not generate any instruction.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bne(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnec(rs, scratch, offset);
+ }
break;
+
// Signed comparison.
case greater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(scratch, rs, offset);
}
break;
case greater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(rs, scratch, offset);
}
break;
case less:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(rs, scratch, offset);
}
break;
case less_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(scratch, rs, offset);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(rs, scratch, offset);
}
break;
case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ break; // No code needs to be emitted.
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(scratch, rs, offset);
}
break;
default:
UNREACHABLE();
}
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ }
+ CheckTrampolinePoolQuick(1);
+ return true;
+}
+
+
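The recurring shape in BranchShortHelperR6 above: pick the widest offset field the chosen compact branch supports (26 bits for bc/balc, 21 for beqzc/bnezc, 16 for the two-register forms), and return false when the target is out of range so the caller can fall back to a long branch. A minimal sketch of that control flow, with hypothetical emit helpers standing in for the real assembler:

#include <cstdio>

enum OffsetSize { kOffset16 = 16, kOffset21 = 21, kOffset26 = 26 };

// Hypothetical range check: does a resolved word offset fit in |bits| bits?
bool IsNear(int offset, OffsetSize bits) {
  return offset >= -(1 << (bits - 1)) && offset < (1 << (bits - 1));
}

// Return false when the compact branch cannot reach, so the caller can
// fall back to a long-branch sequence.
bool TryEmitBeqzc(int offset) {
  if (!IsNear(offset, kOffset21)) return false;  // beqzc: 21-bit offset
  printf("beqzc by %d\n", offset);               // stand-in for the emitter
  return true;
}

int main() {
  if (!TryEmitBeqzc(1 << 22)) printf("fall back to long branch\n");
  return 0;
}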
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+
+ Register scratch = at;
+ int32_t offset32;
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
break;
case eq:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, scratch, offset32);
}
break;
case ne:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, scratch, offset32);
}
break;
+
// Signed comparison.
case greater:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgtz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case greater_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
case less:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case less_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ blez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Ugreater_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
- case Uless:
- if (rt.imm32_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ case Uless:
+ if (IsZero(rt)) {
+ return true; // No code needs to be emitted.
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Uless_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
default:
UNREACHABLE();
}
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ return BranchShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
+}
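BranchShortCheck is the single funnel: callers pass either a label (with offset 0) or a raw offset (with a null label), and the boolean result tells them whether a short branch was emitted or a long-branch fallback is needed, as in the rewritten Branch() earlier in this file. A condensed, runnable simulation of that caller-side pattern (the emit functions are hypothetical stand-ins):

#include <cstdio>

// Hypothetical stand-in for the assembler's short-branch emitter.
bool EmitShortBranch(int distance) {
  const int kMin = -(1 << 15), kMax = (1 << 15) - 1;  // 16-bit word offset
  if (distance < kMin || distance > kMax) return false;
  printf("short branch by %d\n", distance);
  return true;
}

void EmitLongBranch(int distance) { printf("long branch by %d\n", distance); }

// Mirrors the pattern around BranchShortCheck: try short, fall back to long.
void Branch(int distance) {
  if (!EmitShortBranch(distance)) EmitLongBranch(distance);
}

int main() {
  Branch(100);      // within short range
  Branch(1 << 20);  // out of range: falls back to the long form
  return 0;
}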
+
+
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(0, L, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
} else {
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
@@ -2574,13 +2572,11 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
}
} else {
@@ -2588,20 +2584,19 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
}
}
}
-// We need to use a bgezal or bltzal, but they can't be used directly with the
-// slt instructions. We could use sub or add instead but we would miss overflow
-// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLinkShort(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
// Emit a nop in the branch delay slot if required.
@@ -2610,371 +2605,306 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset,
}
-void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
-
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
-
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ balc(offset);
+}
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(int32_t offset,
+ BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchAndLinkShortHelperR6(offset, nullptr);
} else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
-
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
+ DCHECK(is_int16(offset));
+ BranchAndLinkShortHelper(offset, nullptr, bdslot);
+ }
+}
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BranchAndLinkShortHelper(0, L, bdslot);
}
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
- bal(shifted_branch_offset(L, false));
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
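+ // Use t8 as the scratch when rs already occupies at, so that materializing
+ // rt below cannot clobber rs.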
+ OffsetSize bits = OffsetSize::kOffset16;
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
+ switch (cond) {
+ case cc_always:
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ break;
+ case eq:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case ne:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case greater_equal:
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
+ case less:
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case less_equal:
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
-void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset = 0;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case Uless:
+ // rs < rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Uless_equal:
+ // rs <= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ default:
+ UNREACHABLE();
}
+ return true;
+}
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
-
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
+// Pre-r6 we need to use bgezal or bltzal, but they can't be used directly
+// with the slt instructions. We could use sub or add instead, but we would
+// miss overflow cases, so we keep slt and add an intermediate third
+// instruction.
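+// For example, the signed (rs > rt) case below emits:
+//   slt    scratch, rt, rs       // scratch = (rs > rt) ? 1 : 0
+//   addiu  scratch, scratch, -1  // scratch = (rs > rt) ? 0 : -1
+//   bgezal scratch, offset       // link and branch iff scratch >= 0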
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
- default:
- UNREACHABLE();
- }
- } else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ Register scratch = t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ switch (cond) {
+ case cc_always:
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ // Signed comparison.
+ case greater:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ // Unsigned comparison.
+ case Ugreater:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
- default:
- UNREACHABLE();
- }
+ default:
+ UNREACHABLE();
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
-
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
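+ // The compact r6 branch-and-link forms have no delay slot, so they are only
+ // used when the caller does not need one filled (bdslot == PROTECT).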
+ if (!L) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
}
@@ -3064,6 +2994,10 @@ void MacroAssembler::Call(Register target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+#ifdef DEBUG
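+ // If the previous instruction is a compact branch, a nop is emitted into
+ // its forbidden slot before the call sequence, so the generated code is one
+ // instruction larger than CallSize() reports.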
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -3078,8 +3012,10 @@ void MacroAssembler::Call(Register target,
if (bd == PROTECT)
nop();
- DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
}
@@ -3157,43 +3093,51 @@ void MacroAssembler::Ret(Condition cond,
}
-void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
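+ // lui/ori materialize the absolute 32-bit target address in at, which is
+ // then jumped through below.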
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
- uint32_t imm32;
- imm32 = jump_address(L);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- jr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jalr(at);
- uint32_t imm32;
- imm32 = jump_address(L);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- jalr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
@@ -3681,7 +3625,7 @@ void MacroAssembler::CopyFields(Register dst,
// Find a temp register in temps list.
for (int i = 0; i < kNumRegisters; i++) {
if ((temps & (1 << i)) != 0) {
- tmp.code_ = i;
+ tmp.reg_code = i;
break;
}
}
@@ -4580,7 +4524,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the builtins object into target register.
lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
+ lw(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
lw(target, ContextOperand(target, native_context_index));
}
@@ -4730,7 +4674,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
lw(dst, GlobalObjectOperand());
- lw(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+ lw(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -4743,7 +4687,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Load the global or builtins object from the current context.
lw(scratch,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
lw(scratch,
@@ -4766,8 +4710,7 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
lw(function,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- lw(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
+ lw(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
lw(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -5778,8 +5721,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
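+ // Return the first allocatable general register (in allocation order) whose
+ // bit is not set in the exclusion mask.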
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
@@ -5884,25 +5830,10 @@ void CodePatcher::Emit(Address addr) {
}
-void CodePatcher::ChangeBranchCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- DCHECK(Assembler::IsBranch(instr));
- uint32_t opcode = Assembler::GetOpcodeField(instr);
- // Currently only the 'eq' and 'ne' cond values are supported and the simple
- // branch instructions (with opcode being the branch type).
- // There are some special cases (see Assembler::IsBranch()) so extending this
- // would be tricky.
- DCHECK(opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL);
- opcode = (cond == eq) ? BEQ : BNE;
- instr = (instr & ~kOpcodeMask) | opcode;
- masm_.emit(instr);
+void CodePatcher::ChangeBranchCondition(Instr current_instr,
+ uint32_t new_opcode) {
+ current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
+ masm_.emit(current_instr);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 1608c951b6..8890be8131 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -13,17 +13,18 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_v0_Code};
-const Register kReturnRegister1 = {kRegister_v1_Code};
-const Register kJSFunctionRegister = {kRegister_a1_Code};
+const Register kReturnRegister0 = {Register::kCode_v0};
+const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
-const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_t3_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_t4_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_t5_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_t6_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_t3};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
// Forward declaration.
class JumpTarget;
@@ -164,9 +165,9 @@ class MacroAssembler: public Assembler {
Name(target, COND_ARGS, bd); \
}
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+ DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
@@ -203,6 +204,8 @@ class MacroAssembler: public Assembler {
Ret(cond, rs, rt, bd);
}
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
void Branch(Label* L,
Condition cond,
Register rs,
@@ -1625,21 +1628,39 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments);
- void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void Jr(Label* L, BranchDelaySlot bdslot);
- void Jalr(Label* L, BranchDelaySlot bdslot);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
// Common implementation of BranchF functions for the different formats.
void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
@@ -1724,7 +1745,7 @@ class CodePatcher {
// Change the condition part of an instruction leaving the rest of the current
// instruction unchanged.
- void ChangeBranchCondition(Condition cond);
+ void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);
private:
byte* address_; // The address of the code being patched.
@@ -1744,6 +1765,7 @@ class CodePatcher {
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 4ef61abe3d..e9dd0d32dc 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -129,7 +129,7 @@ void MipsDebugger::Stop(Instruction* instr) {
#else // GENERATED_CODE_COVERAGE
-#define UNSUPPORTED() printf("Unsupported instruction.\n");
+#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
static void InitializeCoverage() {}
@@ -2310,7 +2310,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_double(fd_reg(), result);
}
@@ -3070,7 +3070,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_float(fd_reg(), result);
}
@@ -3736,26 +3736,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
-// Branch instructions common part.
-#define BranchAndLinkHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- set_register(31, current_pc + 2 * Instruction::kInstrSize); \
- } else { \
- next_pc = current_pc + 2 * Instruction::kInstrSize; \
- }
-
-#define BranchHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- } else { \
- next_pc = current_pc + 2 * Instruction::kInstrSize; \
- }
-
-
-// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq).
+// Type 2: instructions using a 16-, 21- or 26-bit immediate (e.g. beq, beqc).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
@@ -3765,20 +3746,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int32_t rt_reg = instr->RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
- int32_t imm21 = instr->Imm21Value();
- int32_t imm26 = instr->Imm26Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
- int64_t ft;
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
int32_t se_imm16 = imm16;
- int32_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfc000000 : 0);
- // Get current pc.
- int32_t current_pc = get_pc();
// Next pc.
int32_t next_pc = bad_ra;
@@ -3791,7 +3766,58 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Used for memory instructions.
int32_t addr = 0x0;
- // ---------- Configuration (and execution for REGIMM).
+ // Branch instructions common part.
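+ // The lambdas below capture next_pc and execute_branch_delay_instruction by
+ // reference, so invoking one updates the branch state of this decode step
+ // in place.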
+ auto BranchAndLinkHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](
+ bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int32_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
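+ // Link: ra receives the address just past the delay slot (pc + 8).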
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int32_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
+ int bits) {
+ int32_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
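+ // Sign-extend the bits-wide immediate: shift it into the top of the word,
+ // then arithmetic-shift back (e.g. for bits == 16, 0x8000 -> 0xffff8000).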
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + Instruction::kInstrSize);
+ }
+ };
+
+ auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+ int32_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
+ }
+ };
+
+
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
@@ -3802,34 +3828,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
uint32_t cc_value = test_fcsr_bit(fcsr_cc);
bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(do_branch);
break;
}
case BC1EQZ:
- ft = get_fpu_register(ft_reg);
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (!(ft & 0x1)) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
break;
case BC1NEZ:
- ft = get_fpu_register(ft_reg);
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (ft & 0x1) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
default:
UNREACHABLE();
@@ -3863,54 +3869,158 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BNE:
BranchHelper(rs != rt);
break;
- case BLEZ:
- BranchHelper(rs <= 0);
- break;
- case BGTZ:
- BranchHelper(rs > 0);
- break;
- case POP66: {
- if (rs_reg) { // BEQZC
- int32_t se_imm21 =
- static_cast<int32_t>(imm21 << (kOpcodeBits + kRsBits));
- se_imm21 = se_imm21 >> (kOpcodeBits + kRsBits);
- if (rs == 0)
- next_pc = current_pc + 4 + (se_imm21 << 2);
- else
- next_pc = current_pc + 4;
+ case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZALC
+ BranchAndLinkCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZALC
+ BranchAndLinkCompactHelper(rt >= 0, 16);
+ } else { // BGEUC
+ BranchCompactHelper(
+ static_cast<uint32_t>(rs) >= static_cast<uint32_t>(rt), 16);
+ }
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ break;
+ case POP07: // BGTZALC, BLTZALC, BLTUC, BGTZ (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZALC
+ BranchAndLinkCompactHelper(rt > 0, 16);
+ } else {
+ if (rt_reg == rs_reg) { // BLTZALC
+ BranchAndLinkCompactHelper(rt < 0, 16);
+ } else { // BLTUC
+ BranchCompactHelper(
+ static_cast<uint32_t>(rs) < static_cast<uint32_t>(rt), 16);
+ }
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ break;
+ case POP26: // BLEZC, BGEZC, BGEC/BLEC / BLEZL (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZC
+ BranchCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZC
+ BranchCompactHelper(rt >= 0, 16);
+ } else { // BGEC/BLEC
+ BranchCompactHelper(rs >= rt, 16);
+ }
+ }
+ }
+ } else { // BLEZL
+ BranchAndLinkHelper(rs <= 0);
+ }
+ break;
+ case POP27: // BGTZC, BLTZC, BLTC/BGTC / BGTZL (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZC
+ BranchCompactHelper(rt > 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BLTZC
+ BranchCompactHelper(rt < 0, 16);
+ } else { // BLTC/BGTC
+ BranchCompactHelper(rs < rt, 16);
+ }
+ }
+ }
+ } else { // BGTZL
+ BranchAndLinkHelper(rs > 0);
+ }
+ break;
+ case POP66: // BEQZC, JIC
+ if (rs_reg != 0) { // BEQZC
+ BranchCompactHelper(rs == 0, 21);
} else { // JIC
+ CheckForbiddenSlot(get_pc());
next_pc = rt + imm16;
}
break;
- }
- case BC: {
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case POP76: // BNEZC, JIALC
+ if (rs_reg != 0) { // BNEZC
+ BranchCompactHelper(rs != 0, 21);
+ } else { // JIALC
+ int32_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ set_register(31, current_pc + Instruction::kInstrSize);
+ next_pc = rt + imm16;
+ }
break;
- }
- case BALC: {
- set_register(31, current_pc + 4);
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case BC:
+ BranchCompactHelper(true, 26);
break;
- }
- // ------------- Arithmetic instructions.
- case ADDI:
- if (HaveSameSign(rs, se_imm16)) {
- if (rs > 0) {
- if (rs <= (Registers::kMaxValue - se_imm16)) {
- SignalException(kIntegerOverflow);
+ case BALC:
+ BranchAndLinkCompactHelper(true, 26);
+ break;
+ case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rs_reg >= rt_reg) { // BOVC
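+ // BOVC: branch iff the signed addition rs + rt would overflow 32 bits.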
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ BranchCompactHelper(rs > Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs < Registers::kMinValue - rt, 16);
+ }
}
- } else if (rs < 0) {
- if (rs >= (Registers::kMinValue - se_imm16)) {
- SignalException(kIntegerUnderflow);
+ } else {
+ if (rs_reg == 0) { // BEQZALC
+ BranchAndLinkCompactHelper(rt == 0, 16);
+ } else { // BEQC
+ BranchCompactHelper(rt == rs, 16);
+ }
+ }
+ } else { // ADDI
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ if (rs > Registers::kMaxValue - se_imm16) {
+ SignalException(kIntegerOverflow);
+ }
+ } else if (rs < 0) {
+ if (rs < Registers::kMinValue - se_imm16) {
+ SignalException(kIntegerUnderflow);
+ }
+ }
+ }
+ SetResult(rt_reg, rs + se_imm16);
+ }
+ break;
+ case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rs_reg >= rt_reg) { // BNVC
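+ // BNVC: branch iff the signed addition rs + rt does not overflow.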
+ if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) {
+ BranchCompactHelper(true, 16);
+ } else {
+ if (rs > 0) {
+ BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs >= Registers::kMinValue - rt, 16);
+ }
+ }
+ } else {
+ if (rs_reg == 0) { // BNEZALC
+ BranchAndLinkCompactHelper(rt != 0, 16);
+ } else { // BNEC
+ BranchCompactHelper(rt != rs, 16);
}
}
}
- SetResult(rt_reg, rs + se_imm16);
break;
+ // ------------- Arithmetic instructions.
case ADDIU:
SetResult(rt_reg, rs + se_imm16);
break;
@@ -4014,22 +4124,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
break;
- // ------------- JIALC and BNEZC instructions.
- case POP76: {
- // Next pc.
- next_pc = rt + se_imm16;
- // The instruction after the jump is NOT executed.
- int16_t pc_increment = Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + pc_increment);
- }
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
+ int32_t imm21 = instr->Imm21Value();
+ int32_t current_pc = get_pc();
uint8_t rt = (imm21 >> kImm16Bits);
switch (rt) {
case ALUIPC:
@@ -4076,7 +4175,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -4086,9 +4185,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
-#undef BranchHelper
-#undef BranchAndLinkHelper
-
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
@@ -4174,7 +4270,7 @@ void Simulator::Execute() {
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+ if (icount_ == static_cast<uint64_t>(::v8::internal::FLAG_stop_sim_at)) {
MipsDebugger dbg(this);
dbg.Debug();
} else {
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index bd30172d5b..624d4acf80 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -55,7 +55,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
// Calculate the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
@@ -314,6 +315,7 @@ class Simulator {
void DecodeTypeRegisterLRsType();
Instruction* currentInstr_;
+
inline Instruction* get_instr() const { return currentInstr_; }
inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
@@ -345,6 +347,18 @@ class Simulator {
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Compact branch guard.
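+ // On r6 the instruction slot following a compact branch (the "forbidden
+ // slot") must not contain another branch or jump; abort the simulation if
+ // generated code violates this.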
+ void CheckForbiddenSlot(int32_t current_pc) {
+ Instruction* instr_after_compact_branch =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ if (instr_after_compact_branch->IsForbiddenInBranchDelay()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ }
+ }
+
// Stop helper functions.
bool IsWatchpoint(uint32_t code);
void PrintWatchpoint(uint32_t code);
@@ -480,7 +494,8 @@ class SimulatorStack : public v8::internal::AllStatic {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 16ca33a9f3..e35bf2facb 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -84,36 +84,6 @@ bool Operand::is_reg() const {
}
-int Register::NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumRegisters() {
- return FPURegister::kMaxNumRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableRegisters() {
- return FPURegister::kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
-}
-
-
-int FPURegister::ToAllocationIndex(FPURegister reg) {
- DCHECK(reg.code() % 2 == 0);
- DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
- DCHECK(reg.is_valid());
- DCHECK(!reg.is(kDoubleRegZero));
- DCHECK(!reg.is(kLithiumScratchDouble));
- return (reg.code() / 2);
-}
-
-
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -488,6 +458,7 @@ void Assembler::emit(uint64_t x) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index cb5e164ff9..e0f12ed020 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -64,28 +64,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
-const char* DoubleRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
-}
-
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
@@ -229,31 +207,31 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (kPointerSize & kImm16Mask); // NOLINT
+const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (kPointerSize & kImm16Mask); // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (-kPointerSize & kImm16Mask); // NOLINT
+const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (-kPointerSize & kImm16Mask); // NOLINT
// sd(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern =
+ SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
// ld(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern =
+ LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kLwRegFpOffsetPattern =
+ LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kSwRegFpOffsetPattern =
+ SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
-const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -314,21 +292,21 @@ void Assembler::CodeTargetAlign() {
Register Assembler::GetRtReg(Instr instr) {
Register rt;
- rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
return rt;
}
Register Assembler::GetRsReg(Instr instr) {
Register rs;
- rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
return rs;
}
Register Assembler::GetRdReg(Instr instr) {
Register rd;
- rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
return rd;
}
@@ -1366,9 +1344,11 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
void Assembler::bgezall(Register rs, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK(kArchVariant != kMips64r6);
DCHECK(!(rs.is(zero_reg)));
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1431,15 +1411,19 @@ void Assembler::bnezc(Register rs, int32_t offset) {
void Assembler::j(int64_t target) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::j(Label* target) {
uint64_t imm = jump_offset(target);
if (target->is_bound()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrJump(static_cast<Opcode>(kJRawMark),
static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
j(imm);
}
@@ -1449,8 +1433,11 @@ void Assembler::j(Label* target) {
void Assembler::jal(Label* target) {
uint64_t imm = jump_offset(target);
if (target->is_bound()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrJump(static_cast<Opcode>(kJalRawMark),
static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
jal(imm);
}
@@ -1472,8 +1459,10 @@ void Assembler::jr(Register rs) {
void Assembler::jal(int64_t target) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -2211,14 +2200,14 @@ void Assembler::movn(Register rd, Register rs, Register rt) {
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 1;
+ rt.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 0;
+ rt.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
@@ -2304,6 +2293,16 @@ void Assembler::clz(Register rd, Register rs) {
}
+void Assembler::dclz(Register rd, Register rs) {
+ if (kArchVariant != kMips64r6) {
+ // dclz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
+ }
+}
+
+
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2520,7 +2519,7 @@ void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
@@ -2528,7 +2527,7 @@ void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -2536,7 +2535,7 @@ void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
@@ -2544,7 +2543,7 @@ void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index be57f29806..f0f54aab1c 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -41,12 +41,33 @@
#include <set>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/mips64/constants-mips64.h"
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(t3) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
+ V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
+ V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+// clang-format on
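+// These X-macro lists drive the Register and DoubleRegister Code enums and
+// the register constants defined below; the ALLOCATABLE_* subsets name the
+// registers the register allocator may hand out.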
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -72,11 +93,7 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
-// Core register.
struct Register {
- static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t2 and cp.
- static const int kSizeInBytes = 8;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -89,117 +106,47 @@ struct Register {
#error Unknown endianness
#endif
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
- reg.is(from_code(kCpRegister)));
- return reg.is(from_code(kCpRegister)) ?
- kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
- reg.code() - 2; // zero_reg and 'at' are skipped.
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return index == kMaxNumAllocatableRegisters - 1 ?
- from_code(kCpRegister) : // Last index is always the 'cp' register.
- from_code(index + 2); // zero_reg and 'at' are skipped.
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "v0",
- "v1",
- "a0",
- "a1",
- "a2",
- "a3",
- "a4",
- "a5",
- "a6",
- "a7",
- "t0",
- "t1",
- "t2",
- "s7",
- };
- return names[index];
- }
+ enum Code {
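+ // Expands to kCode_zero_reg, kCode_at, ..., kCode_ra, matching the hardware
+ // register numbering.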
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
Register r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-#define REGISTER(N, C) \
- const int kRegister_ ## N ## _Code = C; \
- const Register N = { C }
-
-REGISTER(no_reg, -1);
-// Always zero.
-REGISTER(zero_reg, 0);
-// at: Reserved for synthetic instructions.
-REGISTER(at, 1);
-// v0, v1: Used when returning multiple values from subroutines.
-REGISTER(v0, 2);
-REGISTER(v1, 3);
-// a0 - a4: Used to pass non-FP parameters.
-REGISTER(a0, 4);
-REGISTER(a1, 5);
-REGISTER(a2, 6);
-REGISTER(a3, 7);
-// a4 - a7 t0 - t3: Can be used without reservation, act as temporary registers
-// and are allowed to be destroyed by subroutines.
-REGISTER(a4, 8);
-REGISTER(a5, 9);
-REGISTER(a6, 10);
-REGISTER(a7, 11);
-REGISTER(t0, 12);
-REGISTER(t1, 13);
-REGISTER(t2, 14);
-REGISTER(t3, 15);
-// s0 - s7: Subroutine register variables. Subroutines that write to these
-// registers must restore their values before exiting so that the caller can
-// expect the values to be preserved.
-REGISTER(s0, 16);
-REGISTER(s1, 17);
-REGISTER(s2, 18);
-REGISTER(s3, 19);
-REGISTER(s4, 20);
-REGISTER(s5, 21);
-REGISTER(s6, 22);
-REGISTER(s7, 23);
-REGISTER(t8, 24);
-REGISTER(t9, 25);
-// k0, k1: Reserved for system calls and interrupt handlers.
-REGISTER(k0, 26);
-REGISTER(k1, 27);
-// gp: Reserved.
-REGISTER(gp, 28);
-// sp: Stack pointer.
-REGISTER(sp, 29);
-// fp: Frame pointer.
-REGISTER(fp, 30);
-// ra: Return address pointer.
-REGISTER(ra, 31);
-
-#undef REGISTER
+// s7: context register
+// s3: lithium scratch
+// s4: lithium scratch2
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
int ToNumber(Register reg);
@@ -207,77 +154,72 @@ int ToNumber(Register reg);
Register ToRegister(int num);
// Coprocessor register.
-struct FPURegister {
- static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
-
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0.
- // f28: 0.0
- // f30: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
- kNumReservedRegisters;
+ static const int kMaxNumRegisters = Code::kAfterLast;
inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
-
- // TODO(turbofan): Proper support for float32.
- inline static int NumAllocatableAliasedRegisters();
- inline static int ToAllocationIndex(FPURegister reg);
- static const char* AllocationIndexToString(int index);
-
- static FPURegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index * 2);
- }
-
- static FPURegister from_code(int code) {
- FPURegister r = { code };
- return r;
- }
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- FPURegister low() const {
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ DoubleRegister low() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code;
DCHECK(reg.is_valid());
return reg;
}
- FPURegister high() const {
+ DoubleRegister high() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
// Find high reg of a Double-reg pair, which is reg + 1.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_ + 1;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code + 1;
DCHECK(reg.is_valid());
return reg;
}
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// f28: 0.0
+// f30: scratch register.
+
// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
@@ -287,43 +229,43 @@ struct FPURegister {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister DoubleRegister;
-typedef FPURegister FloatRegister;
-
-const FPURegister no_freg = { -1 };
-
-const FPURegister f0 = { 0 }; // Return value in hard float mode.
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
+typedef DoubleRegister FPURegister;
+typedef DoubleRegister FloatRegister;
+
+const DoubleRegister no_freg = {-1};
+
+const DoubleRegister f0 = {0}; // Return value in hard float mode.
+const DoubleRegister f1 = {1};
+const DoubleRegister f2 = {2};
+const DoubleRegister f3 = {3};
+const DoubleRegister f4 = {4};
+const DoubleRegister f5 = {5};
+const DoubleRegister f6 = {6};
+const DoubleRegister f7 = {7};
+const DoubleRegister f8 = {8};
+const DoubleRegister f9 = {9};
+const DoubleRegister f10 = {10};
+const DoubleRegister f11 = {11};
+const DoubleRegister f12 = {12}; // Arg 0 in hard float mode.
+const DoubleRegister f13 = {13};
+const DoubleRegister f14 = {14}; // Arg 1 in hard float mode.
+const DoubleRegister f15 = {15};
+const DoubleRegister f16 = {16};
+const DoubleRegister f17 = {17};
+const DoubleRegister f18 = {18};
+const DoubleRegister f19 = {19};
+const DoubleRegister f20 = {20};
+const DoubleRegister f21 = {21};
+const DoubleRegister f22 = {22};
+const DoubleRegister f23 = {23};
+const DoubleRegister f24 = {24};
+const DoubleRegister f25 = {25};
+const DoubleRegister f26 = {26};
+const DoubleRegister f27 = {27};
+const DoubleRegister f28 = {28};
+const DoubleRegister f29 = {29};
+const DoubleRegister f30 = {30};
+const DoubleRegister f31 = {31};
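The O32 pairing described above can be pictured with a small standalone sketch (toy struct, not V8's DoubleRegister): a double with even code c occupies the 32-bit FPU registers c and c + 1.

    #include <cassert>

    // Toy model of the O32 pairing: a double register with an even code c
    // occupies the 32-bit FPU registers c (low word) and c + 1 (high word).
    struct ToyDoubleReg {
      int code;
      ToyDoubleReg low() const { assert(code % 2 == 0); return {code}; }
      ToyDoubleReg high() const { assert(code % 2 == 0); return {code + 1}; }
    };

    int main() {
      ToyDoubleReg f12{12};           // Arg 0 in hard float mode.
      assert(f12.low().code == 12);   // Low word is the register itself.
      assert(f12.high().code == 13);  // High word is the odd partner, f13.
    }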
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -343,22 +285,22 @@ const FPURegister f31 = { 31 };
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
- bool is_valid() const { return code_ == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return reg_code == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
@@ -912,14 +854,15 @@ class Assembler : public AssemblerBase {
void movz_s(FPURegister fd, FPURegister fs, Register rt);
void movz_d(FPURegister fd, FPURegister fs, Register rt);
- void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movn_s(FPURegister fd, FPURegister fs, Register rt);
void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
void clz(Register rd, Register rs);
+ void dclz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -1507,6 +1450,7 @@ class EnsureSpace BASE_EMBEDDED {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index a736019da1..fc81e712d2 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -22,8 +22,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
- // -- a1 : called function (only guaranteed when
- // -- extra_args requires it)
+ // (only guaranteed when the called function
+ // is not marked as DontAdaptArguments)
+ // -- a1 : called function
// -- sp[0] : last argument
// -- ...
// -- sp[8 * (argc - 1)] : first argument
@@ -48,8 +49,21 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects a0 to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But a0 is only valid
+  // if the called function is marked as DontAdaptArguments; otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ Label argc, done_argc;
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Branch(&argc, eq, a2,
+ Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ Daddu(a0, a2, num_extra_args + 1);
+ __ jmp(&done_argc);
+ __ bind(&argc);
__ Daddu(a0, a0, num_extra_args + 1);
+ __ bind(&done_argc);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
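In C terms, the fix-up above selects between the dynamic count in a0 and the declared parameter count; a minimal sketch, with an illustrative sentinel value rather than the V8 constant:

    #include <cstdint>

    // Illustrative; V8's is SharedFunctionInfo::kDontAdaptArgumentsSentinel.
    const int32_t kDontAdaptSentinel = -1;

    // The argc handed to JumpToExternalReference, as computed above.
    int32_t AdaptorArgc(int32_t actual_argc, int32_t formal_param_count,
                        int32_t num_extra_args) {
      if (formal_param_count == kDontAdaptSentinel) {
        return actual_argc + num_extra_args + 1;  // a0 is valid; +1 receiver.
      }
      return formal_param_count + num_extra_args + 1;  // Use declared count.
    }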
@@ -61,8 +75,7 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ ld(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ ld(result,
MemOperand(result,
@@ -77,8 +90,7 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
__ ld(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
__ ld(result,
MemOperand(result,
@@ -210,6 +222,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a3 : original constructor
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -243,10 +256,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(a1);
+ __ Push(a1, a3);
__ CallStub(&stub);
__ Move(a0, v0);
- __ Pop(a1);
+ __ Pop(a1, a3);
}
__ bind(&done_convert);
}
@@ -256,10 +269,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the first argument
// -- a1 : constructor function
+ // -- a3 : original constructor
// -- ra : return address
// -----------------------------------
- Label allocate, done_allocate;
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ Branch(&rt_call, ne, a1, Operand(a3));
+
__ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -283,6 +301,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(a0, a1);
}
__ jmp(&done_allocate);
+
+  // Fall back to the runtime to create a new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a1, a3); // constructor function, original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(a0, a1);
+ }
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ __ Ret();
}
}
@@ -364,16 +393,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ld(a2, MemOperand(a2));
__ Branch(&rt_call, ne, a2, Operand(zero_reg));
- // Fall back to runtime if the original constructor and function differ.
- __ Branch(&rt_call, ne, a1, Operand(a3));
+ // Verify that the original constructor is a JSFunction.
+ __ GetObjectType(a3, a5, a4);
+ __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
// Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ // a3: original constructor
+ __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(a2, &rt_call);
__ GetObjectType(a2, t1, t0);
__ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
+  // Fall back to runtime if the expected base constructor and the actual
+  // base constructor differ.
+ __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&rt_call, ne, a1, Operand(a5));
+
  // Check that the constructor is not constructing a JSFunction (see
  // comments in Runtime_NewObject in runtime.cc), in which case the
  // initial map's instance type would be JS_FUNCTION_TYPE.
@@ -396,7 +431,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Operand(Map::kSlackTrackingCounterEnd));
__ sw(a4, bit_field3); // In delay slot.
- __ Push(a1, a2, a1); // a1 = Constructor.
+ __ Push(a1, a2, a2); // a2 = Initial map.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(a1, a2);
@@ -494,7 +529,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a3: original constructor
__ bind(&rt_call);
- __ Push(a1, a3); // arguments 2-3 / 1-2
+ __ Push(a1, a3); // constructor function, original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(t0, v0);
@@ -889,28 +924,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
+ __ push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -960,6 +983,67 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ Daddu(a3, a0, Operand(1)); // Add one for receiver.
+ __ dsll(a3, a3, kPointerSizeLog2);
+ __ Dsubu(a3, a2, Operand(a3));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ ld(t0, MemOperand(a2));
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ push(t0);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(a3));
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
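The loop walks a2 downward over argc + 1 slots (arguments plus receiver); the same pointer arithmetic in a hedged C++ sketch:

    #include <cstdint>
    #include <vector>

    // Sketch of the push loop above: first_arg is the highest-addressed slot
    // (what a2 holds on entry); argc excludes the receiver, hence the +1.
    void PushArgs(std::vector<int64_t>* stack, const int64_t* first_arg,
                  int64_t argc) {
      const int64_t* end = first_arg - (argc + 1);  // a3: one past last slot
      for (const int64_t* p = first_arg; p > end; --p) {
        stack->push_back(*p);  // ld t0, 0(a2); push t0; a2 -= kPointerSize
      }
    }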
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : original constructor
+ // -- a1 : constructor to call
+ // -- a2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ dsll(t0, a0, kPointerSizeLog2);
+ __ Dsubu(t0, a2, Operand(t0));
+
+ // Push a slot for the receiver.
+ __ push(zero_reg);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ ld(t1, MemOperand(a2));
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ push(t1);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(t0));
+
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1488,72 +1572,84 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(a1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
__ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
- __ ld(a3, MemOperand(at));
-
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -- a2 : the shared function info.
- // -- a3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(a3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ GetObjectType(a3, a4, a4);
- __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
- __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ ld(a3, MemOperand(at));
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a3, v0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ Branch(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a0);
- __ Push(a0, a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a3, v0);
- __ Pop(a0, a1);
- __ SmiUntag(a0);
- }
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ dsll(at, a0, kPointerSizeLog2);
__ daddu(at, sp, at);
__ sd(a3, MemOperand(at));
@@ -1573,11 +1669,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount actual(a0);
ParameterCount expected(a2);
__ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
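The receiver fix-up above tracks ES6 9.2.1 step by step; a sketch of the decision tree for a sloppy, non-native callee, using toy stand-in types rather than V8's heap objects:

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    struct Value { bool is_js_receiver; bool is_null_or_undefined; };
    Value GlobalProxy() { return {true, false}; }
    Value ToObject(Value v) { v.is_js_receiver = true; return v; }  // wrap primitive

    Value ConvertReceiver(Value receiver, ConvertReceiverMode mode) {
      if (mode == ConvertReceiverMode::kNullOrUndefined) {
        return GlobalProxy();  // Caller already proved null/undefined.
      }
      if (receiver.is_js_receiver) return receiver;  // Already an object.
      if (mode != ConvertReceiverMode::kNotNullOrUndefined &&
          receiver.is_null_or_undefined) {
        return GlobalProxy();  // null/undefined patch to the global proxy.
      }
      return ToObject(receiver);  // Primitives go through ToObjectStub.
    }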
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -1587,8 +1690,8 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
__ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
// 1. Call to function proxy.
@@ -1610,7 +1713,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ sd(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1706,35 +1811,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- a1 : the target to call (can be any Object).
-
- // Find the address of the last argument.
- __ Daddu(a3, a0, Operand(1)); // Add one for receiver.
- __ dsll(a3, a3, kPointerSizeLog2);
- __ Dsubu(a3, a2, Operand(a3));
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ ld(a4, MemOperand(a2));
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ push(a4);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(a3));
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index d0c05ad0cc..a6c4f33806 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -1064,13 +1064,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ //
+ // If argv_in_register():
+ // a2: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Compute the argv pointer in a callee-saved register.
- __ dsll(s1, a0, kPointerSizeLog2);
- __ Daddu(s1, sp, s1);
- __ Dsubu(s1, s1, kPointerSize);
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ dsll(s1, a0, kPointerSizeLog2);
+ __ Daddu(s1, sp, s1);
+ __ Dsubu(s1, s1, kPointerSize);
+ }
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
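When argv does not arrive in a2, the stub derives it from sp: the first argument sits argc - 1 slots above the stack pointer. The arithmetic, as a one-line sketch:

    #include <cstdint>

    const int kPointerSize = 8;  // MIPS64

    // s1 = sp + argc * kPointerSize - kPointerSize, i.e. the address of the
    // first (highest-addressed) argument.
    uintptr_t ComputeArgv(uintptr_t sp, int argc) {
      return sp + static_cast<uintptr_t>(argc) * kPointerSize - kPointerSize;
    }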
@@ -1150,8 +1158,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- // s0: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
+ Register argc;
+ if (argv_in_register()) {
+    // We don't want to pop arguments, so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // s0: still holds argc (callee-saved).
+ argc = s0;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -1686,7 +1701,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+ __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kNormalOffset));
@@ -1890,7 +1905,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments boilerplate from the current native context.
__ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+ __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
__ ld(a4, MemOperand(a4, Context::SlotOffset(
Context::STRICT_ARGUMENTS_MAP_INDEX)));
@@ -2523,103 +2538,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
- // Do not transform the receiver for strict mode functions.
- int32_t strict_mode_function_mask =
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
- // Do not transform the receiver for native (Compilerhints already in a3).
- int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
-
- __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
- __ And(at, a4, Operand(strict_mode_function_mask));
- __ Branch(cont, ne, at, Operand(zero_reg));
- __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, a4, Operand(native_mask));
- __ Branch(cont, ne, at, Operand(zero_reg));
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ li(a0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(a1);
- }
- __ Branch(USE_DELAY_SLOT, cont);
- __ sd(v0, MemOperand(sp, argc * kPointerSize));
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // a1 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, a4, a4);
- __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
- }
-
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ ld(a3, MemOperand(sp, argc * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, a4, a4);
- __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
@@ -2743,9 +2661,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2782,34 +2698,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ ld(a3, MemOperand(sp, argc * kPointerSize));
-
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, a4, a4);
- __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ li(a0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&slow_start, eq, a4, Operand(at));
+ __ Branch(&call, eq, a4, Operand(at));
// Verify that a4 contains an AllocationSite
__ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
@@ -2844,7 +2741,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sd(a4, FieldMemOperand(a2, with_types_offset));
__ ld(a4, FieldMemOperand(a2, generic_offset));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &slow_start);
+ __ Branch(USE_DELAY_SLOT, &call);
__ sd(a4, FieldMemOperand(a2, generic_offset)); // In delay slot.
__ bind(&uninitialized);
@@ -2884,23 +2781,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(a1);
}
- __ Branch(&have_js_function);
+ __ Branch(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, a4, a4);
- __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
- __ Branch(&have_js_function);
+ __ Branch(&call);
}
@@ -3012,7 +2900,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
__ Move(result_, v0);
call_helper.AfterCall(masm);
@@ -3333,6 +3221,23 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+  // The ToLength stub takes one argument in a0.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(a0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
+ __ mov(a0, zero_reg);
+ __ bind(&positive_smi);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_smi);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
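Only the Smi fast path is handled inline: negative values clamp to zero, everything else tail-calls the runtime. A sketch of the fast path on plain integers (the runtime half also applies the 2^53 - 1 cap of the full ToLength operation):

    #include <cstdint>

    // Inline half of ToLength: clamp a small integer (Smi) at zero. Non-Smi
    // inputs are not handled here; they reach Runtime::kToLength instead.
    int64_t ToLengthFastPath(int64_t smi_value) {
      return smi_value < 0 ? 0 : smi_value;
    }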
+
+
void ToStringStub::Generate(MacroAssembler* masm) {
  // The ToString stub takes one argument in a0.
Label is_number;
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index c54a3d07c5..2e18f59915 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -344,6 +344,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODE_STUBS_MIPS64_H_
diff --git a/deps/v8/src/mips64/codegen-mips64.h b/deps/v8/src/mips64/codegen-mips64.h
index f79ad4e41c..22784fcf53 100644
--- a/deps/v8/src/mips64/codegen-mips64.h
+++ b/deps/v8/src/mips64/codegen-mips64.h
@@ -46,6 +46,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index f23f103ac3..d2e0756e95 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -394,6 +394,8 @@ enum SecondaryField {
CLZ_R6 = ((2 << 3) + 0),
CLO_R6 = ((2 << 3) + 1),
MFLO = ((2 << 3) + 2),
+ DCLZ_R6 = ((2 << 3) + 2),
+ DCLO_R6 = ((2 << 3) + 3),
DSLLV = ((2 << 3) + 4),
DSRLV = ((2 << 3) + 6),
DSRAV = ((2 << 3) + 7),
@@ -462,6 +464,8 @@ enum SecondaryField {
MUL = ((0 << 3) + 2),
CLZ = ((4 << 3) + 0),
CLO = ((4 << 3) + 1),
+ DCLZ = ((4 << 3) + 4),
+ DCLO = ((4 << 3) + 5),
// SPECIAL3 Encoding of Function Field.
EXT = ((0 << 3) + 0),
@@ -927,6 +931,7 @@ class Instruction {
#define FunctionFieldToBitNumber(function) (1ULL << function)
+  // On r6, DCLZ_R6 aliases the existing MFLO encoding.
static const uint64_t kFunctionFieldRegisterTypeMask =
FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) |
FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) |
@@ -1171,6 +1176,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
switch (FunctionFieldRaw()) {
case MUL:
case CLZ:
+ case DCLZ:
return kRegisterType;
default:
return kUnsupported;
@@ -1252,6 +1258,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
#undef OpcodeToBitNumber
#undef FunctionFieldToBitNumber
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 958951a948..24e690dfb3 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -88,7 +89,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -139,14 +140,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
- const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
// Save all FPU registers before messing with them.
__ Dsubu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
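Note that the new loop indexes frame slots by register code rather than loop index, so non-allocatable codes (for example the reserved f28/f30) simply leave gaps. A hedged sketch of the addressing:

    #include <cstring>

    const int kDoubleSize = 8;

    // Slots are addressed by register *code*: a double register with code c
    // always lands at offset c * kDoubleSize, whatever its position in the
    // allocatable list.
    void SaveAllocatableDoubles(const int* allocatable_codes, int n,
                                const double* regs, char* frame) {
      for (int i = 0; i < n; ++i) {
        const int code = allocatable_codes[i];
        std::memcpy(frame + code * kDoubleSize, &regs[code], kDoubleSize);
      }
    }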
@@ -221,9 +224,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
@@ -289,9 +293,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
__ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index ffab261cd1..a8fd48e6a2 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -1179,7 +1179,16 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
}
break;
case MFLO:
- Format(instr, "mflo 'rd");
+ if (instr->Bits(25, 16) == 0) {
+ Format(instr, "mflo 'rd");
+ } else {
+ if ((instr->FunctionFieldRaw() == DCLZ_R6) && (instr->FdValue() == 1)) {
+ Format(instr, "dclz 'rd, 'rs");
+ } else if ((instr->FunctionFieldRaw() == DCLO_R6) &&
+ (instr->FdValue() == 1)) {
+ Format(instr, "dclo 'rd, 'rs");
+ }
+ }
break;
case D_MUL_MUH_U: // Equals to DMULTU.
if (kArchVariant != kMips64r6) {
@@ -1360,6 +1369,11 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
Format(instr, "clz 'rd, 'rs");
}
break;
+ case DCLZ:
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "dclz 'rd, 'rs");
+ }
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips64/frames-mips64.h b/deps/v8/src/mips64/frames-mips64.h
index 9b6d326275..9c42d8d95c 100644
--- a/deps/v8/src/mips64/frames-mips64.h
+++ b/deps/v8/src/mips64/frames-mips64.h
@@ -169,6 +169,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index ab697812de..cf8a4456f3 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -78,14 +78,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2};
@@ -108,6 +100,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return a0; }
@@ -229,6 +225,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -392,16 +395,39 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a0, // argument count (including receiver)
+ a0, // argument count (not including receiver)
a2, // address of first argument
      a1   // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (not including receiver)
+ a3, // original constructor
+ a1, // constructor to call
+ a2 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (argc)
+ a2, // address of first argument (argv)
+ a1 // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 26229c9d87..3cad6ba82f 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips64/macro-assembler-mips64.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -22,8 +23,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
has_frame_(false),
has_double_zero_reg_set_(false) {
if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -148,7 +149,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
+ int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -438,7 +439,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ld(scratch, FieldMemOperand(scratch, offset));
- ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ ld(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -3761,7 +3762,7 @@ void MacroAssembler::CopyFields(Register dst,
// Find a temp register in temps list.
for (int i = 0; i < kNumRegisters; i++) {
if ((temps & (1 << i)) != 0) {
- tmp.code_ = i;
+ tmp.reg_code = i;
break;
}
}
@@ -4780,7 +4781,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the builtins object into target register.
ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
+ ld(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
ld(target, ContextOperand(target, native_context_index));
}
@@ -4930,7 +4931,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
ld(dst, GlobalObjectOperand());
- ld(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+ ld(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -4943,7 +4944,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Load the global or builtins object from the current context.
ld(scratch,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ ld(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
ld(scratch,
@@ -4965,8 +4966,7 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
ld(function,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- ld(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
+ ld(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
ld(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -5794,8 +5794,12 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
// Note that we are using a 4-byte aligned 8-byte load.
- LoadWordPair(load_scratch,
- MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ if (emit_debug_code()) {
+ LoadWordPair(load_scratch,
+ MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ } else {
+ lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ }
And(t8, mask_scratch, load_scratch);
Branch(&done, ne, t8, Operand(zero_reg));
@@ -5874,14 +5878,14 @@ void MacroAssembler::EnsureNotWhite(
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Or(t8, t8, Operand(mask_scratch));
- StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Daddu(t8, t8, Operand(length));
- StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Addu(t8, t8, Operand(length));
+ sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
}
@@ -6042,8 +6046,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
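The same pattern as in the deoptimizer: candidates come from the RegisterConfiguration's allocatable list, and the first one whose bit is not already taken wins. A minimal sketch; in this version a miss returns -1 rather than asserting:

    #include <cstdint>

    // Pick the first allocatable code whose bit is not set in `taken`.
    int FirstFreeAllocatable(const int* allocatable_codes, int n,
                             uint32_t taken) {
      for (int i = 0; i < n; ++i) {
        const int code = allocatable_codes[i];
        if ((taken & (1u << code)) == 0) return code;
      }
      return -1;  // No free register among the allocatable set.
    }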
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 5dfee07ad9..fa3808fa74 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -13,17 +13,18 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_v0_Code};
-const Register kReturnRegister1 = {kRegister_v1_Code};
-const Register kJSFunctionRegister = {kRegister_a1_Code};
-const Register kContextRegister = {kRegister_s7_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_a7_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_t0_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_t1_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_t2_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
+const Register kReturnRegister0 = {Register::kCode_v0};
+const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kJSFunctionRegister = {Register::kCode_a1};
+const Register kContextRegister = {Register::kCpRegister};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_a7};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
// Forward declaration.
class JumpTarget;
@@ -1719,6 +1720,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1841,6 +1844,7 @@ class CodePatcher {
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index b82b2d9b3c..4a7fd7c10f 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -2481,7 +2481,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_float(fd_reg(), result);
}
@@ -2690,7 +2690,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_double(fd_reg(), result);
}
@@ -3364,8 +3364,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
SetResult(rd_reg(), alu_out);
break;
- case MFLO:
- SetResult(rd_reg(), get_register(LO));
+ case MFLO: // MFLO == DCLZ on R6.
+ if (kArchVariant != kMips64r6) {
+ DCHECK(sa() == 0);
+ alu_out = get_register(LO);
+ } else {
+        // MIPS64 spec: If no bits were set in GPR rs(), the result written to
+ // GPR rd() is 64.
+ DCHECK(sa() == 1);
+ alu_out = base::bits::CountLeadingZeros64(static_cast<int64_t>(rs_u()));
+ }
+ SetResult(rd_reg(), alu_out);
break;
// Instructions using HI and LO registers.
case MULT: { // MULT == D_MUL_MUH.
@@ -3393,8 +3402,22 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case MULTU:
u64hilo = static_cast<uint64_t>(rs_u() & 0xffffffff) *
static_cast<uint64_t>(rt_u() & 0xffffffff);
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ if (kArchVariant != kMips64r6) {
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ } else {
+ switch (sa()) {
+ case MUL_OP:
+ set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case DMULT: // DMULT == D_MUL_MUH.
if (kArchVariant != kMips64r6) {
@@ -3462,17 +3485,61 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
}
case DIVU:
- if (rt_u() != 0) {
- uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
- uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
- set_register(LO, rs_u_32 / rt_u_32);
- set_register(HI, rs_u_32 % rt_u_32);
+ switch (kArchVariant) {
+ case kMips64r6: {
+ uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
+ uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
+ switch (get_instr()->SaValue()) {
+ case DIV_OP:
+ if (rt_u_32 != 0) {
+ set_register(rd_reg(), rs_u_32 / rt_u_32);
+ }
+ break;
+ case MOD_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u_32 % rt_u_32);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } break;
+ default: {
+ if (rt_u() != 0) {
+ uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
+ uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
+ set_register(LO, rs_u_32 / rt_u_32);
+ set_register(HI, rs_u_32 % rt_u_32);
+ }
+ }
}
break;
case DDIVU:
- if (rt_u() != 0) {
- set_register(LO, rs_u() / rt_u());
- set_register(HI, rs_u() % rt_u());
+ switch (kArchVariant) {
+ case kMips64r6: {
+ switch (get_instr()->SaValue()) {
+ case DIV_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u() / rt_u());
+ }
+ break;
+ case MOD_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u() % rt_u());
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } break;
+ default: {
+ if (rt_u() != 0) {
+ set_register(LO, rs_u() / rt_u());
+ set_register(HI, rs_u() % rt_u());
+ }
+ }
}
break;
case ADD:
@@ -3607,7 +3674,13 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
// MIPS32 spec: If no bits were set in GPR rs(), the result written to
// GPR rd is 32.
alu_out = base::bits::CountLeadingZeros32(static_cast<uint32_t>(rs_u()));
- set_register(rd_reg(), alu_out);
+ SetResult(rd_reg(), alu_out);
+ break;
+ case DCLZ:
+ // MIPS64 spec: If no bits were set in GPR rs(), the result written to
+ // GPR rd is 64.
+ alu_out = base::bits::CountLeadingZeros64(static_cast<uint64_t>(rs_u()));
+ SetResult(rd_reg(), alu_out);
break;
default:
alu_out = 0x12345678;
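The count-leading-zeros rule quoted above (an all-zero input yields the register width) in a portable stand-in for base::bits::CountLeadingZeros64, for illustration only:

    #include <cstdint>

    int CountLeadingZeros64(uint64_t x) {
      if (x == 0) return 64;  // The spec case called out above.
      int n = 0;
      while ((x & (uint64_t{1} << 63)) == 0) {
        x <<= 1;
        ++n;
      }
      return n;
    }
    // CountLeadingZeros64(0) == 64, CountLeadingZeros64(1) == 63.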
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index e45cbd449e..68d518ea10 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -84,7 +84,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
// Calculated the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
@@ -526,7 +527,8 @@ class SimulatorStack : public v8::internal::AllStatic {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
diff --git a/deps/v8/src/modules.h b/deps/v8/src/modules.h
index 33afd6128d..f1dbd2516a 100644
--- a/deps/v8/src/modules.h
+++ b/deps/v8/src/modules.h
@@ -115,6 +115,7 @@ class ModuleDescriptor : public ZoneObject {
int index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MODULES_H_
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 4b1bdf78bd..242ff754ad 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -119,9 +119,6 @@ void HeapObject::HeapObjectVerify() {
case JS_GLOBAL_OBJECT_TYPE:
JSGlobalObject::cast(this)->JSGlobalObjectVerify();
break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
- break;
case CELL_TYPE:
Cell::cast(this)->CellVerify();
break;
@@ -574,23 +571,12 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
void JSGlobalObject::JSGlobalObjectVerify() {
CHECK(IsJSGlobalObject());
- JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSGlobalObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
+ // Do not check the dummy global object for the builtins.
+ if (GlobalDictionary::cast(properties())->NumberOfElements() == 0 &&
+ elements()->length() == 0) {
+ return;
}
-}
-
-
-void JSBuiltinsObject::JSBuiltinsObjectVerify() {
- CHECK(IsJSBuiltinsObject());
JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSBuiltinsObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
- }
}
@@ -933,6 +919,7 @@ void AccessCheckInfo::AccessCheckInfoVerify() {
CHECK(IsAccessCheckInfo());
VerifyPointer(named_callback());
VerifyPointer(indexed_callback());
+ VerifyPointer(callback());
VerifyPointer(data());
}
@@ -1051,7 +1038,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_objects_with_fast_properties_++;
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
GlobalDictionary* dict = global_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 3d39278cce..052fc51472 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -286,6 +286,24 @@ bool Object::KeyEquals(Object* second) {
}
+bool Object::FilterKey(PropertyAttributes filter) {
+ if ((filter & SYMBOLIC) && IsSymbol()) {
+ return true;
+ }
+
+ if ((filter & PRIVATE_SYMBOL) && IsSymbol() &&
+ Symbol::cast(this)->is_private()) {
+ return true;
+ }
+
+ if ((filter & STRING) && !IsSymbol()) {
+ return true;
+ }
+
+ return false;
+}
+
+
Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> object,
Representation representation) {
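
Object::FilterKey, added above, answers whether a key should be skipped for a given property filter: SYMBOLIC drops every symbol, PRIVATE_SYMBOL drops only private symbols, and STRING drops string-named keys. A self-contained sketch of the same bit tests, using stand-in flag values rather than the real PropertyAttributes bits:

    #include <cstdint>
    #include <cstdio>

    // Stand-in filter bits; the real values live in PropertyAttributes.
    enum Filter : uint32_t {
      STRING = 1 << 0,          // skip string-named keys
      SYMBOLIC = 1 << 1,        // skip all symbols
      PRIVATE_SYMBOL = 1 << 2,  // skip private symbols only
    };

    struct Key {
      bool is_symbol;
      bool is_private;
    };

    // Mirrors the FilterKey logic: returns true when |key| should be skipped.
    bool FilterKey(const Key& key, uint32_t filter) {
      if ((filter & SYMBOLIC) && key.is_symbol) return true;
      if ((filter & PRIVATE_SYMBOL) && key.is_symbol && key.is_private) return true;
      if ((filter & STRING) && !key.is_symbol) return true;
      return false;
    }

    int main() {
      Key str{false, false}, sym{true, false}, priv{true, true};
      std::printf("%d %d %d\n",
                  FilterKey(str, STRING),              // 1: skipped
                  FilterKey(sym, SYMBOLIC),            // 1: skipped
                  FilterKey(priv, PRIVATE_SYMBOL));    // 1: skipped
      return 0;
    }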
@@ -730,7 +748,11 @@ bool Object::IsTransitionArray() const {
bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+bool Object::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
+
+
bool Object::IsLiteralsArray() const { return IsFixedArray(); }
+bool Object::IsBindingsArray() const { return IsFixedArray(); }
bool Object::IsDeoptimizationInputData() const {
@@ -988,14 +1010,7 @@ bool Object::IsJSGlobalProxy() const {
}
-bool Object::IsGlobalObject() const {
- if (!IsHeapObject()) return false;
- return HeapObject::cast(this)->map()->IsGlobalObjectMap();
-}
-
-
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
bool Object::IsUndetectableObject() const {
@@ -1008,7 +1023,7 @@ bool Object::IsAccessCheckNeeded() const {
if (!IsHeapObject()) return false;
if (IsJSGlobalProxy()) {
const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
- GlobalObject* global = proxy->GetIsolate()->context()->global_object();
+ JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
return HeapObject::cast(this)->map()->is_access_check_needed();
@@ -1173,19 +1188,28 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
uint32_t index, Handle<Object> value,
LanguageMode language_mode) {
LookupIterator it(isolate, object, index);
- return SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN_NULL(
+ SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED));
+ return value;
}
-Handle<Object> Object::GetPrototypeSkipHiddenPrototypes(
- Isolate* isolate, Handle<Object> receiver) {
- PrototypeIterator iter(isolate, receiver);
- while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+Handle<Object> Object::GetPrototype(Isolate* isolate, Handle<Object> obj) {
+ // We don't expect access checks to be needed on JSProxy objects.
+ DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
+ Handle<Context> context(isolate->context());
+ if (obj->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(context, Handle<JSObject>::cast(obj))) {
+ return isolate->factory()->null_value();
+ }
+
+ PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
+ do {
+ iter.AdvanceIgnoringProxies();
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
return PrototypeIterator::GetCurrent(iter);
}
- iter.Advance();
- }
+ } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
return PrototypeIterator::GetCurrent(iter);
}
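
The rewritten Object::GetPrototype above replaces GetPrototypeSkipHiddenPrototypes: it returns null when an access check fails, then advances from the receiver past hidden prototypes, returning a proxy as soon as one appears. A toy model of the chain walk, with proxies and access checks omitted and stand-in types:

    #include <cstdio>

    struct Obj {
      const char* name;
      Obj* proto;
      bool hidden;  // hidden prototypes are skipped by the API-level walk
    };

    // Walks from |receiver| to its first non-hidden prototype.
    Obj* GetPrototype(Obj* receiver) {
      Obj* current = receiver->proto;
      while (current != nullptr && current->hidden) current = current->proto;
      return current;
    }

    int main() {
      Obj object_proto{"Object.prototype", nullptr, false};
      Obj hidden{"<hidden prototype>", &object_proto, true};
      Obj receiver{"receiver", &hidden, false};
      std::printf("%s\n", GetPrototype(&receiver)->name);  // Object.prototype
    }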
@@ -2103,8 +2127,10 @@ void WeakCell::clear_next(Heap* heap) {
bool WeakCell::next_cleared() { return next()->IsTheHole(); }
-int JSObject::GetHeaderSize() {
- InstanceType type = map()->instance_type();
+int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }
+
+
+int JSObject::GetHeaderSize(InstanceType type) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
@@ -2118,8 +2144,6 @@ int JSObject::GetHeaderSize() {
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
return JSGlobalObject::kSize;
- case JS_BUILTINS_OBJECT_TYPE:
- return JSBuiltinsObject::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::kSize;
case JS_VALUE_TYPE:
@@ -2161,15 +2185,18 @@ int JSObject::GetHeaderSize() {
}
-int JSObject::GetInternalFieldCount() {
- DCHECK(1 << kPointerSizeLog2 == kPointerSize);
- // Make sure to adjust for the number of in-object properties. These
- // properties do contribute to the size, but are not internal fields.
- return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
- map()->GetInObjectProperties();
+int JSObject::GetInternalFieldCount(Map* map) {
+ int instance_size = map->instance_size();
+ if (instance_size == kVariableSizeSentinel) return 0;
+ InstanceType instance_type = map->instance_type();
+ return ((instance_size - GetHeaderSize(instance_type)) >> kPointerSizeLog2) -
+ map->GetInObjectProperties();
}
+int JSObject::GetInternalFieldCount() { return GetInternalFieldCount(map()); }
+
+
int JSObject::GetInternalFieldOffset(int index) {
DCHECK(index < GetInternalFieldCount() && index >= 0);
return GetHeaderSize() + (kPointerSize * index);
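
The map-based GetInternalFieldCount above makes the arithmetic explicit: everything between the header and the in-object properties is internal fields, and a variable-size instance has none. A sketch of that calculation with illustrative numbers (64-bit, so kPointerSize is 8):

    #include <cstdio>

    // Internal fields = (instance size - header size) / pointer size,
    // minus the in-object properties, which occupy space but are not
    // internal fields. All values here are illustrative.
    int InternalFieldCount(int instance_size, int header_size,
                           int in_object_properties) {
      const int kPointerSize = 8;
      return (instance_size - header_size) / kPointerSize - in_object_properties;
    }

    int main() {
      // A 64-byte object with a 24-byte header and two in-object
      // properties leaves (64 - 24) / 8 - 2 = 3 internal fields.
      std::printf("%d\n", InternalFieldCount(64, 24, 2));  // 3
    }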
@@ -2361,27 +2388,11 @@ void Struct::InitializeBody(int object_size) {
}
-bool Object::ToArrayLength(uint32_t* index) {
- if (IsSmi()) {
- int value = Smi::cast(this)->value();
- if (value < 0) return false;
- *index = value;
- return true;
- }
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
- uint32_t uint_value = static_cast<uint32_t>(value);
- if (value == static_cast<double>(uint_value)) {
- *index = uint_value;
- return true;
- }
- }
- return false;
-}
+bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }
bool Object::ToArrayIndex(uint32_t* index) {
- return ToArrayLength(index) && *index != kMaxUInt32;
+ return Object::ToUint32(index) && *index != kMaxUInt32;
}
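
ToArrayLength now simply delegates to Object::ToUint32, whose heap-number path (see the objects.cc hunk later in this diff) accepts a double only when converting to uint32_t and back reproduces it exactly; ToArrayIndex additionally rejects kMaxUInt32, since the largest valid array index is 2^32 - 2. A standalone sketch of the round-trip test (the real code uses the FastD2UI/FastUI2D helpers for the casts):

    #include <cstdint>
    #include <cstdio>

    // Accepts |value| as a uint32 only if it is in range and survives
    // the uint32 round trip, i.e. it is a non-negative integral double.
    bool ToUint32(double value, uint32_t* out) {
      if (value < 0 || value > 4294967295.0) return false;  // out of range
      uint32_t uint_value = static_cast<uint32_t>(value);
      if (static_cast<double>(uint_value) != value) return false;  // not integral
      *out = uint_value;
      return true;
    }

    int main() {
      uint32_t n;
      std::printf("%d\n", ToUint32(4294967295.0, &n));  // 1: kMaxUInt32 fits
      std::printf("%d\n", ToUint32(4294967296.0, &n));  // 0: 2^32 overflows
      std::printf("%d\n", ToUint32(4.5, &n));           // 0: not integral
    }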
@@ -3292,7 +3303,6 @@ CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(GlobalDictionary)
-CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(Int16x8)
@@ -3301,7 +3311,6 @@ CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
@@ -3506,6 +3515,75 @@ int LiteralsArray::literals_count() const {
}
+Object* BindingsArray::get(int index) const { return FixedArray::get(index); }
+
+
+void BindingsArray::set(int index, Object* value) {
+ FixedArray::set(index, value);
+}
+
+
+void BindingsArray::set(int index, Smi* value) {
+ FixedArray::set(index, value);
+}
+
+
+void BindingsArray::set(int index, Object* value, WriteBarrierMode mode) {
+ FixedArray::set(index, value, mode);
+}
+
+
+int BindingsArray::length() const { return FixedArray::length(); }
+
+
+BindingsArray* BindingsArray::cast(Object* object) {
+ SLOW_DCHECK(object->IsBindingsArray());
+ return reinterpret_cast<BindingsArray*>(object);
+}
+
+void BindingsArray::set_feedback_vector(TypeFeedbackVector* vector) {
+ set(kVectorIndex, vector);
+}
+
+
+TypeFeedbackVector* BindingsArray::feedback_vector() const {
+ return TypeFeedbackVector::cast(get(kVectorIndex));
+}
+
+
+JSReceiver* BindingsArray::bound_function() const {
+ return JSReceiver::cast(get(kBoundFunctionIndex));
+}
+
+
+void BindingsArray::set_bound_function(JSReceiver* function) {
+ set(kBoundFunctionIndex, function);
+}
+
+
+Object* BindingsArray::bound_this() const { return get(kBoundThisIndex); }
+
+
+void BindingsArray::set_bound_this(Object* bound_this) {
+ set(kBoundThisIndex, bound_this);
+}
+
+
+Object* BindingsArray::binding(int binding_index) const {
+ return get(kFirstBindingIndex + binding_index);
+}
+
+
+void BindingsArray::set_binding(int binding_index, Object* binding) {
+ set(kFirstBindingIndex + binding_index, binding);
+}
+
+
+int BindingsArray::bindings_count() const {
+ return length() - kFirstBindingIndex;
+}
+
+
void HandlerTable::SetRangeStart(int index, int value) {
set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
}
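
The new BindingsArray accessors above layer a typed view over a plain FixedArray: fixed slots for the feedback vector, bound function and bound receiver, then the bound arguments. A minimal sketch of the same layout trick, with std::vector<int> standing in for the backing store and all names illustrative:

    #include <cassert>
    #include <vector>

    using Object = int;  // stand-in for the real tagged Object type

    class BindingsView {
     public:
      static const int kVectorIndex = 0;
      static const int kBoundFunctionIndex = 1;
      static const int kBoundThisIndex = 2;
      static const int kFirstBindingIndex = 3;

      explicit BindingsView(std::vector<Object>* backing) : backing_(backing) {}

      Object bound_this() const { return (*backing_)[kBoundThisIndex]; }
      Object binding(int i) const { return (*backing_)[kFirstBindingIndex + i]; }
      int bindings_count() const {
        return static_cast<int>(backing_->size()) - kFirstBindingIndex;
      }

     private:
      std::vector<Object>* backing_;
    };

    int main() {
      std::vector<Object> raw = {0, 1, 42, 7, 8};  // vector, fn, this, args...
      BindingsView view(&raw);
      assert(view.bound_this() == 42);
      assert(view.bindings_count() == 2 && view.binding(1) == 8);
      return 0;
    }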
@@ -3585,14 +3663,6 @@ FreeSpace* FreeSpace::next() {
}
-FreeSpace** FreeSpace::next_address() {
- DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
- (!GetHeap()->deserialization_complete() && map() == NULL));
- DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
- return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
-}
-
-
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
@@ -3650,6 +3720,7 @@ bool Name::Equals(Handle<Name> one, Handle<Name> two) {
ACCESSORS(Symbol, name, Object, kNameOffset)
SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
+BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
bool String::Equals(String* other) {
@@ -4826,6 +4897,7 @@ bool Map::CanTransition() {
}
+bool Map::IsBooleanMap() { return this == GetHeap()->boolean_map(); }
bool Map::IsPrimitiveMap() {
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
return instance_type() <= LAST_PRIMITIVE_TYPE;
@@ -4847,10 +4919,7 @@ bool Map::IsJSGlobalProxyMap() {
bool Map::IsJSGlobalObjectMap() {
return instance_type() == JS_GLOBAL_OBJECT_TYPE;
}
-bool Map::IsGlobalObjectMap() {
- const InstanceType type = instance_type();
- return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
-}
+bool Map::IsJSTypedArrayMap() { return instance_type() == JS_TYPED_ARRAY_TYPE; }
bool Map::CanOmitMapChecks() {
@@ -4920,12 +4989,8 @@ bool Code::IsCodeStubOrIC() {
bool Code::IsJavaScriptCode() {
- if (kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION) {
- return true;
- }
- Handle<Code> interpreter_entry =
- GetIsolate()->builtins()->InterpreterEntryTrampoline();
- return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+ return kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION ||
+ is_interpreter_entry_trampoline();
}
@@ -4974,6 +5039,12 @@ inline bool Code::is_hydrogen_stub() {
}
+inline bool Code::is_interpreter_entry_trampoline() {
+ Handle<Code> interpreter_entry =
+ GetIsolate()->builtins()->InterpreterEntryTrampoline();
+ return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+}
+
inline void Code::set_is_crankshafted(bool value) {
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = IsCrankshaftedField::update(previous, value);
@@ -5544,13 +5615,18 @@ void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
}
+Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
+ return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
+ map->unused_property_fields());
+}
+
+
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
-ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
-ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
-ACCESSORS(GlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
+ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
+ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
@@ -5581,6 +5657,7 @@ ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
+ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
@@ -6144,20 +6221,6 @@ bool SharedFunctionInfo::IsBuiltin() {
bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
-bool JSFunction::IsBuiltin() { return shared()->IsBuiltin(); }
-
-
-bool JSFunction::IsSubjectToDebugging() {
- return shared()->IsSubjectToDebugging();
-}
-
-
-bool JSFunction::NeedsArgumentsAdaption() {
- return shared()->internal_formal_parameter_count() !=
- SharedFunctionInfo::kDontAdaptArgumentsSentinel;
-}
-
-
bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
@@ -6305,11 +6368,6 @@ bool JSFunction::is_compiled() {
}
-bool JSFunction::has_simple_parameters() {
- return shared()->has_simple_parameters();
-}
-
-
LiteralsArray* JSFunction::literals() {
DCHECK(!shared()->bound());
return LiteralsArray::cast(literals_or_bindings());
@@ -6322,13 +6380,13 @@ void JSFunction::set_literals(LiteralsArray* literals) {
}
-FixedArray* JSFunction::function_bindings() {
+BindingsArray* JSFunction::function_bindings() {
DCHECK(shared()->bound());
- return literals_or_bindings();
+ return BindingsArray::cast(literals_or_bindings());
}
-void JSFunction::set_function_bindings(FixedArray* bindings) {
+void JSFunction::set_function_bindings(BindingsArray* bindings) {
DCHECK(shared()->bound());
// Bound function literal may be initialized to the empty fixed array
// before the bindings are set.
@@ -6714,6 +6772,8 @@ ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
+ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
+ACCESSORS(JSRegExp, source, Object, kSourceOffset)
JSRegExp::Type JSRegExp::TypeTag() {
@@ -6877,14 +6937,14 @@ bool JSObject::HasIndexedInterceptor() {
NameDictionary* JSObject::property_dictionary() {
DCHECK(!HasFastProperties());
- DCHECK(!IsGlobalObject());
+ DCHECK(!IsJSGlobalObject());
return NameDictionary::cast(properties());
}
GlobalDictionary* JSObject::global_dictionary() {
DCHECK(!HasFastProperties());
- DCHECK(IsGlobalObject());
+ DCHECK(IsJSGlobalObject());
return GlobalDictionary::cast(properties());
}
@@ -7185,29 +7245,29 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
}
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<JSReceiver> holder,
+ Handle<Name> name,
+ Handle<Object> receiver,
+ LanguageMode language_mode) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ name->GetIsolate(), receiver, name, holder);
+ return GetProperty(&it, language_mode);
+}
+
+
Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<Name> name) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
- }
-
- Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(object->GetIsolate(), object, name);
+ return HasProperty(&it);
}
Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Handle<Name> name) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
- }
-
- Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ return HasProperty(&it);
}
@@ -7228,31 +7288,16 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Isolate* isolate = object->GetIsolate();
- Handle<Name> name = isolate->factory()->Uint32ToString(index);
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
- }
-
- Maybe<PropertyAttributes> result = GetElementAttributes(object, index);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ LookupIterator it(object->GetIsolate(), object, index);
+ return HasProperty(&it);
}
Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
uint32_t index) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Isolate* isolate = object->GetIsolate();
- Handle<Name> name = isolate->factory()->Uint32ToString(index);
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
- }
-
- Maybe<PropertyAttributes> result = GetOwnElementAttributes(object, index);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ LookupIterator it(object->GetIsolate(), object, index,
+ LookupIterator::HIDDEN);
+ return HasProperty(&it);
}
@@ -7277,7 +7322,7 @@ bool JSGlobalObject::IsDetached() {
}
-bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) const {
+bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
const PrototypeIterator iter(this->GetIsolate(),
const_cast<JSGlobalProxy*>(this));
return iter.GetCurrent() != global;
@@ -7798,7 +7843,8 @@ Relocatable::~Relocatable() {
// static
-int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
+template <int start_offset>
+int FlexibleBodyDescriptor<start_offset>::SizeOf(Map* map, HeapObject* object) {
return map->instance_size();
}
@@ -7809,12 +7855,6 @@ int FixedArray::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
}
-// static
-int StructBodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
-}
-
-
void Foreign::ForeignIterateBody(ObjectVisitor* v) {
v->VisitExternalReference(
reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
@@ -7871,52 +7911,120 @@ void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
}
-static inline void IterateBodyUsingLayoutDescriptor(HeapObject* object,
- int start_offset,
- int end_offset,
- ObjectVisitor* v) {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
-
- LayoutDescriptorHelper helper(object->map());
- DCHECK(!helper.all_fields_tagged());
-
- for (int offset = start_offset; offset < end_offset; offset += kPointerSize) {
- // Visit all tagged fields.
- if (helper.IsTagged(offset)) {
- v->VisitPointer(HeapObject::RawField(object, offset));
+void BodyDescriptorBase::IterateBodyImpl(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ IteratePointers(obj, start_offset, end_offset, v);
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kPointerSize) &&
+ IsAligned(end_offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers(obj, offset, end_of_region_offset, v);
+ }
+ offset = end_of_region_offset;
}
}
}
-template<int start_offset, int end_offset, int size>
-void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
- HeapObject* obj,
- ObjectVisitor* v) {
+template <typename StaticVisitor>
+void BodyDescriptorBase::IterateBodyImpl(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset) {
if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
+ IteratePointers<StaticVisitor>(heap, obj, start_offset, end_offset);
} else {
- IterateBodyUsingLayoutDescriptor(obj, start_offset, end_offset, v);
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kPointerSize) &&
+ IsAligned(end_offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers<StaticVisitor>(heap, obj, offset, end_of_region_offset);
+ }
+ offset = end_of_region_offset;
+ }
}
}
-template<int start_offset>
-void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, object_size));
- } else {
- IterateBodyUsingLayoutDescriptor(obj, start_offset, object_size, v);
- }
+void BodyDescriptorBase::IteratePointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v) {
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
}
+template <typename StaticVisitor>
+void BodyDescriptorBase::IteratePointers(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset) {
+ StaticVisitor::VisitPointers(heap, obj,
+ HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
+}
+
+
+// Iterates the function object according to the visiting policy.
+template <JSFunction::BodyVisitingPolicy body_visiting_policy>
+class JSFunction::BodyDescriptorImpl : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kNonWeakFieldsEndOffset == kCodeEntryOffset);
+ STATIC_ASSERT(kCodeEntryOffset + kPointerSize == kNextFunctionLinkOffset);
+ STATIC_ASSERT(kNextFunctionLinkOffset + kPointerSize == kSize);
+
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOffset, kNonWeakFieldsEndOffset, v);
+
+ if (body_visiting_policy & kVisitCodeEntry) {
+ v->VisitCodeEntry(obj->address() + kCodeEntryOffset);
+ }
+
+ if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers(obj, kNextFunctionLinkOffset, kSize, v);
+ }
+
+ // TODO(ishell): v8:4531, fix when JSFunctions are allowed to have in-object
+ // properties
+ // IterateBodyImpl(obj, kSize, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kNonWeakFieldsEndOffset);
+
+ if (body_visiting_policy & kVisitCodeEntry) {
+ StaticVisitor::VisitCodeEntry(heap, obj,
+ obj->address() + kCodeEntryOffset);
+ }
+
+ if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers<StaticVisitor>(heap, obj, kNextFunctionLinkOffset, kSize);
+ }
+
+ // TODO(ishell): v8:4531, fix when JSFunctions are allowed to have in-object
+ // properties
+ // IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ // TODO(ishell): v8:4531, fix when JSFunctions are allowed to have in-object
+ // properties
+ return JSFunction::kSize;
+ }
+};
+
+
template<class Derived, class TableType>
Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType* table(TableType::cast(this->table()));
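
BodyDescriptorBase::IterateBodyImpl above replaces the per-word loop of IterateBodyUsingLayoutDescriptor with region-at-a-time iteration: the layout helper hands back whole contiguous tagged ranges, and the visitor runs once per range. A toy version of that structure, with stand-in types in place of HeapObject and ObjectVisitor:

    #include <cstdio>
    #include <vector>

    // A region of an object body; |tagged| regions hold pointers the GC
    // must visit, untagged ones hold raw data such as unboxed doubles.
    struct Region {
      int start, end;
      bool tagged;
    };

    // Visits each tagged region once, instead of testing every word.
    void IterateBody(const std::vector<Region>& layout,
                     void (*visit)(int start, int end)) {
      for (const Region& r : layout) {
        if (r.tagged) visit(r.start, r.end);
      }
    }

    int main() {
      // Two tagged regions split by an unboxed double at offsets [16, 24).
      std::vector<Region> layout = {
          {8, 16, true}, {16, 24, false}, {24, 40, true}};
      IterateBody(layout, [](int s, int e) { std::printf("[%d, %d)\n", s, e); });
    }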
@@ -8046,6 +8154,7 @@ String::SubStringRange::iterator String::SubStringRange::end() {
#undef NOBARRIER_READ_BYTE_FIELD
#undef NOBARRIER_WRITE_BYTE_FIELD
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 8dfd0a17b0..a845e06f23 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -33,7 +33,7 @@ void Object::Print(std::ostream& os) { // NOLINT
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
- os << "" << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
+ os << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
}
@@ -95,9 +95,11 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_ARRAY_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_REGEXP_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
+ case JS_REGEXP_TYPE:
+ JSRegExp::cast(this)->JSRegExpPrint(os);
+ break;
case ODDBALL_TYPE:
Oddball::cast(this)->to_string()->Print(os);
break;
@@ -113,12 +115,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_GLOBAL_OBJECT_TYPE:
JSGlobalObject::cast(this)->JSGlobalObjectPrint(os);
break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(os);
- break;
case JS_VALUE_TYPE:
- os << "Value wrapper around:";
- JSValue::cast(this)->value()->Print(os);
+ JSValue::cast(this)->JSValuePrint(os);
break;
case JS_DATE_TYPE:
JSDate::cast(this)->JSDatePrint(os);
@@ -273,7 +271,7 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- os << " ";
+ os << "\n ";
descs->GetKey(i)->NamePrint(os);
os << ": ";
switch (descs->GetType(i)) {
@@ -284,24 +282,23 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
} else {
os << Brief(RawFastPropertyAt(index));
}
- os << " (data field at offset " << index.property_index() << ")\n";
+ os << " (data field at offset " << index.property_index() << ")";
break;
}
case ACCESSOR: {
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
- os << " (accessor field at offset " << index.property_index()
- << ")\n";
+ os << " (accessor field at offset " << index.property_index() << ")";
break;
}
case DATA_CONSTANT:
- os << Brief(descs->GetConstant(i)) << " (data constant)\n";
+ os << Brief(descs->GetConstant(i)) << " (data constant)";
break;
case ACCESSOR_CONSTANT:
- os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)\n";
+ os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)";
break;
}
}
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
global_dictionary()->Print(os);
} else {
property_dictionary()->Print(os);
@@ -313,7 +310,7 @@ template <class T>
static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
T* p = T::cast(object);
for (int i = 0; i < p->length(); i++) {
- os << " " << i << ": " << p->get_scalar(i) << "\n";
+ os << "\n " << i << ": " << p->get_scalar(i);
}
}
@@ -329,7 +326,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- os << " " << i << ": " << Brief(p->get(i)) << "\n";
+ os << "\n " << i << ": " << Brief(p->get(i));
}
break;
}
@@ -339,13 +336,12 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
if (elements()->length() > 0) {
FixedDoubleArray* p = FixedDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- os << " " << i << ": ";
+ os << "\n " << i << ": ";
if (p->is_the_hole(i)) {
os << "<the hole>";
} else {
os << p->get_scalar(i);
}
- os << "\n";
}
}
break;
@@ -376,44 +372,58 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
- os << " parameter map:";
+ os << "\n parameter map:";
for (int i = 2; i < p->length(); i++) {
os << " " << (i - 2) << ":" << Brief(p->get(i));
}
os << "\n context: " << Brief(p->get(0))
- << "\n arguments: " << Brief(p->get(1)) << "\n";
+ << "\n arguments: " << Brief(p->get(1));
break;
}
}
}
-void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSObject");
+static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
+ const char* id) { // NOLINT
+ obj->PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- PrototypeIterator iter(GetIsolate(), this);
- os << " - map = " << reinterpret_cast<void*>(map()) << " ["
- << ElementsKindToString(this->map()->elements_kind())
- << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent())
- << "\n {\n";
- PrintProperties(os);
- PrintTransitions(os);
- PrintElements(os);
- os << " }\n";
+ PrototypeIterator iter(obj->GetIsolate(), obj);
+ os << " - map = " << reinterpret_cast<void*>(obj->map()) << " ["
+ << ElementsKindToString(obj->map()->elements_kind())
+ << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+}
+
+
+static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
+ bool print_elements = true) {
+ os << "\n {";
+ obj->PrintProperties(os);
+ obj->PrintTransitions(os);
+ if (print_elements) obj->PrintElements(os);
+ os << "\n }\n";
+}
+
+
+void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSObject");
+ JSObjectPrintBody(os, this);
+}
+
+
+void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSRegExp");
+ os << "\n - data = " << Brief(data());
+ JSObjectPrintBody(os, this);
}
void JSModule::JSModulePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSModule");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n"
- << " - context = ";
- context()->Print(os);
- os << " - scope_info = " << Brief(scope_info())
- << ElementsKindToString(this->map()->elements_kind()) << " {\n";
- PrintProperties(os);
- PrintElements(os);
- os << " }\n";
+ JSObjectPrintHeader(os, this, "JSModule");
+ os << "\n - context = " << Brief(context());
+ os << " - scope_info = " << Brief(scope_info());
+ JSObjectPrintBody(os, this);
}
@@ -461,6 +471,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_access_check_needed()) os << " - access_check_needed\n";
if (!is_extensible()) os << " - non-extensible\n";
if (is_observed()) os << " - observed\n";
+ if (is_strong()) os << " - strong_map\n";
if (is_prototype_map()) {
os << " - prototype_map\n";
os << " - prototype info: " << Brief(prototype_info());
@@ -473,9 +484,11 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (FLAG_unbox_double_fields) {
os << "\n - layout descriptor: " << Brief(layout_descriptor());
}
- if (TransitionArray::NumberOfTransitions(raw_transitions()) > 0) {
- os << "\n - transitions: ";
- TransitionArray::PrintTransitions(os, raw_transitions());
+ int nof_transitions = TransitionArray::NumberOfTransitions(raw_transitions());
+ if (nof_transitions > 0) {
+ os << "\n - transitions #" << nof_transitions << ": "
+ << Brief(raw_transitions());
+ TransitionArray::PrintTransitions(os, raw_transitions(), false);
}
os << "\n - prototype: " << Brief(prototype());
os << "\n - constructor: " << Brief(GetConstructor());
@@ -539,6 +552,32 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
}
+void TypeFeedbackMetadata::Print() {
+ OFStream os(stdout);
+ TypeFeedbackMetadataPrint(os);
+ os << std::flush;
+}
+
+
+void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "TypeFeedbackMetadata");
+ os << " - length: " << length();
+ if (length() == 0) {
+ os << " (empty)\n";
+ return;
+ }
+
+ TypeFeedbackMetadataIterator iter(this);
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+ os << "\n Slot " << slot << " " << kind;
+ }
+ os << "\n";
+}
+
+
void TypeFeedbackVector::Print() {
OFStream os(stdout);
TypeFeedbackVectorPrint(os);
@@ -557,56 +596,50 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
os << "\n - ics with type info: " << ic_with_type_info_count();
os << "\n - generic ics: " << ic_generic_count();
- if (Slots() > 0) {
- for (int i = 0; i < Slots(); i++) {
- FeedbackVectorSlot slot(i);
- os << "\n Slot " << i << " [" << GetIndex(slot)
- << "]: " << Brief(Get(slot));
- }
- }
-
- if (ICSlots() > 0) {
- DCHECK(elements_per_ic_slot() == 2);
-
- for (int i = 0; i < ICSlots(); i++) {
- FeedbackVectorICSlot slot(i);
- FeedbackVectorSlotKind kind = GetKind(slot);
- os << "\n ICSlot " << i << " " << kind << " ";
- switch (kind) {
- case FeedbackVectorSlotKind::LOAD_IC: {
- LoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
- KeyedLoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::CALL_IC: {
- CallICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::STORE_IC: {
- StoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::KEYED_STORE_IC: {
- KeyedStoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::UNUSED:
- case FeedbackVectorSlotKind::KINDS_NUMBER:
- UNREACHABLE();
- break;
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+
+ os << "\n Slot " << slot << " " << kind << " ";
+ switch (kind) {
+ case FeedbackVectorSlotKind::LOAD_IC: {
+ LoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+ KeyedLoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::CALL_IC: {
+ CallICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::STORE_IC: {
+ StoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ KeyedStoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
}
+ case FeedbackVectorSlotKind::GENERAL:
+ break;
+ case FeedbackVectorSlotKind::INVALID:
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ UNREACHABLE();
+ break;
+ }
- os << "\n [" << GetIndex(slot) << "]: " << Brief(Get(slot));
- os << "\n [" << (GetIndex(slot) + 1)
- << "]: " << Brief(get(GetIndex(slot) + 1));
+ int entry_size = iter.entry_size();
+ for (int i = 0; i < entry_size; i++) {
+ int index = GetIndex(slot) + i;
+ os << "\n [" << index << "]: " << Brief(get(index));
}
}
os << "\n";
@@ -614,20 +647,21 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ValueObject");
- value()->Print(os);
+ JSObjectPrintHeader(os, this, "JSValue");
+ os << "\n - value = " << Brief(value());
+ JSObjectPrintBody(os, this);
}
void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSMessageObject");
- os << " - type: " << type();
+ JSObjectPrintHeader(os, this, "JSMessageObject");
+ os << "\n - type: " << type();
os << "\n - arguments: " << Brief(argument());
os << "\n - start_position: " << start_position();
os << "\n - end_position: " << end_position();
os << "\n - script: " << Brief(script());
os << "\n - stack_frames: " << Brief(stack_frames());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
@@ -675,17 +709,15 @@ static const char* const weekdays[] = {
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSDate");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - value = ";
- value()->Print(os);
+ JSObjectPrintHeader(os, this, "JSDate");
+ os << "\n - value = " << Brief(value());
if (!year()->IsSmi()) {
- os << " - time = NaN\n";
+ os << "\n - time = NaN\n";
} else {
// TODO(svenpanne) Add some basic formatting to our streams.
ScopedVector<char> buf(100);
SNPrintF(
- buf, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
+ buf, "\n - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
year()->IsSmi() ? Smi::cast(year())->value() : -1,
month()->IsSmi() ? Smi::cast(month())->value() : -1,
@@ -695,6 +727,7 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
os << buf.start();
}
+ JSObjectPrintBody(os, this);
}
@@ -723,18 +756,16 @@ void JSFunctionProxy::JSFunctionProxyPrint(std::ostream& os) { // NOLINT
void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSSet");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+ JSObjectPrintHeader(os, this, "JSSet");
os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSMap");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+ JSObjectPrintHeader(os, this, "JSMap");
os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
@@ -742,8 +773,7 @@ template <class Derived, class TableType>
void
OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIteratorPrint(
std::ostream& os) { // NOLINT
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - table = " << Brief(table());
+ os << "\n - table = " << Brief(table());
os << "\n - index = " << Brief(index());
os << "\n - kind = " << Brief(kind());
os << "\n";
@@ -761,83 +791,78 @@ template void OrderedHashTableIterator<
void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSSetIterator");
+ JSObjectPrintHeader(os, this, "JSSetIterator");
OrderedHashTableIteratorPrint(os);
}
void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSMapIterator");
+ JSObjectPrintHeader(os, this, "JSMapIterator");
OrderedHashTableIteratorPrint(os);
}
void JSIteratorResult::JSIteratorResultPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSIteratorResult");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - done = " << Brief(done()) << "\n";
- os << " - value = " << Brief(value()) << "\n";
+ JSObjectPrintHeader(os, this, "JSIteratorResult");
+ os << "\n - done = " << Brief(done());
+ os << "\n - value = " << Brief(value());
os << "\n";
}
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSWeakMap");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintHeader(os, this, "JSWeakMap");
+ os << "\n - table = " << Brief(table());
+ JSObjectPrintBody(os, this);
}
void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSWeakSet");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintHeader(os, this, "JSWeakSet");
+ os << "\n - table = " << Brief(table());
+ JSObjectPrintBody(os, this);
}
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSArrayBuffer");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - backing_store = " << backing_store() << "\n";
- os << " - byte_length = " << Brief(byte_length());
+ JSObjectPrintHeader(os, this, "JSArrayBuffer");
+ os << "\n - backing_store = " << backing_store();
+ os << "\n - byte_length = " << Brief(byte_length());
if (was_neutered()) os << " - neutered\n";
- os << "\n";
+ JSObjectPrintBody(os, this, !was_neutered());
}
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSTypedArray");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - buffer = " << Brief(buffer());
+ JSObjectPrintHeader(os, this, "JSTypedArray");
+ os << "\n - buffer = " << Brief(buffer());
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
os << "\n - length = " << Brief(length());
if (WasNeutered()) os << " - neutered\n";
- os << "\n";
- if (!WasNeutered()) PrintElements(os);
+ JSObjectPrintBody(os, this, !WasNeutered());
}
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSDataView");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - buffer =" << Brief(buffer());
+ JSObjectPrintHeader(os, this, "JSDataView");
+ os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
if (WasNeutered()) os << " - neutered\n";
- os << "\n";
+ JSObjectPrintBody(os, this, !WasNeutered());
}
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Function");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - initial_map = ";
+ JSObjectPrintHeader(os, this, "Function");
+ os << "\n - initial_map = ";
if (has_initial_map()) os << Brief(initial_map());
os << "\n - shared_info = " << Brief(shared());
os << "\n - name = " << Brief(shared()->name());
+ if (shared()->is_generator()) {
+ os << "\n - generator";
+ }
os << "\n - context = " << Brief(context());
if (shared()->bound()) {
os << "\n - bindings = " << Brief(function_bindings());
@@ -845,10 +870,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - literals = " << Brief(literals());
}
os << "\n - code = " << Brief(code());
- os << "\n";
- PrintProperties(os);
- PrintElements(os);
- os << "\n";
+ JSObjectPrintBody(os, this);
}
@@ -904,19 +926,18 @@ void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
}
-void JSBuiltinsObject::JSBuiltinsObjectPrint(std::ostream& os) { // NOLINT
- os << "builtins ";
- JSObjectPrint(os);
-}
-
-
void Cell::CellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Cell");
+ os << " - value: " << Brief(value());
+ os << "\n";
}
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
+ os << " - value: " << Brief(value());
+ os << "\n - details: " << property_details();
+ os << "\n";
}
@@ -927,6 +948,7 @@ void WeakCell::WeakCellPrint(std::ostream& os) { // NOLINT
} else {
os << "\n - value: " << Brief(value());
}
+ os << "\n";
}
@@ -942,6 +964,7 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
os << "foreign address : " << foreign_address();
+ os << "\n";
}
@@ -995,6 +1018,7 @@ void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessCheckInfo");
os << "\n - named_callback: " << Brief(named_callback());
os << "\n - indexed_callback: " << Brief(indexed_callback());
+ os << "\n - callback: " << Brief(callback());
os << "\n - data: " << Brief(data());
os << "\n";
}
@@ -1237,11 +1261,11 @@ void DescriptorArray::Print() {
void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
HandleScope scope(GetIsolate());
- os << "Descriptor array " << number_of_descriptors() << "\n";
+ os << "Descriptor array #" << number_of_descriptors();
for (int i = 0; i < number_of_descriptors(); i++) {
Descriptor desc;
Get(i, &desc);
- os << " " << i << ": " << desc << "\n";
+ os << "\n " << i << ": " << desc;
}
os << "\n";
}
@@ -1250,7 +1274,7 @@ void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
void TransitionArray::Print() {
OFStream os(stdout);
TransitionArray::PrintTransitions(os, this);
- os << std::flush;
+ os << "\n" << std::flush;
}
@@ -1258,12 +1282,12 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
bool print_header) { // NOLINT
int num_transitions = NumberOfTransitions(transitions);
if (print_header) {
- os << "Transition array " << num_transitions << "\n";
+ os << "Transition array #" << num_transitions << ":";
}
for (int i = 0; i < num_transitions; i++) {
Name* key = GetKey(transitions, i);
Map* target = GetTarget(transitions, i);
- os << " ";
+ os << "\n ";
#ifdef OBJECT_PRINT
key->NamePrint(os);
#else
@@ -1272,19 +1296,19 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
os << ": ";
Heap* heap = key->GetHeap();
if (key == heap->nonextensible_symbol()) {
- os << " (transition to non-extensible)";
+ os << "(transition to non-extensible)";
} else if (key == heap->sealed_symbol()) {
- os << " (transition to sealed)";
+ os << "(transition to sealed)";
} else if (key == heap->frozen_symbol()) {
- os << " (transition to frozen)";
+ os << "(transition to frozen)";
} else if (key == heap->elements_transition_symbol()) {
- os << " (transition to " << ElementsKindToString(target->elements_kind())
+ os << "(transition to " << ElementsKindToString(target->elements_kind())
<< ")";
} else if (key == heap->observed_symbol()) {
os << " (transition to Object.observe)";
} else {
PropertyDetails details = GetTargetDetails(key, target);
- os << " (transition to ";
+ os << "(transition to ";
if (details.location() == kDescriptor) {
os << "immutable ";
}
@@ -1296,13 +1320,17 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
}
os << "), attrs: " << details.attributes();
}
- os << " -> " << Brief(target) << "\n";
+ os << " -> " << Brief(target);
}
}
void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
- TransitionArray::PrintTransitions(os, map()->raw_transitions());
+ Object* transitions = map()->raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ if (num_transitions == 0) return;
+ os << "\n - transitions";
+ TransitionArray::PrintTransitions(os, transitions, false);
}
#endif // defined(DEBUG) || defined(OBJECT_PRINT)
} // namespace internal
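
Most of the objects-printer.cc changes above converge on one convention: field printers emit a leading "\n - ..." instead of a trailing newline, so JSObjectPrintHeader and JSObjectPrintBody can be chained and the body helper appends the single closing newline. A small sketch of why that composes cleanly, with a hypothetical Thing type:

    #include <iostream>

    struct Thing {
      void PrintHeader(std::ostream& os) const { os << "0xdead: [Thing]"; }
      void PrintFields(std::ostream& os) const {
        // Leading newlines let any number of field printers concatenate.
        os << "\n - value = 42";
        os << "\n - table = <empty>";
      }
    };

    int main() {
      Thing t;
      t.PrintHeader(std::cout);
      t.PrintFields(std::cout);
      std::cout << "\n";  // exactly one terminating newline, added once
    }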
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 9b2e1d0b7e..7f5ce5a091 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -24,19 +24,21 @@
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/execution.h"
-#include "src/field-index-inl.h"
#include "src/field-index.h"
+#include "src/field-index-inl.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/hydrogen.h"
#include "src/ic/ic.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
+#include "src/key-accumulator.h"
+#include "src/list.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/safepoint-table.h"
#include "src/string-builder.h"
@@ -333,21 +335,37 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
return Just(false);
}
} else if (x->IsSymbol()) {
- return Just(x.is_identical_to(y));
+ if (y->IsSymbol()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
} else if (x->IsSimd128Value()) {
- if (!y->IsSimd128Value()) return Just(false);
- return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
- Handle<Simd128Value>::cast(y)));
+ if (y->IsSimd128Value()) {
+ return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
+ Handle<Simd128Value>::cast(y)));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
} else if (x->IsJSReceiver() && !x->IsUndetectableObject()) {
if (y->IsJSReceiver()) {
return Just(x.is_identical_to(y));
- } else if (y->IsNull() || y->IsSimd128Value() || y->IsSymbol() ||
- y->IsUndefined()) {
+ } else if (y->IsNull() || y->IsUndefined()) {
return Just(false);
} else if (y->IsBoolean()) {
y = Oddball::ToNumber(Handle<Oddball>::cast(y));
- }
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x)).ToHandle(&x)) {
+ } else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
+ .ToHandle(&x)) {
return Nothing<bool>();
}
} else {
@@ -381,7 +399,9 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
return isolate->factory()->undefined_string();
}
if (object->IsBoolean()) return isolate->factory()->boolean_string();
+ if (object->IsString()) return isolate->factory()->string_string();
if (object->IsSymbol()) return isolate->factory()->symbol_string();
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
if (object->Is##Type()) return isolate->factory()->type##_string();
SIMD128_TYPES(SIMD128_TYPE)
@@ -622,6 +642,44 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
}
+// static
+Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ // Call the "has" trap on proxies.
+ return JSProxy::HasPropertyWithHandler(it->GetHolder<JSProxy>(),
+ it->GetName());
+ case LookupIterator::INTERCEPTOR: {
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithInterceptor(it);
+ if (!result.IsJust()) return Nothing<bool>();
+ if (result.FromJust() != ABSENT) return Just(true);
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK: {
+ if (it->HasAccess()) break;
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
+ if (!result.IsJust()) return Nothing<bool>();
+ return Just(result.FromJust() != ABSENT);
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ // TypedArray out-of-bounds access.
+ return Just(false);
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::DATA:
+ return Just(true);
+ }
+ }
+ return Just(false);
+}
+
+
+// static
MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
LanguageMode language_mode) {
for (; it->IsFound(); it->Next()) {
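
The unified JSReceiver::HasProperty above folds the old proxy and interceptor special cases into one walk over LookupIterator states: proxies and concrete properties answer immediately, while interceptor and access-check states either answer or fall through to the next holder. A toy state machine with the same shape; the states and outcomes here are illustrative stand-ins:

    #include <cstdio>

    enum class State { JSPROXY, INTERCEPTOR, ACCESS_CHECK, DATA, NOT_FOUND };

    struct Lookup {
      const State* states;
      int count;
      int pos;
      bool IsFound() const {
        return pos < count && states[pos] != State::NOT_FOUND;
      }
      State state() const { return states[pos]; }
      void Next() { ++pos; }
    };

    bool HasProperty(Lookup* it) {
      for (; it->IsFound(); it->Next()) {
        switch (it->state()) {
          case State::JSPROXY:      return true;   // would call the "has" trap
          case State::INTERCEPTOR:  break;         // absent here: keep walking
          case State::ACCESS_CHECK: break;         // access granted: keep walking
          case State::DATA:         return true;   // concrete property found
          case State::NOT_FOUND:    return false;
        }
      }
      return false;
    }

    int main() {
      State chain[] = {State::ACCESS_CHECK, State::INTERCEPTOR, State::DATA};
      Lookup it{chain, 3, 0};
      std::printf("%d\n", HasProperty(&it));  // 1
    }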
@@ -712,15 +770,16 @@ bool Object::ToInt32(int32_t* value) {
bool Object::ToUint32(uint32_t* value) {
if (IsSmi()) {
int num = Smi::cast(this)->value();
- if (num >= 0) {
- *value = static_cast<uint32_t>(num);
- return true;
- }
+ if (num < 0) return false;
+ *value = static_cast<uint32_t>(num);
+ return true;
}
if (IsHeapNumber()) {
double num = HeapNumber::cast(this)->value();
- if (num >= 0 && FastUI2D(FastD2UI(num)) == num) {
- *value = FastD2UI(num);
+ if (num < 0) return false;
+ uint32_t uint_value = FastD2UI(num);
+ if (FastUI2D(uint_value) == num) {
+ *value = uint_value;
return true;
}
}
@@ -862,8 +921,9 @@ bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
}
-MaybeHandle<Object> Object::SetPropertyWithAccessor(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
Isolate* isolate = it->isolate();
Handle<Object> structure = it->GetAccessors();
Handle<Object> receiver = it->GetReceiver();
@@ -879,21 +939,24 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- name, receiver),
- Object);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, name, receiver));
+ return Nothing<bool>();
}
v8::AccessorNameSetterCallback call_fun =
v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
- if (call_fun == nullptr) return value;
+ if (call_fun == nullptr) return Just(true);
+ // TODO(verwaest): Shouldn't this case be unreachable (at least in the
+ // long run?) Should we have ExecutableAccessorPairs with missing setter
+ // that are "writable"? If they aren't writable, shouldn't we have bailed
+ // out already earlier?
LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(true);
}
// Regular accessor.
@@ -901,15 +964,12 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
if (setter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value);
+ receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
}
- if (is_sloppy(language_mode)) return value;
-
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kNoSetterInCallback,
- it->GetName(), it->GetHolder<JSObject>()),
- Object);
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoSetterInCallback,
+ it->GetName(), it->GetHolder<JSObject>()));
}
@@ -941,10 +1001,10 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
}
-MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
- Handle<Object> receiver,
- Handle<JSReceiver> setter,
- Handle<Object> value) {
+Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
+ Handle<JSReceiver> setter,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
Isolate* isolate = setter->GetIsolate();
Debug* debug = isolate->debug();
@@ -953,10 +1013,10 @@ MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
if (debug->is_active()) debug->HandleStepIn(setter, false);
Handle<Object> argv[] = { value };
- RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver,
- arraysize(argv), argv),
- Object);
- return value;
+ RETURN_ON_EXCEPTION_VALUE(isolate, Execution::Call(isolate, setter, receiver,
+ arraysize(argv), argv),
+ Nothing<bool>());
+ return Just(true);
}
@@ -994,6 +1054,14 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
GetPropertyWithInterceptor(it, &done), Object);
if (done) return result;
}
+
+ // Cross-Origin [[Get]] of Well-Known Symbols does not throw, and returns
+ // undefined.
+ Handle<Name> name = it->GetName();
+ if (name->IsSymbol() && Symbol::cast(*name)->is_well_known_symbol()) {
+ return it->factory()->undefined_value();
+ }
+
it->isolate()->ReportFailedAccessCheck(checked);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
return it->factory()->undefined_value();
@@ -1033,17 +1101,16 @@ bool JSObject::AllCanWrite(LookupIterator* it) {
}
-MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value) {
+Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
if (AllCanWrite(it)) {
- // The supplied language-mode is ignored by SetPropertyWithAccessor.
- return SetPropertyWithAccessor(it, value, SLOPPY);
+ return SetPropertyWithAccessor(it, value, should_throw);
}
it->isolate()->ReportFailedAccessCheck(checked);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- return value;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(true);
}
@@ -1057,7 +1124,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<String>::cast(name));
}
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> property_dictionary(object->global_dictionary());
int entry = property_dictionary->FindEntry(name);
@@ -1661,6 +1728,20 @@ void JSObject::PrintElementsTransition(
}
+// static
+MaybeHandle<JSFunction> Map::GetConstructorFunction(
+ Handle<Map> map, Handle<Context> native_context) {
+ if (map->IsPrimitiveMap()) {
+ int const constructor_function_index = map->GetConstructorFunctionIndex();
+ if (constructor_function_index != kNoConstructorFunctionIndex) {
+ return handle(
+ JSFunction::cast(native_context->get(constructor_function_index)));
+ }
+ }
+ return MaybeHandle<JSFunction>();
+}
+
+
void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
PropertyAttributes attributes) {
OFStream os(file);
@@ -1797,7 +1878,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<BytecodeArray[" << BytecodeArray::cast(this)->length() << "]>";
break;
case FREE_SPACE_TYPE:
- os << "<FreeSpace[" << FreeSpace::cast(this)->Size() << "]>";
+ os << "<FreeSpace[" << FreeSpace::cast(this)->size() << "]>";
break;
#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE: \
@@ -1900,7 +1981,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
StringStream accumulator(&allocator);
PropertyCell* cell = PropertyCell::cast(this);
cell->value()->ShortPrint(&accumulator);
- os << accumulator.ToCString().get() << " " << cell->property_details();
+ os << accumulator.ToCString().get();
break;
}
case WEAK_CELL_TYPE: {
@@ -2153,7 +2234,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
PropertyAttributes attributes) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dict(object->global_dictionary());
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
int entry = dict->FindEntry(name);
@@ -2659,20 +2740,21 @@ bool Map::DeprecateTarget(PropertyKind kind, Name* key,
transition_target_deprecated = true;
}
- // Don't overwrite the empty descriptor array.
- if (NumberOfOwnDescriptors() == 0) return transition_target_deprecated;
+ // Don't overwrite the empty descriptor array or initial map's descriptors.
+ if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined()) {
+ return transition_target_deprecated;
+ }
DescriptorArray* to_replace = instance_descriptors();
- Map* current = this;
GetHeap()->incremental_marking()->RecordWrites(to_replace);
+ Map* current = this;
while (current->instance_descriptors() == to_replace) {
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break; // Stop overwriting at initial map.
current->SetEnumLength(kInvalidEnumCacheSentinel);
current->UpdateDescriptors(new_descriptors, new_layout_descriptor);
- Object* next = current->GetBackPointer();
- if (next->IsUndefined()) break;
current = Map::cast(next);
}
-
set_owns_descriptors(false);
return transition_target_deprecated;
}
@@ -2682,7 +2764,14 @@ Map* Map::FindRootMap() {
Map* result = this;
while (true) {
Object* back = result->GetBackPointer();
- if (back->IsUndefined()) return result;
+ if (back->IsUndefined()) {
+ // Initial map always owns descriptors and doesn't have unused entries
+ // in the descriptor array.
+ DCHECK(result->owns_descriptors());
+ DCHECK_EQ(result->NumberOfOwnDescriptors(),
+ result->instance_descriptors()->number_of_descriptors());
+ return result;
+ }
result = Map::cast(back);
}
}
@@ -3508,8 +3597,8 @@ Handle<Map> Map::Update(Handle<Map> map) {
}
-MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
- Handle<Object> value) {
+Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
+ Handle<Object> value) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -3517,7 +3606,7 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->setter()->IsUndefined()) return MaybeHandle<Object>();
+ if (interceptor->setter()->IsUndefined()) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
v8::Local<v8::Value> result;
@@ -3535,7 +3624,7 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
Handle<Name> name = it->name();
if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return MaybeHandle<Object>();
+ return Just(false);
}
v8::GenericNamedPropertySetterCallback setter =
@@ -3547,13 +3636,13 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
}
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- if (result.IsEmpty()) return MaybeHandle<Object>();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ if (result.IsEmpty()) return Just(false);
#ifdef DEBUG
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
#endif
- return value;
+ return Just(true);
}
@@ -3562,15 +3651,19 @@ MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
LookupIterator it(object, name);
- return SetProperty(&it, value, language_mode, store_mode);
+ MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
+ return value;
}
-MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode,
- bool* found) {
+Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode,
+ bool* found) {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc(it->isolate());
@@ -3587,20 +3680,21 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
if (it->HasAccess()) break;
// Check whether it makes sense to reuse the lookup iterator. Here it
// might still call into setters up the prototype chain.
- return JSObject::SetPropertyWithFailedAccessCheck(it, value);
+ return JSObject::SetPropertyWithFailedAccessCheck(it, value,
+ should_throw);
case LookupIterator::JSPROXY:
if (it->HolderIsReceiverOrHiddenPrototype()) {
return JSProxy::SetPropertyWithHandler(
it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(), value,
- language_mode);
+ should_throw);
} else {
// TODO(verwaest): Use the MaybeHandle to indicate result.
bool has_result = false;
- MaybeHandle<Object> maybe_result =
+ Maybe<bool> maybe_result =
JSProxy::SetPropertyViaPrototypesWithHandler(
it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(),
- value, language_mode, &has_result);
+ value, should_throw, &has_result);
if (has_result) return maybe_result;
done = true;
}
@@ -3608,24 +3702,22 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
case LookupIterator::INTERCEPTOR:
if (it->HolderIsReceiverOrHiddenPrototype()) {
- MaybeHandle<Object> maybe_result =
- JSObject::SetPropertyWithInterceptor(it, value);
- if (!maybe_result.is_null()) return maybe_result;
- if (it->isolate()->has_pending_exception()) return maybe_result;
+ Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+ if (result.IsNothing() || result.FromJust()) return result;
} else {
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
- if (!maybe_attributes.IsJust()) return MaybeHandle<Object>();
+ if (!maybe_attributes.IsJust()) return Nothing<bool>();
done = maybe_attributes.FromJust() != ABSENT;
if (done && (maybe_attributes.FromJust() & READ_ONLY) != 0) {
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
}
break;
case LookupIterator::ACCESSOR: {
if (it->IsReadOnly()) {
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo() &&
@@ -3634,15 +3726,15 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
done = true;
break;
}
- return SetPropertyWithAccessor(it, value, language_mode);
+ return SetPropertyWithAccessor(it, value, should_throw);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
// TODO(verwaest): We should throw an exception.
- return value;
+ return Just(true);
case LookupIterator::DATA:
if (it->IsReadOnly()) {
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
if (it->HolderIsReceiverOrHiddenPrototype()) {
return SetDataProperty(it, value);
@@ -3660,42 +3752,47 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
// If the receiver is the JSGlobalObject, the store was contextual. In case
// the property did not exist yet on the global object itself, we have to
- // throw a reference error in strict mode.
+ // throw a reference error in strict mode. In sloppy mode, we continue.
if (it->GetReceiver()->IsJSGlobalObject() && is_strict(language_mode)) {
- THROW_NEW_ERROR(it->isolate(),
- NewReferenceError(MessageTemplate::kNotDefined, it->name()),
- Object);
+ it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, it->name()));
+ return Nothing<bool>();
}
*found = false;
- return MaybeHandle<Object>();
+ return Nothing<bool>();
}
-MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
bool found = false;
- MaybeHandle<Object> result =
+ Maybe<bool> result =
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
- return AddDataProperty(it, value, NONE, language_mode, store_mode);
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ return AddDataProperty(it, value, NONE, should_throw, store_mode);
}
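// The hunks above all apply one conversion: callers that used to receive
// MaybeHandle<Object> (null handle == failure) now receive Maybe<bool>, with
// the language mode folded into a ShouldThrow flag up front. A minimal
// stand-alone sketch of the resulting tri-state protocol, with std::optional
// standing in for V8's Maybe and TryStore as a hypothetical helper, not V8
// API: Just(true) = stored, Just(false) = not stored but no exception (a
// DONT_THROW failure, or an interceptor that declined), Nothing<bool>() =
// exception pending.
#include <optional>

enum ShouldThrow { THROW_ON_ERROR, DONT_THROW };

std::optional<bool> TryStore(bool writable, ShouldThrow should_throw) {
  if (writable) return true;                     // Just(true): success
  if (should_throw == DONT_THROW) return false;  // Just(false): quiet failure
  // A real implementation would schedule a TypeError on the isolate here.
  return std::nullopt;                           // Nothing<bool>(): exception
}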
-MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+
bool found = false;
- MaybeHandle<Object> result =
+ Maybe<bool> result =
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
+ // The property either doesn't exist on the holder or exists there as a data
+ // property.
+
if (!it->GetReceiver()->IsJSReceiver()) {
- return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
- it->GetName(), value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
LookupIterator::Configuration c = LookupIterator::OWN;
@@ -3708,38 +3805,32 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
switch (own_lookup.state()) {
case LookupIterator::ACCESS_CHECK:
if (!own_lookup.HasAccess()) {
- return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value);
+ return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value,
+ should_throw);
}
break;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, language_mode);
+ return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
+ should_throw);
case LookupIterator::DATA: {
PropertyDetails details = own_lookup.property_details();
- if (details.IsConfigurable() || !details.IsReadOnly()) {
- return JSObject::DefineOwnPropertyIgnoreAttributes(
- &own_lookup, value, details.attributes());
+ if (details.IsReadOnly()) {
+ return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
}
- return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
+ return SetDataProperty(&own_lookup, value);
}
case LookupIterator::ACCESSOR: {
- PropertyDetails details = own_lookup.property_details();
- if (details.IsConfigurable()) {
- return JSObject::DefineOwnPropertyIgnoreAttributes(
- &own_lookup, value, details.attributes());
- }
-
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, language_mode);
+ return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
+ should_throw);
}
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY: {
bool found = false;
- MaybeHandle<Object> result = SetPropertyInternal(
+ Maybe<bool> result = SetPropertyInternal(
&own_lookup, value, language_mode, store_mode, &found);
if (found) return result;
break;
@@ -3751,7 +3842,7 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
}
}
- return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
+ return JSObject::AddDataProperty(&own_lookup, value, NONE, should_throw,
store_mode);
}
@@ -3781,36 +3872,47 @@ MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
}
-MaybeHandle<Object> Object::WriteToReadOnlyProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictCannotCreateProperty, name,
+ Object::TypeOf(isolate, receiver), receiver));
+}
+
+
+Maybe<bool> Object::WriteToReadOnlyProperty(LookupIterator* it,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
- it->GetName(), value, language_mode);
+ it->GetName(), value, should_throw);
}
-MaybeHandle<Object> Object::WriteToReadOnlyProperty(
- Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
- Handle<Object> value, LanguageMode language_mode) {
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kStrictReadOnlyProperty, name, receiver),
- Object);
+Maybe<bool> Object::WriteToReadOnlyProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty, name,
+ Object::TypeOf(isolate, receiver), receiver));
}
-MaybeHandle<Object> Object::RedefineNonconfigurableProperty(
- Isolate* isolate, Handle<Object> name, Handle<Object> value,
- LanguageMode language_mode) {
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kRedefineDisallowed, name),
- Object);
+Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, name));
}
-MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
- Handle<Object> value) {
+Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
// Proxies are handled on the WithHandler path. Other non-JSObjects cannot
// have own properties.
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
@@ -3831,8 +3933,8 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// Convert the incoming value to a number for storing into typed arrays.
if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), to_assign,
- Object::ToNumber(value), Object);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
// ToNumber above might modify the receiver, causing the cached
// holder_map to mismatch the actual holder->map() after this point.
// Reload the map to be in consistent state. Other cached state cannot
@@ -3843,7 +3945,8 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// We have to recheck the length. However, it can only change if the
// underlying buffer was neutered, so just check that.
if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
- return value;
+ return Just(true);
+ // TODO(neis): According to the spec, this should throw a TypeError.
}
}
}
@@ -3857,10 +3960,11 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// Send the change record if there are observers.
if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
- RETURN_ON_EXCEPTION(it->isolate(), JSObject::EnqueueChangeRecord(
- receiver, "update", it->GetName(),
- maybe_old.ToHandleChecked()),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(),
+ JSObject::EnqueueChangeRecord(receiver, "update", it->GetName(),
+ maybe_old.ToHandleChecked()),
+ Nothing<bool>());
}
#if VERIFY_HEAP
@@ -3868,7 +3972,7 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
receiver->JSObjectVerify();
}
#endif
- return value;
+ return Just(true);
}
@@ -3913,15 +4017,14 @@ MUST_USE_RESULT static MaybeHandle<Object> EnqueueSpliceRecord(
}
-MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
- Handle<Object> value,
- PropertyAttributes attributes,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
+ PropertyAttributes attributes,
+ ShouldThrow should_throw,
+ StoreFromKeyed store_mode) {
DCHECK(!it->GetReceiver()->IsJSProxy());
if (!it->GetReceiver()->IsJSObject()) {
- // TODO(verwaest): Throw a TypeError with a more specific message.
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
+ value, should_throw);
}
DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
@@ -3930,24 +4033,25 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
// instead. If the prototype is Null, the proxy is detached.
- if (receiver->IsJSGlobalProxy()) return value;
+ if (receiver->IsJSGlobalProxy()) return Just(true);
Isolate* isolate = it->isolate();
if (!receiver->map()->is_extensible() &&
(it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()))) {
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kObjectNotExtensible,
- it->GetName()),
- Object);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
}
if (it->IsElement()) {
if (receiver->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
if (JSArray::WouldChangeReadOnlyLength(array, it->index())) {
- if (is_sloppy(language_mode)) return value;
- return JSArray::ReadOnlyLengthError(array);
+ RETURN_FAILURE(array->GetIsolate(), should_throw,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty,
+ isolate->factory()->length_string(),
+ Object::TypeOf(isolate, array), array));
}
if (FLAG_trace_external_array_abuse &&
@@ -3960,8 +4064,8 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
}
}
- MaybeHandle<Object> result =
- JSObject::AddDataElement(receiver, it->index(), value, attributes);
+ Maybe<bool> result = JSObject::AddDataElement(receiver, it->index(), value,
+ attributes, should_throw);
JSObject::ValidateElements(receiver);
return result;
} else {
@@ -3985,10 +4089,10 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// Send the change record if there are observers.
if (receiver->map()->is_observed() &&
!isolate->IsInternallyUsedPropertyName(it->name())) {
- RETURN_ON_EXCEPTION(isolate, JSObject::EnqueueChangeRecord(
- receiver, "add", it->name(),
- it->factory()->the_hole_value()),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(isolate, JSObject::EnqueueChangeRecord(
+ receiver, "add", it->name(),
+ it->factory()->the_hole_value()),
+ Nothing<bool>());
}
#if VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -3997,7 +4101,7 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
#endif
}
- return value;
+ return Just(true);
}
@@ -4033,15 +4137,13 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
// Replace descriptors by new_descriptors in all maps that share it.
map->GetHeap()->incremental_marking()->RecordWrites(*descriptors);
- Map* walk_map;
- for (Object* current = map->GetBackPointer();
- !current->IsUndefined();
- current = walk_map->GetBackPointer()) {
- walk_map = Map::cast(current);
- if (walk_map->instance_descriptors() != *descriptors) break;
- walk_map->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ Map* current = *map;
+ while (current->instance_descriptors() == *descriptors) {
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break; // Stop overwriting at initial map.
+ current->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ current = Map::cast(next);
}
-
map->UpdateDescriptors(*new_descriptors, layout_descriptor);
}
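// This hunk and the Map::DeprecateTarget change above now share the same
// shape: walk the back-pointer chain, rewriting every map that still points
// at the old descriptor array, and leave the initial map (the one whose back
// pointer is undefined) untouched. A schematic of that walk over a
// hypothetical linked chain, with nullptr playing the role of the undefined
// back pointer:
struct MapSketch {
  const void* descriptors;  // descriptor array shared along the chain
  MapSketch* back_pointer;  // nullptr marks the initial map
};

void ReplaceSharedDescriptors(MapSketch* current, const void* old_descs,
                              const void* new_descs) {
  while (current != nullptr && current->descriptors == old_descs) {
    MapSketch* next = current->back_pointer;
    if (next == nullptr) break;  // stop overwriting at the initial map
    current->descriptors = new_descs;
    current = next;
  }
}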
@@ -4349,68 +4451,61 @@ Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
}
-MaybeHandle<Object> JSProxy::SetPropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode) {
+Maybe<bool> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return value;
+ if (name->IsSymbol()) return Just(true);
Handle<Object> args[] = { receiver, name, value };
- RETURN_ON_EXCEPTION(
- isolate,
- CallTrap(proxy,
- "set",
- isolate->derived_set_trap(),
- arraysize(args),
- args),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ CallTrap(proxy, "set", isolate->derived_set_trap(),
+ arraysize(args), args),
+ Nothing<bool>());
- return value;
+ return Just(true);
+ // TODO(neis): This needs to be made spec-conformant by looking at the
+ // trap's result.
}
-MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
+Maybe<bool> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode, bool* done) {
+ Handle<Object> value, ShouldThrow should_throw, bool* done) {
Isolate* isolate = proxy->GetIsolate();
Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) {
- *done = false;
- return isolate->factory()->the_hole_value();
+ *done = false; // Return value will be ignored.
+ return Nothing<bool>();
}
*done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CallTrap(proxy,
- "getPropertyDescriptor",
- Handle<Object>(),
- arraysize(args),
- args),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, result, CallTrap(proxy, "getPropertyDescriptor",
+ Handle<Object>(), arraysize(args), args),
+ Nothing<bool>());
if (result->IsUndefined()) {
- *done = false;
- return isolate->factory()->the_hole_value();
+ *done = false; // Return value will be ignored.
+ return Nothing<bool>();
}
// Emulate [[GetProperty]] semantics for proxies.
Handle<Object> argv[] = { result };
Handle<Object> desc;
- ASSIGN_RETURN_ON_EXCEPTION(
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, desc,
- Execution::Call(isolate,
- isolate->to_complete_property_descriptor(),
- result,
- arraysize(argv),
- argv),
- Object);
+ Execution::Call(isolate, isolate->to_complete_property_descriptor(),
+ result, arraysize(argv), argv),
+ Nothing<bool>());
  // [[GetProperty]] requires checking that all properties are configurable.
Handle<String> configurable_name =
@@ -4420,12 +4515,11 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Object::GetProperty(desc, configurable_name).ToHandleChecked();
DCHECK(configurable->IsBoolean());
if (configurable->IsFalse()) {
- Handle<String> trap = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("getPropertyDescriptor"));
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kProxyPropNotConfigurable,
- handler, name, trap),
- Object);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyPropNotConfigurable, handler, name,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "getPropertyDescriptor")));
+ return Nothing<bool>();
}
DCHECK(configurable->IsTrue());
@@ -4443,9 +4537,9 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Object::GetProperty(desc, writable_name).ToHandleChecked();
DCHECK(writable->IsBoolean());
*done = writable->IsFalse();
- if (!*done) return isolate->factory()->the_hole_value();
+ if (!*done) return Nothing<bool>(); // Return value will be ignored.
return WriteToReadOnlyProperty(isolate, receiver, name, value,
- language_mode);
+ should_throw);
}
// We have an AccessorDescriptor.
@@ -4455,13 +4549,12 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value);
+ receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
}
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kNoSetterInCallback, name, proxy),
- Object);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoSetterInCallback, name, proxy));
}
@@ -4692,8 +4785,9 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
DCHECK(object->map()->is_extensible() ||
it.isolate()->IsInternallyUsedPropertyName(name));
#endif
- AddDataProperty(&it, value, attributes, STRICT,
- CERTAINLY_NOT_STORE_FROM_KEYED).Check();
+ CHECK(AddDataProperty(&it, value, attributes, THROW_ON_ERROR,
+ CERTAINLY_NOT_STORE_FROM_KEYED)
+ .IsJust());
}
@@ -4711,6 +4805,15 @@ void ExecutableAccessorInfo::ClearSetter(Handle<ExecutableAccessorInfo> info) {
MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ExecutableAccessorInfoHandling handling) {
+ MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(
+ it, value, attributes, THROW_ON_ERROR, handling));
+ return value;
+}
+
+
+Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ ShouldThrow should_throw, ExecutableAccessorInfoHandling handling) {
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
bool is_observed = object->map()->is_observed() &&
(it->IsElement() ||
@@ -4726,8 +4829,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
case LookupIterator::ACCESS_CHECK:
if (!it->HasAccess()) {
it->isolate()->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- return value;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(true);
}
break;
@@ -4741,10 +4844,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// they throw. Here we should do the same.
case LookupIterator::INTERCEPTOR:
if (handling == DONT_FORCE_FIELD) {
- MaybeHandle<Object> maybe_result =
- JSObject::SetPropertyWithInterceptor(it, value);
- if (!maybe_result.is_null()) return maybe_result;
- if (it->isolate()->has_pending_exception()) return maybe_result;
+ Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+ if (result.IsNothing() || result.FromJust()) return result;
}
break;
@@ -4759,13 +4860,11 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// Ensure the context isn't changed after calling into accessors.
AssertNoContextChange ncc(it->isolate());
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- it->isolate(), result,
- JSObject::SetPropertyWithAccessor(it, value, STRICT), Object);
- DCHECK(result->SameValue(*value));
+ Maybe<bool> result =
+ JSObject::SetPropertyWithAccessor(it, value, should_throw);
+ if (result.IsNothing() || !result.FromJust()) return result;
- if (details.attributes() == attributes) return value;
+ if (details.attributes() == attributes) return Just(true);
// Reconfigure the accessor if attributes mismatch.
Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
@@ -4784,18 +4883,18 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
}
if (is_observed) {
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(
it->isolate(),
EnqueueChangeRecord(object, "reconfigure", it->GetName(),
it->factory()->the_hole_value()),
- Object);
+ Nothing<bool>());
}
- return value;
+ return Just(true);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, STRICT);
+ return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
+ should_throw);
case LookupIterator::DATA: {
PropertyDetails details = it->property_details();
@@ -4808,8 +4907,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// Special case: properties of typed arrays cannot be reconfigured to
// non-writable nor to non-enumerable.
if (it->IsElement() && object->HasFixedTypedArrayElements()) {
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, STRICT);
+ return RedefineIncompatibleProperty(it->isolate(), it->GetName(),
+ value, should_throw);
}
// Reconfigure the data property if the attributes mismatch.
@@ -4821,17 +4920,17 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
if (old_value->SameValue(*value)) {
old_value = it->factory()->the_hole_value();
}
- RETURN_ON_EXCEPTION(it->isolate(),
- EnqueueChangeRecord(object, "reconfigure",
- it->GetName(), old_value),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), EnqueueChangeRecord(object, "reconfigure",
+ it->GetName(), old_value),
+ Nothing<bool>());
}
- return value;
+ return Just(true);
}
}
}
- return AddDataProperty(it, value, attributes, STRICT,
+ return AddDataProperty(it, value, attributes, should_throw,
CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -4873,7 +4972,8 @@ Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
if (it->IsFound()) {
if (!it->IsConfigurable()) return Just(false);
} else {
- if (!JSObject::cast(*it->GetReceiver())->IsExtensible()) return Just(false);
+ if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver())))
+ return Just(false);
}
RETURN_ON_EXCEPTION_VALUE(
@@ -5044,7 +5144,7 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
Handle<Map> new_map,
int expected_additional_properties) {
// The global object is always normalized.
- DCHECK(!object->IsGlobalObject());
+ DCHECK(!object->IsJSGlobalObject());
// JSGlobalProxy must never be normalized
DCHECK(!object->IsJSGlobalProxy());
@@ -5161,7 +5261,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int unused_property_fields,
const char* reason) {
if (object->HasFastProperties()) return;
- DCHECK(!object->IsGlobalObject());
+ DCHECK(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
Handle<NameDictionary> dictionary(object->property_dictionary());
@@ -5708,7 +5808,7 @@ void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
Handle<GlobalDictionary> dictionary(object->global_dictionary());
DCHECK_NE(GlobalDictionary::kNotFound, entry);
@@ -5819,7 +5919,7 @@ MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
LookupIterator it(object, name, LookupIterator::HIDDEN);
- return JSObject::DeleteProperty(&it, language_mode);
+ return DeleteProperty(&it, language_mode);
}
@@ -5827,7 +5927,760 @@ MaybeHandle<Object> JSReceiver::DeletePropertyOrElement(
Handle<JSReceiver> object, Handle<Name> name, LanguageMode language_mode) {
LookupIterator it = LookupIterator::PropertyOrElement(
name->GetIsolate(), object, name, LookupIterator::HIDDEN);
- return JSObject::DeleteProperty(&it, language_mode);
+ return DeleteProperty(&it, language_mode);
+}
+
+
+// ES6 7.1.14
+MaybeHandle<Object> ToPropertyKey(Isolate* isolate, Handle<Object> value) {
+ // 1. Let key be ToPrimitive(argument, hint String).
+ MaybeHandle<Object> maybe_key =
+ Object::ToPrimitive(value, ToPrimitiveHint::kString);
+ // 2. ReturnIfAbrupt(key).
+ Handle<Object> key;
+ if (!maybe_key.ToHandle(&key)) return key;
+ // 3. If Type(key) is Symbol, then return key.
+ if (key->IsSymbol()) return key;
+ // 4. Return ToString(key).
+ // Extending spec'ed behavior, we'd be happy to return an element index.
+ if (key->IsSmi()) return key;
+ if (key->IsHeapNumber()) {
+ uint32_t uint_value;
+    if (key->ToArrayLength(&uint_value) &&
+ uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
+ }
+ }
+ return Object::ToString(isolate, key);
+}
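// The ladder above canonicalizes an arbitrary value into a property key:
// symbols pass through, integral numbers small enough for the fast path stay
// numeric, and everything else becomes a string. A plain-C++ sketch of the
// numeric branch only; it uses the generic array-index bound where the code
// above additionally caps at the platform-dependent Smi::kMaxValue, and
// std::to_string is a stand-in for the real ToString semantics:
#include <cmath>
#include <cstdint>
#include <string>
#include <variant>

std::variant<uint32_t, std::string> CanonicalizeNumericKey(double key) {
  if (key >= 0 && std::floor(key) == key && key < 4294967295.0) {
    return static_cast<uint32_t>(key);  // keep the key as an element index
  }
  return std::to_string(key);  // 4. Return ToString(key).
}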
+
+
+// ES6 19.1.2.4
+// static
+Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> attributes) {
+ // 1. If Type(O) is not Object, throw a TypeError exception.
+ // TODO(jkummerow): Implement Proxy support, change to "IsSpecObject".
+ if (!object->IsJSObject()) {
+ Handle<String> fun_name =
+ isolate->factory()->InternalizeUtf8String("Object.defineProperty");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name));
+ }
+ // 2. Let key be ToPropertyKey(P).
+ // 3. ReturnIfAbrupt(key).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key, ToPropertyKey(isolate, key));
+ // 4. Let desc be ToPropertyDescriptor(Attributes).
+ // 5. ReturnIfAbrupt(desc).
+ PropertyDescriptor desc;
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+ return isolate->heap()->exception();
+ }
+  // 6. Let success be DefinePropertyOrThrow(O, key, desc).
+ bool success = DefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
+ &desc, THROW_ON_ERROR);
+ // 7. ReturnIfAbrupt(success).
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ CHECK(success == true);
+ // 8. Return O.
+ return *object;
+}
+
+
+// ES6 19.1.2.3.1
+// static
+Object* JSReceiver::DefineProperties(Isolate* isolate, Handle<Object> object,
+ Handle<Object> properties) {
+ // 1. If Type(O) is not Object, throw a TypeError exception.
+ // TODO(jkummerow): Implement Proxy support, change to "IsSpecObject".
+ if (!object->IsJSObject()) {
+ Handle<String> fun_name =
+ isolate->factory()->InternalizeUtf8String("Object.defineProperties");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name));
+ }
+ // 2. Let props be ToObject(Properties).
+ // 3. ReturnIfAbrupt(props).
+ Handle<JSReceiver> props;
+ if (!Object::ToObject(isolate, properties).ToHandle(&props)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+ }
+ // 4. Let keys be props.[[OwnPropertyKeys]]().
+ // 5. ReturnIfAbrupt(keys).
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(props, JSReceiver::OWN_ONLY, INCLUDE_SYMBOLS));
+ // 6. Let descriptors be an empty List.
+ int capacity = keys->length();
+ std::vector<PropertyDescriptor> descriptors(capacity);
+ // 7. Repeat for each element nextKey of keys in List order,
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> next_key(keys->get(i), isolate);
+ // 7a. Let propDesc be props.[[GetOwnProperty]](nextKey).
+ // 7b. ReturnIfAbrupt(propDesc).
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, props, next_key, &success, LookupIterator::HIDDEN);
+ DCHECK(success);
+ // TODO(jkummerow): Support JSProxies. Make sure we call the correct
+ // getOwnPropertyDescriptor trap, and convert the result object to a
+ // PropertyDescriptor.
+ Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ PropertyAttributes attrs = maybe.FromJust();
+ // 7c. If propDesc is not undefined and propDesc.[[Enumerable]] is true:
+ if (attrs == ABSENT) continue;
+ // GetKeys() only returns enumerable keys.
+ DCHECK((attrs & DONT_ENUM) == 0);
+ // 7c i. Let descObj be Get(props, nextKey).
+ // 7c ii. ReturnIfAbrupt(descObj).
+ Handle<Object> desc_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, desc_obj,
+ JSObject::GetProperty(&it));
+ // 7c iii. Let desc be ToPropertyDescriptor(descObj).
+ success = PropertyDescriptor::ToPropertyDescriptor(isolate, desc_obj,
+ &descriptors[i]);
+ // 7c iv. ReturnIfAbrupt(desc).
+ if (!success) return isolate->heap()->exception();
+ // 7c v. Append the pair (a two element List) consisting of nextKey and
+ // desc to the end of descriptors.
+ descriptors[i].set_name(next_key);
+ }
+ // 8. For each pair from descriptors in list order,
+ for (size_t i = 0; i < descriptors.size(); ++i) {
+ PropertyDescriptor* desc = &descriptors[i];
+ // 8a. Let P be the first element of pair.
+ // 8b. Let desc be the second element of pair.
+ // 8c. Let status be DefinePropertyOrThrow(O, P, desc).
+ bool status = DefineOwnProperty(isolate, Handle<JSObject>::cast(object),
+ desc->name(), desc, THROW_ON_ERROR);
+ // 8d. ReturnIfAbrupt(status).
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ CHECK(status == true);
+ }
+  // 9. Return O.
+ return *object;
+}
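// The two-pass shape above is load-bearing for conformance: every descriptor
// is fully converted (steps 6-7), surfacing any conversion error, before the
// first property is defined (step 8), so a bad attributes object cannot
// leave the target half-updated by a failed conversion. A generic sketch of
// that ordering, with hypothetical Convert/Define callbacks:
#include <functional>
#include <optional>
#include <vector>

template <typename Raw, typename Desc>
bool DefinePropertiesSketch(
    const std::vector<Raw>& inputs,
    const std::function<std::optional<Desc>(const Raw&)>& convert,
    const std::function<bool(const Desc&)>& define) {
  std::vector<Desc> descriptors;
  for (const Raw& raw : inputs) {  // pass 1: convert everything first
    std::optional<Desc> desc = convert(raw);
    if (!desc) return false;  // abrupt completion before any define runs
    descriptors.push_back(*desc);
  }
  for (const Desc& desc : descriptors) {  // pass 2: apply in list order
    if (!define(desc)) return false;
  }
  return true;
}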
+
+
+// static
+bool JSReceiver::DefineOwnProperty(Isolate* isolate, Handle<JSReceiver> object,
+ Handle<Object> key, PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ if (object->IsJSArray()) {
+ return JSArray::DefineOwnProperty(isolate, Handle<JSArray>::cast(object),
+ key, desc, should_throw);
+ }
+ // TODO(jkummerow): Support Modules (ES6 9.4.6.6)
+ // TODO(jkummerow): Support Proxies (ES6 9.5.6)
+ if (!object->IsJSObject()) return true;
+
+ // OrdinaryDefineOwnProperty, by virtue of calling
+ // DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
+ // and IntegerIndexedExotics (ES6 9.4.5.3), with one exception:
+ // TODO(jkummerow): Setting an indexed accessor on a typed array should throw.
+ return OrdinaryDefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
+ desc, should_throw);
+}
+
+
+// static
+bool JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ bool success = false;
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::HIDDEN);
+ DCHECK(success); // ...so creating a LookupIterator can't fail.
+
+ // Deal with access checks first.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!it.HasAccess()) {
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, false);
+ return false;
+ }
+ it.Next();
+ }
+
+ return OrdinaryDefineOwnProperty(&it, desc, should_throw);
+}
+
+
+// ES6 9.1.6.1
+// static
+bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ Isolate* isolate = it->isolate();
+ // == OrdinaryDefineOwnProperty (O, P, Desc) ==
+ // 1. Let current be O.[[GetOwnProperty]](P).
+ // 2. ReturnIfAbrupt(current).
+ PropertyDescriptor current;
+ if (!GetOwnPropertyDescriptor(it, &current) &&
+ isolate->has_pending_exception()) {
+ return false;
+ }
+ // TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
+ // the iterator every time. Currently, the reasons why we need it are:
+ // - handle interceptors correctly
+ // - handle accessors correctly (which might change the holder's map)
+ it->Restart();
+ // 3. Let extensible be the value of the [[Extensible]] internal slot of O.
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
+ bool extensible = JSObject::IsExtensible(object);
+
+ bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc);
+ bool desc_is_accessor_descriptor =
+ PropertyDescriptor::IsAccessorDescriptor(desc);
+ bool desc_is_generic_descriptor =
+ PropertyDescriptor::IsGenericDescriptor(desc);
+
+ // == ValidateAndApplyPropertyDescriptor (O, P, extensible, Desc, current) ==
+ // 2. If current is undefined, then
+ if (current.is_empty()) {
+ // 2a. If extensible is false, return false.
+ if (!extensible) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kDefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ // 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
+ // (This is equivalent to !IsAccessorDescriptor(desc).)
+ DCHECK((desc_is_generic_descriptor || desc_is_data_descriptor) ==
+ !desc_is_accessor_descriptor);
+ if (!desc_is_accessor_descriptor) {
+ // 2c i. If O is not undefined, create an own data property named P of
+ // object O whose [[Value]], [[Writable]], [[Enumerable]] and
+ // [[Configurable]] attribute values are described by Desc. If the value
+ // of an attribute field of Desc is absent, the attribute of the newly
+ // created property is set to its default value.
+ if (!object->IsUndefined()) {
+ if (!desc->has_writable()) desc->set_writable(false);
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ Handle<Object> value(
+ desc->has_value()
+ ? desc->value()
+ : Handle<Object>::cast(isolate->factory()->undefined_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineOwnPropertyIgnoreAttributes(
+ it, value, desc->ToAttributes(), JSObject::DONT_FORCE_FIELD);
+ if (result.is_null()) return false;
+ }
+ } else {
+ // 2d. Else Desc must be an accessor Property Descriptor,
+ DCHECK(desc_is_accessor_descriptor);
+ // 2d i. If O is not undefined, create an own accessor property named P
+ // of object O whose [[Get]], [[Set]], [[Enumerable]] and
+ // [[Configurable]] attribute values are described by Desc. If the value
+ // of an attribute field of Desc is absent, the attribute of the newly
+ // created property is set to its default value.
+ if (!object->IsUndefined()) {
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ Handle<Object> getter(
+ desc->has_get()
+ ? desc->get()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ Handle<Object> setter(
+ desc->has_set()
+ ? desc->set()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineAccessor(it, getter, setter, desc->ToAttributes());
+ if (result.is_null()) return false;
+ }
+ }
+ // 2e. Return true.
+ return true;
+ }
+ // 3. Return true, if every field in Desc is absent.
+ // 4. Return true, if every field in Desc also occurs in current and the
+ // value of every field in Desc is the same value as the corresponding field
+ // in current when compared using the SameValue algorithm.
+ if ((!desc->has_enumerable() || desc->enumerable() == current.enumerable()) &&
+ (!desc->has_configurable() ||
+ desc->configurable() == current.configurable()) &&
+ (!desc->has_value() ||
+ (current.has_value() && current.value()->SameValue(*desc->value()))) &&
+ (!desc->has_writable() ||
+ (current.has_writable() && current.writable() == desc->writable())) &&
+ (!desc->has_get() ||
+ (current.has_get() && current.get()->SameValue(*desc->get()))) &&
+ (!desc->has_set() ||
+ (current.has_set() && current.set()->SameValue(*desc->set())))) {
+ return true;
+ }
+ // 5. If the [[Configurable]] field of current is false, then
+ if (!current.configurable()) {
+ // 5a. Return false, if the [[Configurable]] field of Desc is true.
+ if (desc->has_configurable() && desc->configurable()) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ // 5b. Return false, if the [[Enumerable]] field of Desc is present and the
+ // [[Enumerable]] fields of current and Desc are the Boolean negation of
+ // each other.
+ if (desc->has_enumerable() && desc->enumerable() != current.enumerable()) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ }
+
+ bool current_is_data_descriptor =
+ PropertyDescriptor::IsDataDescriptor(&current);
+ // 6. If IsGenericDescriptor(Desc) is true, no further validation is required.
+ if (desc_is_generic_descriptor) {
+ // Nothing to see here.
+
+ // 7. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) have
+ // different results, then:
+ } else if (current_is_data_descriptor != desc_is_data_descriptor) {
+ // 7a. Return false, if the [[Configurable]] field of current is false.
+ if (!current.configurable()) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ // 7b. If IsDataDescriptor(current) is true, then:
+ if (current_is_data_descriptor) {
+ // 7b i. If O is not undefined, convert the property named P of object O
+ // from a data property to an accessor property. Preserve the existing
+ // values of the converted property's [[Configurable]] and [[Enumerable]]
+ // attributes and set the rest of the property's attributes to their
+ // default values.
+ // --> Folded into step 10.
+ } else {
+ // 7c i. If O is not undefined, convert the property named P of object O
+ // from an accessor property to a data property. Preserve the existing
+ // values of the converted property’s [[Configurable]] and [[Enumerable]]
+ // attributes and set the rest of the property’s attributes to their
+ // default values.
+ // --> Folded into step 10.
+ }
+
+ // 8. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) are both
+ // true, then:
+ } else if (current_is_data_descriptor && desc_is_data_descriptor) {
+ // 8a. If the [[Configurable]] field of current is false, then:
+ if (!current.configurable()) {
+ // [Strong mode] Disallow changing writable -> readonly for
+ // non-configurable properties.
+ if (current.writable() && desc->has_writable() && !desc->writable() &&
+ object->map()->is_strong()) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kStrongRedefineDisallowed, object,
+ it->GetName()));
+ }
+ return false;
+ }
+ // 8a i. Return false, if the [[Writable]] field of current is false and
+ // the [[Writable]] field of Desc is true.
+ if (!current.writable() && desc->has_writable() && desc->writable()) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ // 8a ii. If the [[Writable]] field of current is false, then:
+ if (!current.writable()) {
+ // 8a ii 1. Return false, if the [[Value]] field of Desc is present and
+ // SameValue(Desc.[[Value]], current.[[Value]]) is false.
+ if (desc->has_value() && !desc->value()->SameValue(*current.value())) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ }
+ }
+ } else {
+ // 9. Else IsAccessorDescriptor(current) and IsAccessorDescriptor(Desc)
+ // are both true,
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(&current) &&
+ desc_is_accessor_descriptor);
+ // 9a. If the [[Configurable]] field of current is false, then:
+ if (!current.configurable()) {
+ // 9a i. Return false, if the [[Set]] field of Desc is present and
+ // SameValue(Desc.[[Set]], current.[[Set]]) is false.
+ if (desc->has_set() && !desc->set()->SameValue(*current.set())) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ // 9a ii. Return false, if the [[Get]] field of Desc is present and
+ // SameValue(Desc.[[Get]], current.[[Get]]) is false.
+ if (desc->has_get() && !desc->get()->SameValue(*current.get())) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed, it->GetName()));
+ }
+ return false;
+ }
+ }
+ }
+
+ // 10. If O is not undefined, then:
+ if (!object->IsUndefined()) {
+ // 10a. For each field of Desc that is present, set the corresponding
+ // attribute of the property named P of object O to the value of the field.
+ PropertyAttributes attrs = NONE;
+
+ if (desc->has_enumerable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->enumerable() ? NONE : DONT_ENUM));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current.enumerable() ? NONE : DONT_ENUM));
+ }
+ if (desc->has_configurable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->configurable() ? NONE : DONT_DELETE));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current.configurable() ? NONE : DONT_DELETE));
+ }
+ if (desc_is_data_descriptor ||
+ (desc_is_generic_descriptor && current_is_data_descriptor)) {
+ if (desc->has_writable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->writable() ? NONE : READ_ONLY));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current.writable() ? NONE : READ_ONLY));
+ }
+ Handle<Object> value(
+ desc->has_value() ? desc->value()
+ : current.has_value()
+ ? current.value()
+ : Handle<Object>::cast(
+ isolate->factory()->undefined_value()));
+ MaybeHandle<Object> result = JSObject::DefineOwnPropertyIgnoreAttributes(
+ it, value, attrs, JSObject::DONT_FORCE_FIELD);
+ if (result.is_null()) return false;
+ } else {
+ DCHECK(desc_is_accessor_descriptor ||
+ (desc_is_generic_descriptor &&
+ PropertyDescriptor::IsAccessorDescriptor(&current)));
+ Handle<Object> getter(
+ desc->has_get()
+ ? desc->get()
+ : current.has_get()
+ ? current.get()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ Handle<Object> setter(
+ desc->has_set()
+ ? desc->set()
+ : current.has_set()
+ ? current.set()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineAccessor(it, getter, setter, attrs);
+ if (result.is_null()) return false;
+ }
+ }
+
+ // 11. Return true.
+ return true;
+}
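// A condensed sketch of the non-configurable gate from step 5 above: once
// [[Configurable]] is false, a redefinition may neither re-enable it (5a)
// nor flip [[Enumerable]] (5b). Field names are illustrative, not V8's
// PropertyDescriptor API; absent fields are modeled as empty optionals.
#include <optional>

struct DescriptorSketch {
  std::optional<bool> configurable;
  std::optional<bool> enumerable;
};

bool PassesNonConfigurableGate(const DescriptorSketch& current,
                               const DescriptorSketch& desc) {
  if (current.configurable.value_or(false)) return true;  // gate not engaged
  if (desc.configurable.value_or(false)) return false;    // 5a
  if (desc.enumerable.has_value() &&
      *desc.enumerable != current.enumerable.value_or(false)) {
    return false;  // 5b: a present [[Enumerable]] flips the current value
  }
  return true;
}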
+
+
+// TODO(jkummerow): Consider unification with FastAsArrayLength() in
+// accessors.cc.
+bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
+ DCHECK(value->IsNumber() || value->IsName());
+ if (value->ToArrayLength(length)) return true;
+ if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
+ return false;
+}
+
+
+bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
+ return PropertyKeyToArrayLength(index_obj, output) && *output != kMaxUInt32;
+}
+
+
+// ES6 9.4.2.1
+// static
+bool JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
+ Handle<Object> name, PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // 1. Assert: IsPropertyKey(P) is true. ("P" is |name|.)
+ // 2. If P is "length", then:
+ // TODO(jkummerow): Check if we need slow string comparison.
+ if (*name == isolate->heap()->length_string()) {
+ // 2a. Return ArraySetLength(A, Desc).
+ return ArraySetLength(isolate, o, desc, should_throw);
+ }
+ // 3. Else if P is an array index, then:
+ uint32_t index = 0;
+ if (PropertyKeyToArrayIndex(name, &index)) {
+ // 3a. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
+ PropertyDescriptor old_len_desc;
+ bool success = GetOwnPropertyDescriptor(
+ isolate, o, isolate->factory()->length_string(), &old_len_desc);
+ // 3b. (Assert)
+ DCHECK(success);
+ USE(success);
+ // 3c. Let oldLen be oldLenDesc.[[Value]].
+ uint32_t old_len = 0;
+ CHECK(old_len_desc.value()->ToArrayLength(&old_len));
+ // 3d. Let index be ToUint32(P).
+ // (Already done above.)
+ // 3e. (Assert)
+ // 3f. If index >= oldLen and oldLenDesc.[[Writable]] is false,
+ // return false.
+ if (index >= old_len && old_len_desc.has_writable() &&
+ !old_len_desc.writable()) {
+ if (should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kDefineDisallowed, name));
+ }
+ return false;
+ }
+ // 3g. Let succeeded be OrdinaryDefineOwnProperty(A, P, Desc).
+ bool succeeded =
+ OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
+ // 3h. (Assert)
+ // 3i. If succeeded is false, return false.
+ if (!succeeded) return false;
+ // 3j. If index >= oldLen, then:
+ if (index >= old_len) {
+ // 3j i. Set oldLenDesc.[[Value]] to index + 1.
+ old_len_desc.set_value(isolate->factory()->NewNumberFromUint(index + 1));
+ // 3j ii. Let succeeded be
+ // OrdinaryDefineOwnProperty(A, "length", oldLenDesc).
+ OrdinaryDefineOwnProperty(isolate, o, isolate->factory()->length_string(),
+ &old_len_desc, should_throw);
+ // 3j iii. (Assert)
+ }
+ // 3k. Return true.
+ return true;
+ }
+
+ // 4. Return OrdinaryDefineOwnProperty(A, P, Desc).
+ return OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
+}
+
+
+// Part of ES6 9.4.2.4 ArraySetLength.
+// static
+bool JSArray::AnythingToArrayLength(Isolate* isolate,
+ Handle<Object> length_object,
+ uint32_t* output) {
+ // Fast path: check numbers and strings that can be converted directly
+ // and unobservably.
+ if (length_object->ToArrayLength(output)) return true;
+ if (length_object->IsString() &&
+ Handle<String>::cast(length_object)->AsArrayIndex(output)) {
+ return true;
+ }
+ // Slow path: follow steps in ES6 9.4.2.4 "ArraySetLength".
+ // 3. Let newLen be ToUint32(Desc.[[Value]]).
+ Handle<Object> uint32_v;
+ if (!Object::ToUint32(isolate, length_object).ToHandle(&uint32_v)) {
+ // 4. ReturnIfAbrupt(newLen).
+ return false;
+ }
+ // 5. Let numberLen be ToNumber(Desc.[[Value]]).
+ Handle<Object> number_v;
+ if (!Object::ToNumber(length_object).ToHandle(&number_v)) {
+ // 6. ReturnIfAbrupt(newLen).
+ return false;
+ }
+ // 7. If newLen != numberLen, throw a RangeError exception.
+ if (uint32_v->Number() != number_v->Number()) {
+ Handle<Object> exception =
+ isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
+ isolate->Throw(*exception);
+ return false;
+ }
+ CHECK(uint32_v->ToArrayLength(output));
+ return true;
+}
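// Steps 3-7 above reject any length whose ToUint32 image fails to round-trip
// through ToNumber: 2^32 wraps to 0 and 1.5 truncates to 1, so both throw a
// RangeError, while 42 survives. A plain-double illustration of the same
// check, restricted to the non-negative case (full ToUint32 also handles
// negatives; both they and NaN fail the round-trip anyway):
#include <cmath>

bool RoundTripsAsArrayLength(double number_len) {
  if (!(number_len >= 0)) return false;  // NaN and negatives never round-trip
  double uint_len = std::fmod(std::floor(number_len), 4294967296.0);  // ToUint32
  return uint_len == number_len;  // step 7: newLen must equal numberLen
}
// RoundTripsAsArrayLength(42.0)          == true
// RoundTripsAsArrayLength(1.5)           == false  (truncates to 1)
// RoundTripsAsArrayLength(4294967296.0)  == false  (wraps to 0)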
+
+
+// ES6 9.4.2.4
+// static
+bool JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // 1. If the [[Value]] field of Desc is absent, then
+ if (!desc->has_value()) {
+ // 1a. Return OrdinaryDefineOwnProperty(A, "length", Desc).
+ return OrdinaryDefineOwnProperty(
+ isolate, a, isolate->factory()->length_string(), desc, should_throw);
+ }
+ // 2. Let newLenDesc be a copy of Desc.
+ // (Actual copying is not necessary.)
+ PropertyDescriptor* new_len_desc = desc;
+ // 3. - 7. Convert Desc.[[Value]] to newLen.
+ uint32_t new_len = 0;
+ if (!AnythingToArrayLength(isolate, desc->value(), &new_len)) {
+ if (should_throw == THROW_ON_ERROR && !isolate->has_pending_exception()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kCannotConvertToPrimitive));
+ }
+ return false;
+ }
+ // 8. Set newLenDesc.[[Value]] to newLen.
+ // (Done below, if needed.)
+ // 9. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
+ PropertyDescriptor old_len_desc;
+ bool success = GetOwnPropertyDescriptor(
+ isolate, a, isolate->factory()->length_string(), &old_len_desc);
+ // 10. (Assert)
+ DCHECK(success);
+ USE(success);
+ // 11. Let oldLen be oldLenDesc.[[Value]].
+ uint32_t old_len = 0;
+ CHECK(old_len_desc.value()->ToArrayLength(&old_len));
+ // 12. If newLen >= oldLen, then
+ if (new_len >= old_len) {
+ // 8. Set newLenDesc.[[Value]] to newLen.
+ // 12a. Return OrdinaryDefineOwnProperty(A, "length", newLenDesc).
+ new_len_desc->set_value(isolate->factory()->NewNumberFromUint(new_len));
+ return OrdinaryDefineOwnProperty(isolate, a,
+ isolate->factory()->length_string(),
+ new_len_desc, should_throw);
+ }
+ // 13. If oldLenDesc.[[Writable]] is false, return false.
+ if (!old_len_desc.writable()) {
+ if (should_throw == THROW_ON_ERROR)
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kRedefineDisallowed,
+ isolate->factory()->length_string()));
+ return false;
+ }
+ // 14. If newLenDesc.[[Writable]] is absent or has the value true,
+ // let newWritable be true.
+ bool new_writable = false;
+ if (!new_len_desc->has_writable() || new_len_desc->writable()) {
+ new_writable = true;
+ } else {
+ // 15. Else,
+ // 15a. Need to defer setting the [[Writable]] attribute to false in case
+ // any elements cannot be deleted.
+ // 15b. Let newWritable be false. (It's initialized as "false" anyway.)
+ // 15c. Set newLenDesc.[[Writable]] to true.
+ // (Not needed.)
+ }
+  // Most of steps 16 through 19 are implemented by JSArray::SetLength.
+ if (JSArray::ObservableSetLength(a, new_len).is_null()) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ // Steps 19d-ii, 20.
+ if (!new_writable) {
+ PropertyDescriptor readonly;
+ readonly.set_writable(false);
+ OrdinaryDefineOwnProperty(isolate, a, isolate->factory()->length_string(),
+ &readonly, should_throw);
+ }
+ uint32_t actual_new_len = 0;
+ CHECK(a->length()->ToArrayLength(&actual_new_len));
+ // Steps 19d-v, 21. Return false if there were non-deletable elements.
+ success = actual_new_len == new_len;
+ if (!success && should_throw == THROW_ON_ERROR) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kStrictDeleteProperty,
+ isolate->factory()->NewNumberFromUint(actual_new_len - 1), a));
+ }
+ return success;
+}
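// The ordering above is the subtle part: when a shrinking length change also
// carries [[Writable]]: false, writability is deferred (steps 14-15), the
// elements are deleted while "length" is still writable (steps 16-19), and
// only then is the property clamped read-only (step 20). A schematic with a
// hypothetical container standing in for the backing store and ignoring
// non-deletable elements (step 19d):
#include <cstddef>
#include <vector>

struct ArraySketch {
  std::vector<int> elements;
  bool length_writable = true;
};

bool SetLengthSketch(ArraySketch* a, size_t new_len, bool make_read_only) {
  if (new_len < a->elements.size() && !a->length_writable) {
    return false;  // step 13: shrinking past a read-only length fails
  }
  a->elements.resize(new_len);  // steps 16-19: delete (or add) elements
  if (make_read_only) a->length_writable = false;  // step 20: freeze last
  return true;
}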
+
+
+// static
+bool JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc) {
+ bool success = false;
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::HIDDEN);
+ DCHECK(success); // ...so creating a LookupIterator can't fail.
+ return GetOwnPropertyDescriptor(&it, desc);
+}
+
+
+// TODO(jkummerow): Any chance to unify this with
+// "MaybeHandle<Object> GetOwnProperty()" in runtime-object.cc?
+
+// TODO(jkummerow/verwaest): Proxy support: call getOwnPropertyDescriptor trap
+// and convert the result (if it's an object) with ToPropertyDescriptor.
+
+// ES6 9.1.5.1
+// Returns true on success; false if there was an exception or no property.
+// static
+bool JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
+ PropertyDescriptor* desc) {
+ Isolate* isolate = it->isolate();
+ // 1. (Assert)
+ // 2. If O does not have an own property with key P, return undefined.
+ Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
+
+ if (!maybe.IsJust()) return false;
+ PropertyAttributes attrs = maybe.FromJust();
+ if (attrs == ABSENT) return false;
+ DCHECK(!isolate->has_pending_exception());
+
+ // 3. Let D be a newly created Property Descriptor with no fields.
+ DCHECK(desc->is_empty());
+ // 4. Let X be O's own property whose key is P.
+ // 5. If X is a data property, then
+ bool is_accessor_pair = it->state() == LookupIterator::ACCESSOR &&
+ it->GetAccessors()->IsAccessorPair();
+ if (!is_accessor_pair) {
+ // 5a. Set D.[[Value]] to the value of X's [[Value]] attribute.
+ Handle<Object> value;
+ if (!JSObject::GetProperty(it).ToHandle(&value)) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ desc->set_value(value);
+ // 5b. Set D.[[Writable]] to the value of X's [[Writable]] attribute
+ desc->set_writable((attrs & READ_ONLY) == 0);
+ } else {
+ // 6. Else X is an accessor property, so
+ Handle<AccessorPair> accessors =
+ Handle<AccessorPair>::cast(it->GetAccessors());
+ // 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
+ desc->set_get(handle(accessors->GetComponent(ACCESSOR_GETTER), isolate));
+ // 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
+ desc->set_set(handle(accessors->GetComponent(ACCESSOR_SETTER), isolate));
+ }
+
+ // 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
+ desc->set_enumerable((attrs & DONT_ENUM) == 0);
+ // 8. Set D.[[Configurable]] to the value of X's [[Configurable]] attribute.
+ desc->set_configurable((attrs & DONT_DELETE) == 0);
+ // 9. Return D.
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(desc) !=
+ PropertyDescriptor::IsDataDescriptor(desc));
+ return true;
}
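+// A rough JS-level sketch of the two branches above (illustration only):
+//
+//   Object.getOwnPropertyDescriptor({x: 1}, 'x')
+//   // -> {value: 1, writable: true, enumerable: true, configurable: true}
+//
+//   Object.getOwnPropertyDescriptor({get x() { return 1; }}, 'x')
+//   // -> {get: <function>, set: undefined, enumerable: true,
+//   //     configurable: true}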
@@ -5966,41 +6819,51 @@ bool JSObject::ReferencesObject(Object* obj) {
}
-MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- if (!object->map()->is_extensible()) return object;
+Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
+ ShouldThrow should_throw) {
+ if (!object->IsJSObject()) return Just(false);
+ // TODO(neis): Deal with proxies.
+ return JSObject::PreventExtensions(Handle<JSObject>::cast(object),
+ should_throw);
+}
- if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
- return PreventExtensionsWithTransition<NONE>(object);
- }
+Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
+ ShouldThrow should_throw) {
Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
+ return PreventExtensionsWithTransition<NONE>(object, should_throw);
+ }
+
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->false_value();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ UNREACHABLE();
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
}
+ if (!object->map()->is_extensible()) return Just(true);
+
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return object;
+ if (iter.IsAtEnd()) return Just(true);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter));
+ return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter),
+ should_throw);
}
- // It's not possible to seal objects with external array elements
- if (object->HasFixedTypedArrayElements()) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
- Object);
- }
-
- // If there are fast elements we normalize.
- Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
+ if (!object->HasFixedTypedArrayElements()) {
+ // If there are fast elements we normalize.
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ DCHECK(object->HasDictionaryElements() ||
+ object->HasSlowArgumentsElements());
- // Make sure that we never go back to fast case.
- object->RequireSlowElements(*dictionary);
+ // Make sure that we never go back to fast case.
+ object->RequireSlowElements(*dictionary);
+ }
// Do a map transition, other objects with this map may still
// be extensible.
@@ -6012,24 +6875,29 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
DCHECK(!object->map()->is_extensible());
if (object->map()->is_observed()) {
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(
isolate,
EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
isolate->factory()->the_hole_value()),
- Object);
+ Nothing<bool>());
}
- return object;
+ return Just(true);
}
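+// JS-level effect (sketch): after Object.preventExtensions(o), adding a new
+// property fails silently in sloppy mode and throws a TypeError in strict
+// mode; existing properties are untouched. If o is observed (pre-ES7
+// Object.observe), a "preventExtensions" change record is enqueued, matching
+// the EnqueueChangeRecord call above.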
-bool JSObject::IsExtensible() {
- if (IsJSGlobalProxy()) {
- PrototypeIterator iter(GetIsolate(), this);
+bool JSObject::IsExtensible(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
+ return true;
+ }
+ if (object->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, *object);
if (iter.IsAtEnd()) return false;
DCHECK(iter.GetCurrent()->IsJSGlobalObject());
return iter.GetCurrent<JSObject>()->map()->is_extensible();
}
- return map()->is_extensible();
+ return object->map()->is_extensible();
}
@@ -6058,8 +6926,8 @@ static void ApplyAttributesToDictionary(Dictionary* dictionary,
template <PropertyAttributes attrs>
-MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
- Handle<JSObject> object) {
+Maybe<bool> JSObject::PreventExtensionsWithTransition(
+ Handle<JSObject> object, ShouldThrow should_throw) {
STATIC_ASSERT(attrs == NONE || attrs == SEALED || attrs == FROZEN);
// Sealing/freezing sloppy arguments should be handled elsewhere.
@@ -6067,29 +6935,28 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
DCHECK(!object->map()->is_observed());
Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->false_value();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ UNREACHABLE();
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
}
+ if (attrs == NONE && !object->map()->is_extensible()) return Just(true);
+
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return object;
+ if (iter.IsAtEnd()) return Just(true);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return PreventExtensionsWithTransition<attrs>(
- PrototypeIterator::GetCurrent<JSObject>(iter));
- }
-
- // It's not possible to seal or freeze objects with external array elements
- if (object->HasFixedTypedArrayElements()) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
- Object);
+ PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
}
Handle<SeededNumberDictionary> new_element_dictionary;
- if (!object->HasDictionaryElements()) {
+ if (!object->HasFixedTypedArrayElements() &&
+ !object->HasDictionaryElements()) {
int length =
object->IsJSArray()
? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
@@ -6115,7 +6982,8 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
TransitionArray::SearchSpecial(*old_map, *transition_marker);
if (transition != NULL) {
Handle<Map> transition_map(transition, isolate);
- DCHECK(transition_map->has_dictionary_elements());
+ DCHECK(transition_map->has_dictionary_elements() ||
+ transition_map->has_fixed_typed_array_elements());
DCHECK(!transition_map->is_extensible());
JSObject::MigrateToMap(object, transition_map);
} else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
@@ -6134,11 +7002,13 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
Handle<Map> new_map =
Map::Copy(handle(object->map()), "SlowCopyForPreventExtensions");
new_map->set_is_extensible(false);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ if (!new_element_dictionary.is_null()) {
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ }
JSObject::MigrateToMap(object, new_map);
if (attrs != NONE) {
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
ApplyAttributesToDictionary(object->global_dictionary(), attrs);
} else {
ApplyAttributesToDictionary(object->property_dictionary(), attrs);
@@ -6146,6 +7016,18 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
}
}
+ // Both seal and preventExtensions always go through without modifications to
+ // typed array elements. Freeze works only if there are no actual elements.
+ if (object->HasFixedTypedArrayElements()) {
+ if (attrs == FROZEN &&
+ JSArrayBufferView::cast(*object)->byte_length()->Number() > 0) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kCannotFreezeArrayBufferView));
+ return Nothing<bool>();
+ }
+ return Just(true);
+ }
+
DCHECK(object->map()->has_dictionary_elements());
if (!new_element_dictionary.is_null()) {
object->set_elements(*new_element_dictionary);
@@ -6160,17 +7042,21 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
}
}
- return object;
+ return Just(true);
}
MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
- return PreventExtensionsWithTransition<FROZEN>(object);
+ MAYBE_RETURN_NULL(
+ PreventExtensionsWithTransition<FROZEN>(object, THROW_ON_ERROR));
+ return object;
}
MaybeHandle<Object> JSObject::Seal(Handle<JSObject> object) {
- return PreventExtensionsWithTransition<SEALED>(object);
+ MAYBE_RETURN_NULL(
+ PreventExtensionsWithTransition<SEALED>(object, THROW_ON_ERROR));
+ return object;
}
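+// Illustrative sketch of the resulting typed-array behaviour (per the
+// transition code above):
+//
+//   var ta = new Int32Array(8);
+//   Object.preventExtensions(ta);      // ok
+//   Object.seal(ta);                   // ok; elements remain writable
+//   Object.freeze(new Int32Array(0));  // ok; no elements to make read-only
+//   Object.freeze(ta);                 // TypeError
+//                                      // (kCannotFreezeArrayBufferView)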
@@ -6546,24 +7432,6 @@ bool JSReceiver::IsSimpleEnum() {
}
-static bool FilterKey(Object* key, PropertyAttributes filter) {
- if ((filter & SYMBOLIC) && key->IsSymbol()) {
- return true;
- }
-
- if ((filter & PRIVATE_SYMBOL) &&
- key->IsSymbol() && Symbol::cast(key)->is_private()) {
- return true;
- }
-
- if ((filter & STRING) && !key->IsSymbol()) {
- return true;
- }
-
- return false;
-}
-
-
int Map::NumberOfDescribedProperties(DescriptorFlag which,
PropertyAttributes filter) {
int result = 0;
@@ -6573,7 +7441,7 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
: NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- !FilterKey(descs->GetKey(i), filter)) {
+ !descs->GetKey(i)->FilterKey(filter)) {
result++;
}
}
@@ -6618,92 +7486,91 @@ static Handle<FixedArray> ReduceFixedArrayTo(
}
-Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result) {
- Isolate* isolate = object->GetIsolate();
- if (object->HasFastProperties()) {
- int own_property_count = object->map()->EnumLength();
- // If the enum length of the given map is set to kInvalidEnumCache, this
- // means that the map itself has never used the present enum cache. The
- // first step to using the cache is to set the enum length of the map by
- // counting the number of own descriptors that are not DONT_ENUM or
- // SYMBOLIC.
- if (own_property_count == kInvalidEnumCacheSentinel) {
- own_property_count = object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_SHOW);
- } else {
- DCHECK(own_property_count == object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_SHOW));
- }
-
- if (object->map()->instance_descriptors()->HasEnumCache()) {
- DescriptorArray* desc = object->map()->instance_descriptors();
- Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
-
- // In case the number of properties required in the enum are actually
- // present, we can reuse the enum cache. Otherwise, this means that the
- // enum cache was generated for a previous (smaller) version of the
- // Descriptor Array. In that case we regenerate the enum cache.
- if (own_property_count <= keys->length()) {
- if (cache_result) object->map()->SetEnumLength(own_property_count);
- isolate->counters()->enum_cache_hits()->Increment();
- return ReduceFixedArrayTo(keys, own_property_count);
- }
- }
+namespace {
- Handle<Map> map(object->map());
+Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object,
+ bool cache_enum_length) {
+ Handle<Map> map(object->map());
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ int own_property_count = map->EnumLength();
+ // If the enum length of the given map is set to kInvalidEnumCacheSentinel,
+ // the map itself has never used the present enum cache. The first step to
+ // using the cache is to set the enum length of the map by counting the
+ // number of own descriptors that are not DONT_ENUM or SYMBOLIC.
+ if (own_property_count == kInvalidEnumCacheSentinel) {
+ own_property_count =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_SHOW);
+ } else {
+ DCHECK(own_property_count ==
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_SHOW));
+ }
- if (map->instance_descriptors()->IsEmpty()) {
+ if (descs->HasEnumCache()) {
+ Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
+ // If the number of properties required in the enum is already present,
+ // we can reuse the enum cache. Otherwise, the enum cache was generated
+ // for a previous (smaller) version of the descriptor array, and we
+ // regenerate it.
+ if (own_property_count <= keys->length()) {
isolate->counters()->enum_cache_hits()->Increment();
- if (cache_result) map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
+ if (cache_enum_length) map->SetEnumLength(own_property_count);
+ return ReduceFixedArrayTo(keys, own_property_count);
}
+ }
- isolate->counters()->enum_cache_misses()->Increment();
+ if (descs->IsEmpty()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_enum_length) map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(
- own_property_count);
- Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
- own_property_count);
+ isolate->counters()->enum_cache_misses()->Increment();
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+ Handle<FixedArray> storage =
+ isolate->factory()->NewFixedArray(own_property_count);
+ Handle<FixedArray> indices =
+ isolate->factory()->NewFixedArray(own_property_count);
- int size = map->NumberOfOwnDescriptors();
- int index = 0;
+ int size = map->NumberOfOwnDescriptors();
+ int index = 0;
- for (int i = 0; i < size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- Object* key = descs->GetKey(i);
- if (!(details.IsDontEnum() || key->IsSymbol())) {
- storage->set(index, key);
- if (!indices.is_null()) {
- if (details.type() != DATA) {
- indices = Handle<FixedArray>();
- } else {
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- int load_by_field_index = field_index.GetLoadByFieldIndex();
- indices->set(index, Smi::FromInt(load_by_field_index));
- }
- }
- index++;
+ for (int i = 0; i < size; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Object* key = descs->GetKey(i);
+ if (details.IsDontEnum() || key->IsSymbol()) continue;
+ storage->set(index, key);
+ if (!indices.is_null()) {
+ if (details.type() != DATA) {
+ indices = Handle<FixedArray>();
+ } else {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ int load_by_field_index = field_index.GetLoadByFieldIndex();
+ indices->set(index, Smi::FromInt(load_by_field_index));
}
}
- DCHECK(index == storage->length());
+ index++;
+ }
+ DCHECK(index == storage->length());
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage,
- *storage,
- indices.is_null() ? Object::cast(Smi::FromInt(0))
- : Object::cast(*indices));
- if (cache_result) {
- object->map()->SetEnumLength(own_property_count);
- }
- return storage;
- } else if (object->IsGlobalObject()) {
+ DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
+ if (cache_enum_length) {
+ map->SetEnumLength(own_property_count);
+ }
+ return storage;
+}
+
+} // namespace
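+// Illustrative sketch: the enum cache makes repeated for-in over objects
+// that share a map cheap, e.g.
+//
+//   function keys(o) { var r = []; for (var k in o) r.push(k); return r; }
+//   keys({a: 1, b: 2});  // builds and caches ['a', 'b'] on the map
+//   keys({a: 3, b: 4});  // same map: counted as an enum_cache_hit, reused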
+
+
+Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_enum_length) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->HasFastProperties()) {
+ return GetFastEnumPropertyKeys(isolate, object, cache_enum_length);
+ } else if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(object->global_dictionary());
int length = dictionary->NumberOfEnumElements();
if (length == 0) {
@@ -6725,126 +7592,15 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
}
-Handle<FixedArray> KeyAccumulator::GetKeys() {
- if (length_ == 0) {
- return isolate_->factory()->empty_fixed_array();
- }
- if (set_.is_null()) {
- keys_->Shrink(length_);
- return keys_;
- }
- // copy over results from set_
- Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
- for (int i = 0; i < length_; i++) {
- result->set(i, set_->KeyAt(i));
- }
- return result;
-}
-
-
-void KeyAccumulator::AddKey(Handle<Object> key, int check_limit) {
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- DCHECK(key->IsNumber() || key->IsName());
- }
-#endif
- if (!set_.is_null()) {
- set_ = OrderedHashSet::Add(set_, key);
- length_ = set_->NumberOfElements();
- return;
- }
- // check if we already have the key in the case we are still using
- // the keys_ FixedArray
- check_limit = Min(check_limit, length_);
- for (int i = 0; i < check_limit; i++) {
- Object* current = keys_->get(i);
- if (current->KeyEquals(*key)) return;
- }
- EnsureCapacity(length_);
- keys_->set(length_, *key);
- length_++;
-}
-
-
-void KeyAccumulator::AddKeys(Handle<FixedArray> array,
- FixedArray::KeyFilter filter) {
- int add_length = array->length();
- if (add_length == 0) return;
- if (keys_.is_null() && filter == FixedArray::ALL_KEYS) {
- keys_ = array;
- length_ = keys_->length();
- return;
- }
- PrepareForComparisons(add_length);
- int previous_key_count = length_;
- for (int i = 0; i < add_length; i++) {
- Handle<Object> current(array->get(i), isolate_);
- if (filter == FixedArray::NON_SYMBOL_KEYS && current->IsSymbol()) continue;
- AddKey(current, previous_key_count);
- }
-}
-
-
-void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
- FixedArray::KeyFilter filter) {
- DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
- ElementsAccessor* accessor = array_like->GetElementsAccessor();
- accessor->AddElementsToKeyAccumulator(array_like, this, filter);
-}
-
-
-void KeyAccumulator::PrepareForComparisons(int count) {
- // Depending on how many comparisons we do we should switch to the
- // hash-table-based checks which have a one-time overhead for
- // initializing but O(1) for HasKey checks.
- if (!set_.is_null()) return;
- // This limit was obtained through evaluation of a microbench.
- if (length_ * count < 50) return;
- set_ = OrderedHashSet::Allocate(isolate_, length_);
- for (int i = 0; i < length_; i++) {
- Handle<Object> value(keys_->get(i), isolate_);
- set_ = OrderedHashSet::Add(set_, value);
- }
-}
-
-
-void KeyAccumulator::EnsureCapacity(int capacity) {
- if (keys_.is_null() || keys_->length() <= capacity) {
- Grow();
- }
-}
-
-
-void KeyAccumulator::Grow() {
- // The OrderedHashSet handles growing by itself.
- if (!set_.is_null()) return;
- // Otherwise, grow the internal keys_ FixedArray
- int capacity = keys_.is_null() ? 16 : keys_->length() * 2 + 16;
- Handle<FixedArray> new_keys = isolate_->factory()->NewFixedArray(capacity);
- if (keys_.is_null()) {
- keys_ = new_keys;
- return;
- }
- int buffer_length = keys_->length();
- {
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = new_keys->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < buffer_length; i++) {
- new_keys->set(i, keys_->get(i), mode);
- }
- }
- keys_ = new_keys;
-}
-
-
MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
- KeyCollectionType type) {
+ KeyCollectionType type,
+ KeyFilter filter,
+ GetKeysConversion getConversion) {
USE(ContainsOnlyValidKeys);
Isolate* isolate = object->GetIsolate();
- KeyAccumulator accumulator(isolate);
+ KeyAccumulator accumulator(isolate, filter);
Handle<JSFunction> arguments_function(
JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
-
PrototypeIterator::WhereToEnd end = type == OWN_ONLY
? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
@@ -6852,6 +7608,7 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(end); iter.Advance()) {
+ accumulator.NextPrototype();
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
Handle<JSProxy> proxy = PrototypeIterator::GetCurrent<JSProxy>(iter);
Handle<Object> args[] = { proxy };
@@ -6864,14 +7621,15 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
arraysize(args),
args),
FixedArray);
- accumulator.AddKeys(Handle<JSObject>::cast(names), FixedArray::ALL_KEYS);
+ accumulator.AddKeysFromProxy(Handle<JSObject>::cast(names));
break;
}
Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
// Check access rights if required.
- if (current->IsAccessCheckNeeded() && !isolate->MayAccess(current)) {
+ if (current->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), current)) {
if (iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
isolate->ReportFailedAccessCheck(current);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
@@ -6879,56 +7637,58 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
break;
}
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- accumulator.AddKeys(element_keys, FixedArray::ALL_KEYS);
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
+ JSObject::CollectOwnElementKeys(current, &accumulator,
+ static_cast<PropertyAttributes>(DONT_ENUM));
// Add the element keys from the interceptor.
if (current->HasIndexedInterceptor()) {
Handle<JSObject> result;
- if (JSObject::GetKeysForIndexedInterceptor(
- current, object).ToHandle(&result)) {
- accumulator.AddKeys(result, FixedArray::ALL_KEYS);
+ if (JSObject::GetKeysForIndexedInterceptor(current, object)
+ .ToHandle(&result)) {
+ accumulator.AddElementKeysFromInterceptor(result);
}
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
- }
-
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- bool cache_enum_keys =
- ((current->map()->GetConstructor() != *arguments_function) &&
- !current->IsJSValue() && !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() && !current->HasIndexedInterceptor());
- // Compute the property keys and cache them if possible.
-
- Handle<FixedArray> enum_keys =
- JSObject::GetEnumPropertyKeys(current, cache_enum_keys);
- accumulator.AddKeys(enum_keys, FixedArray::ALL_KEYS);
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
-
- // Add the non-symbol property keys from the interceptor.
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ }
+
+ if (filter == SKIP_SYMBOLS) {
+ // We can cache the computed property keys if access checks are
+ // not needed and no interceptors are involved.
+ //
+ // We do not use the cache if the object has elements and
+ // therefore it does not make sense to cache the property names
+ // for arguments objects. Arguments objects will always have
+ // elements.
+ // Wrapped strings have elements, but don't have an elements
+ // array or dictionary, so the fast inline test for whether to
+ // use the cache would say yes; therefore we must not create a cache.
+ bool cache_enum_length =
+ ((current->map()->GetConstructor() != *arguments_function) &&
+ !current->IsJSValue() && !current->IsAccessCheckNeeded() &&
+ !current->HasNamedInterceptor() &&
+ !current->HasIndexedInterceptor());
+ // Compute the property keys and cache them if possible.
+ Handle<FixedArray> enum_keys =
+ JSObject::GetEnumPropertyKeys(current, cache_enum_length);
+ accumulator.AddKeys(enum_keys);
+ } else {
+ DCHECK(filter == INCLUDE_SYMBOLS);
+ PropertyAttributes attr_filter =
+ static_cast<PropertyAttributes>(DONT_ENUM | PRIVATE_SYMBOL);
+ current->CollectOwnPropertyNames(&accumulator, attr_filter);
+ }
+
+ // Add the property keys from the interceptor.
if (current->HasNamedInterceptor()) {
Handle<JSObject> result;
- if (JSObject::GetKeysForNamedInterceptor(
- current, object).ToHandle(&result)) {
- accumulator.AddKeys(result, FixedArray::NON_SYMBOL_KEYS);
+ if (JSObject::GetKeysForNamedInterceptor(current, object)
+ .ToHandle(&result)) {
+ accumulator.AddKeys(result);
}
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
}
}
- Handle<FixedArray> keys = accumulator.GetKeys();
+ Handle<FixedArray> keys = accumulator.GetKeys(getConversion);
DCHECK(ContainsOnlyValidKeys(keys));
return keys;
}
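+// Illustrative reading of the new parameters (names as used above): with
+// OWN_ONLY and SKIP_SYMBOLS the result is roughly the Object.keys() set of
+// own enumerable string-keyed properties, while INCLUDE_PROTOS walks the
+// prototype chain (as for-in does) and INCLUDE_SYMBOLS additionally keeps
+// non-private symbol keys.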
@@ -6973,31 +7733,41 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ return DefineAccessor(&it, getter, setter, attributes);
+}
- if (it.state() == LookupIterator::ACCESS_CHECK) {
- if (!it.HasAccess()) {
- isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+
+MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ Isolate* isolate = it->isolate();
+
+ if (it->state() == LookupIterator::ACCESS_CHECK) {
+ if (!it->HasAccess()) {
+ isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
- it.Next();
+ it->Next();
}
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
// Ignore accessors on typed arrays.
- if (it.IsElement() && object->HasFixedTypedArrayElements()) {
- return it.factory()->undefined_value();
+ if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ return it->factory()->undefined_value();
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
bool is_observed = object->map()->is_observed() &&
- !isolate->IsInternallyUsedPropertyName(name);
+ !isolate->IsInternallyUsedPropertyName(it->GetName());
bool preexists = false;
if (is_observed) {
- CHECK(GetPropertyAttributes(&it).IsJust());
- preexists = it.IsFound();
- if (preexists && (it.state() == LookupIterator::DATA ||
- it.GetAccessors()->IsAccessorInfo())) {
- old_value = GetProperty(&it).ToHandleChecked();
+ CHECK(GetPropertyAttributes(it).IsJust());
+ preexists = it->IsFound();
+ if (preexists && (it->state() == LookupIterator::DATA ||
+ it->GetAccessors()->IsAccessorInfo())) {
+ old_value = GetProperty(it).ToHandleChecked();
}
}
@@ -7006,10 +7776,10 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull() || !setter->IsNull());
if (!getter->IsNull()) {
- it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
+ it->TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
}
if (!setter->IsNull()) {
- it.TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
+ it->TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
}
if (is_observed) {
@@ -7017,7 +7787,8 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
AssertNoContextChange ncc(isolate);
const char* type = preexists ? "reconfigure" : "add";
RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, type, name, old_value), Object);
+ isolate, EnqueueChangeRecord(object, type, it->GetName(), old_value),
+ Object);
}
return isolate->factory()->undefined_value();
@@ -7145,7 +7916,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
return GetHeap()->undefined_value();
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
return global_dictionary()->SlowReverseLookup(value);
} else {
return property_dictionary()->SlowReverseLookup(value);
@@ -7267,6 +8038,41 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
}
+Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
+ int in_object_properties,
+ int unused_property_fields) {
+#ifdef DEBUG
+ Object* constructor = map->GetConstructor();
+ DCHECK(constructor->IsJSFunction());
+ DCHECK_EQ(*map, JSFunction::cast(constructor)->initial_map());
+#endif
+ // Initial maps must always own their descriptors, and their descriptor
+ // arrays must not contain descriptors that do not belong to the map.
+ DCHECK(map->owns_descriptors());
+ DCHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
+
+ Handle<Map> result = RawCopy(map, instance_size);
+
+ // Note that instance_type and instance_size are already set when the map
+ // is allocated.
+ result->SetInObjectProperties(in_object_properties);
+ result->set_unused_property_fields(unused_property_fields);
+
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors > 0) {
+ // The copy will use the same descriptors array.
+ result->UpdateDescriptors(map->instance_descriptors(),
+ map->GetLayoutDescriptor());
+ result->SetNumberOfOwnDescriptors(number_of_own_descriptors);
+
+ DCHECK_EQ(result->NumberOfFields(),
+ in_object_properties - unused_property_fields);
+ }
+
+ return result;
+}
+
+
Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
Handle<Map> result = RawCopy(map, map->instance_size());
@@ -7286,8 +8092,8 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
// Sanity check. This path is only to be taken if the map owns its descriptor
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
- DCHECK(map->NumberOfOwnDescriptors() ==
- map->instance_descriptors()->number_of_descriptors());
+ DCHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(map);
Handle<Name> name = descriptor->GetKey();
@@ -7352,7 +8158,15 @@ void Map::TraceAllTransitions(Map* map) {
void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag) {
- parent->set_owns_descriptors(false);
+ if (!parent->GetBackPointer()->IsUndefined()) {
+ parent->set_owns_descriptors(false);
+ } else {
+ // |parent| is an initial map and must keep ownership of its descriptors;
+ // the descriptor array must not contain descriptors that do not belong to
+ // the map.
+ DCHECK(parent->owns_descriptors());
+ DCHECK_EQ(parent->NumberOfOwnDescriptors(),
+ parent->instance_descriptors()->number_of_descriptors());
+ }
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
#if TRACE_MAPS
@@ -7606,7 +8420,9 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
transition_marker, reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ }
return new_map;
}
@@ -7848,7 +8664,9 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
// Ensure the key is unique.
descriptor->KeyToUniqueName();
+ // Share descriptors only if the map owns its descriptors and is not an
+ // initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
+ !map->GetBackPointer()->IsUndefined() &&
TransitionArray::CanHaveMoreTransitions(map)) {
return ShareDescriptor(map, descriptors, descriptor);
}
@@ -8666,18 +9484,27 @@ void DescriptorArray::Replace(int index, Descriptor* descriptor) {
}
-void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache,
- Object* new_index_cache) {
- DCHECK(bridge_storage->length() >= kEnumCacheBridgeLength);
- DCHECK(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
- DCHECK(!IsEmpty());
- DCHECK(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- set(kEnumCacheIndex, bridge_storage);
+// static
+void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
+ Isolate* isolate,
+ Handle<FixedArray> new_cache,
+ Handle<FixedArray> new_index_cache) {
+ DCHECK(!descriptors->IsEmpty());
+ FixedArray* bridge_storage;
+ bool needs_new_enum_cache = !descriptors->HasEnumCache();
+ if (needs_new_enum_cache) {
+ bridge_storage = *isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
+ } else {
+ bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheIndex));
+ }
+ bridge_storage->set(kEnumCacheBridgeCacheIndex, *new_cache);
+ bridge_storage->set(kEnumCacheBridgeIndicesCacheIndex,
+ new_index_cache.is_null() ? Object::cast(Smi::FromInt(0))
+ : *new_index_cache);
+ if (needs_new_enum_cache) {
+ descriptors->set(kEnumCacheIndex, bridge_storage);
+ }
}
@@ -8801,6 +9628,47 @@ Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
}
+// static
+Handle<BindingsArray> BindingsArray::New(Isolate* isolate,
+ Handle<TypeFeedbackVector> vector,
+ Handle<JSReceiver> bound_function,
+ Handle<Object> bound_this,
+ int number_of_bindings) {
+ Handle<FixedArray> bindings = isolate->factory()->NewFixedArray(
+ number_of_bindings + kFirstBindingIndex);
+ Handle<BindingsArray> casted_bindings = Handle<BindingsArray>::cast(bindings);
+ casted_bindings->set_feedback_vector(*vector);
+ casted_bindings->set_bound_function(*bound_function);
+ casted_bindings->set_bound_this(*bound_this);
+ return casted_bindings;
+}
+
+
+// static
+Handle<JSArray> BindingsArray::CreateBoundArguments(
+ Handle<BindingsArray> bindings) {
+ int bound_argument_count = bindings->bindings_count();
+ Factory* factory = bindings->GetIsolate()->factory();
+ Handle<FixedArray> arguments = factory->NewFixedArray(bound_argument_count);
+ bindings->CopyTo(kFirstBindingIndex, *arguments, 0, bound_argument_count);
+ return factory->NewJSArrayWithElements(arguments);
+}
+
+
+// static
+Handle<JSArray> BindingsArray::CreateRuntimeBindings(
+ Handle<BindingsArray> bindings) {
+ Factory* factory = bindings->GetIsolate()->factory();
+ // A runtime bindings array consists of
+ // [bound function, bound this, [arg0, arg1, ...]].
+ Handle<FixedArray> runtime_bindings =
+ factory->NewFixedArray(2 + bindings->bindings_count());
+ bindings->CopyTo(kBoundFunctionIndex, *runtime_bindings, 0,
+ 2 + bindings->bindings_count());
+ return factory->NewJSArrayWithElements(runtime_bindings);
+}
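+// Illustrative layout (following the accessors above): for
+//   var bound = f.bind(thisArg, 1, 2);
+// the BindingsArray holds [feedback vector, f, thisArg, 1, 2];
+// CreateBoundArguments yields [1, 2] and CreateRuntimeBindings yields
+// [f, thisArg, 1, 2] as JSArrays for consumption by the runtime.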
+
+
int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out,
CatchPrediction* prediction_out) {
int innermost_handler = -1, innermost_start = -1;
@@ -10118,15 +10986,6 @@ bool Map::EquivalentToForNormalization(Map* other,
}
-void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
- // Iterate over all fields in the body but take care in dealing with
- // the code entry.
- IteratePointers(v, kPropertiesOffset, kCodeEntryOffset);
- v->VisitCodeEntry(this->address() + kCodeEntryOffset);
- IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size);
-}
-
-
bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
DisallowHeapAllocation no_gc;
if (shared() == candidate) return true;
@@ -10193,6 +11052,7 @@ void JSFunction::AttemptConcurrentOptimization() {
void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Code> code) {
Isolate* isolate = shared->GetIsolate();
+ if (isolate->serializer_enabled()) return;
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
Handle<Object> value(shared->optimized_code_map(), isolate);
if (value->IsSmi()) return; // Empty code maps are unsupported.
@@ -10206,6 +11066,7 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<HeapObject> code, Handle<LiteralsArray> literals,
BailoutId osr_ast_id) {
Isolate* isolate = shared->GetIsolate();
+ if (isolate->serializer_enabled()) return;
DCHECK(*code == isolate->heap()->undefined_value() ||
!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
DCHECK(*code == isolate->heap()->undefined_value() ||
@@ -10236,12 +11097,11 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
// Copy old optimized code map and append one new entry.
new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
old_code_map, kEntryLength, TENURED);
- int old_length = old_code_map->length();
- // Zap the old map to avoid any stale entries. Note that this is required
- // for correctness because entries are being treated weakly by the GC.
- MemsetPointer(old_code_map->data_start(), isolate->heap()->the_hole_value(),
- old_length);
- entry = old_length;
+ // TODO(mstarzinger): Temporary workaround. The allocation above might have
+ // flushed the optimized code map and the copy we created is full of holes.
+ // For now we just give up on adding the entry and pretend it got flushed.
+ if (shared->optimized_code_map()->IsSmi()) return;
+ entry = old_code_map->length();
}
new_code_map->set(entry + kContextOffset, *native_context);
new_code_map->set(entry + kCachedCodeOffset, *code);
@@ -10260,21 +11120,24 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
+
+ // Zap any old optimized code map.
+ if (!shared->optimized_code_map()->IsSmi()) {
+ FixedArray* old_code_map = FixedArray::cast(shared->optimized_code_map());
+ old_code_map->FillWithHoles(0, old_code_map->length());
+ }
+
shared->set_optimized_code_map(*new_code_map);
}
void SharedFunctionInfo::ClearOptimizedCodeMap() {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
-
- // If the next map link slot is already used then the function was
- // enqueued with code flushing and we remove it now.
- if (!code_map->get(kNextMapIndex)->IsUndefined()) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictOptimizedCodeMap(this);
+ // Zap any old optimized code map.
+ if (!optimized_code_map()->IsSmi()) {
+ FixedArray* old_code_map = FixedArray::cast(optimized_code_map());
+ old_code_map->FillWithHoles(0, old_code_map->length());
}
- DCHECK(code_map->get(kNextMapIndex)->IsUndefined());
set_optimized_code_map(Smi::FromInt(0));
}
@@ -10378,16 +11241,22 @@ static void ShrinkInstanceSize(Map* map, void* data) {
void JSFunction::CompleteInobjectSlackTracking() {
DCHECK(has_initial_map());
- Map* map = initial_map();
+ initial_map()->CompleteInobjectSlackTracking();
+}
- DCHECK(map->counter() >= Map::kSlackTrackingCounterEnd - 1);
- map->set_counter(Map::kRetainingCounterStart);
- int slack = map->unused_property_fields();
- TransitionArray::TraverseTransitionTree(map, &GetMinInobjectSlack, &slack);
+void Map::CompleteInobjectSlackTracking() {
+ // Has to be an initial map.
+ DCHECK(GetBackPointer()->IsUndefined());
+
+ DCHECK_GE(counter(), kSlackTrackingCounterEnd - 1);
+ set_counter(kRetainingCounterStart);
+
+ int slack = unused_property_fields();
+ TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
- TransitionArray::TraverseTransitionTree(map, &ShrinkInstanceSize, &slack);
+ TransitionArray::TraverseTransitionTree(this, &ShrinkInstanceSize, &slack);
}
}
@@ -10414,7 +11283,7 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
// static
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
PrototypeOptimizationMode mode) {
- if (object->IsGlobalObject()) return;
+ if (object->IsJSGlobalObject()) return;
if (object->IsJSGlobalProxy()) return;
if (mode == FAST_PROTOTYPE && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
@@ -10478,7 +11347,8 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
}
Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
if (maybe_proto->IsJSGlobalProxy()) continue;
- // Proxies on the prototype chain are not supported.
+ // Proxies on the prototype chain are not supported. They make it
+ // impossible to make any assumptions about the prototype chain anyway.
if (maybe_proto->IsJSProxy()) return;
Handle<JSObject> proto = Handle<JSObject>::cast(maybe_proto);
Handle<PrototypeInfo> proto_info =
@@ -10769,7 +11639,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
construct_prototype = handle(
- isolate->context()->native_context()->initial_object_prototype(),
+ function->context()->native_context()->initial_object_prototype(),
isolate);
} else {
function->map()->set_non_instance_prototype(false);
@@ -10826,17 +11696,16 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
- int instance_size;
- int in_object_properties;
if (function->shared()->is_generator()) {
instance_type = JS_GENERATOR_OBJECT_TYPE;
- instance_size = JSGeneratorObject::kSize;
- in_object_properties = 0;
} else {
instance_type = JS_OBJECT_TYPE;
- instance_size = function->shared()->CalculateInstanceSize();
- in_object_properties = function->shared()->CalculateInObjectProperties();
}
+ int instance_size;
+ int in_object_properties;
+ function->CalculateInstanceSize(instance_type, 0, &instance_size,
+ &in_object_properties);
+
Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
if (function->map()->is_strong()) {
map->set_is_strong();
@@ -10863,8 +11732,71 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
}
-void JSFunction::SetInstanceClassName(String* name) {
- shared()->set_instance_class_name(name);
+Handle<Map> JSFunction::EnsureDerivedHasInitialMap(
+ Handle<JSFunction> original_constructor, Handle<JSFunction> constructor) {
+ DCHECK(constructor->has_initial_map());
+ Isolate* isolate = constructor->GetIsolate();
+ Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
+ if (*original_constructor == *constructor) return constructor_initial_map;
+ if (original_constructor->has_initial_map()) {
+ // Check that |original_constructor|'s initial map is still in sync with
+ // the |constructor|; otherwise we must create a new initial map for
+ // |original_constructor|.
+ if (original_constructor->initial_map()->GetConstructor() == *constructor) {
+ return handle(original_constructor->initial_map(), isolate);
+ }
+ }
+
+ // First create a new map with the size and number of in-object properties
+ // suggested by the function.
+ DCHECK(!original_constructor->shared()->is_generator());
+ DCHECK(!constructor->shared()->is_generator());
+
+ // Fetch or allocate prototype.
+ Handle<Object> prototype;
+ if (original_constructor->has_instance_prototype()) {
+ prototype = handle(original_constructor->instance_prototype(), isolate);
+ } else {
+ prototype = isolate->factory()->NewFunctionPrototype(original_constructor);
+ }
+
+ // Finally link initial map and constructor function if the original
+ // constructor is actually a subclass constructor.
+ if (IsSubclassConstructor(original_constructor->shared()->kind())) {
+// TODO(ishell): v8:4531, allow subclasses of ES6 built-ins to have
+// in-object properties.
+#if 0
+ InstanceType instance_type = constructor_initial_map->instance_type();
+ int internal_fields =
+ JSObject::GetInternalFieldCount(*constructor_initial_map);
+ int pre_allocated = constructor_initial_map->GetInObjectProperties() -
+ constructor_initial_map->unused_property_fields();
+ int instance_size;
+ int in_object_properties;
+ original_constructor->CalculateInstanceSizeForDerivedClass(
+ instance_type, internal_fields, &instance_size, &in_object_properties);
+
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map =
+ Map::CopyInitialMap(constructor_initial_map, instance_size,
+ in_object_properties, unused_property_fields);
+#endif
+ Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
+
+ JSFunction::SetInitialMap(original_constructor, map, prototype);
+ map->SetConstructor(*constructor);
+ original_constructor->StartInobjectSlackTracking();
+ return map;
+
+ } else {
+ Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
+ DCHECK(prototype->IsJSReceiver());
+ if (map->prototype() != *prototype) {
+ Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+ }
+ map->SetConstructor(*constructor);
+ return map;
+ }
}
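+// Illustrative JS-level trigger (sketch):
+//
+//   class A {}
+//   class B extends A { constructor() { super(); } }
+//   new B();  // here A is |constructor| and B is |original_constructor|
+//             // (the new.target); B receives its own initial map, derived
+//             // from A's, with B's prototype installed.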
@@ -11031,11 +11963,11 @@ Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
Handle<Object> property = Object::GetProperty(
script_wrapper, name_or_source_url_key).ToHandleChecked();
DCHECK(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
Handle<Object> result;
// Do not check against pending exception, since this function may be called
// when an exception has already been pending.
- if (!Execution::TryCall(method, script_wrapper, 0, NULL).ToHandle(&result)) {
+ if (!Execution::TryCall(isolate, property, script_wrapper, 0, NULL)
+ .ToHandle(&result)) {
return isolate->factory()->undefined_value();
}
return result;
@@ -11090,9 +12022,8 @@ Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
SharedFunctionInfo::Iterator::Iterator(Isolate* isolate)
- : script_iterator_(isolate), sfi_iterator_(NULL) {
- NextScript();
-}
+ : script_iterator_(isolate),
+ sfi_iterator_(isolate->heap()->noscript_shared_function_infos()) {}
bool SharedFunctionInfo::Iterator::NextScript() {
@@ -11115,6 +12046,38 @@ SharedFunctionInfo* SharedFunctionInfo::Iterator::Next() {
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<Object> script_object) {
if (shared->script() == *script_object) return;
+ Isolate* isolate = shared->GetIsolate();
+
+ // Add shared function info to new script's list. If a collection occurs,
+ // the shared function info may be temporarily in two lists.
+ // This is okay because the gc-time processing of these lists can tolerate
+ // duplicates.
+ Handle<Object> list;
+ if (script_object->IsScript()) {
+ Handle<Script> script = Handle<Script>::cast(script_object);
+ list = handle(script->shared_function_infos(), isolate);
+ } else {
+ list = isolate->factory()->noscript_shared_function_infos();
+ }
+
+#ifdef DEBUG
+ {
+ WeakFixedArray::Iterator iterator(*list);
+ SharedFunctionInfo* next;
+ while ((next = iterator.Next<SharedFunctionInfo>())) {
+ DCHECK_NE(next, *shared);
+ }
+ }
+#endif // DEBUG
+ list = WeakFixedArray::Add(list, shared);
+
+ if (script_object->IsScript()) {
+ Handle<Script> script = Handle<Script>::cast(script_object);
+ script->set_shared_function_infos(*list);
+ } else {
+ isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
+ }
+
// Remove shared function info from old script's list.
if (shared->script()->IsScript()) {
Script* old_script = Script::cast(shared->script());
@@ -11123,23 +12086,12 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
WeakFixedArray::cast(old_script->shared_function_infos());
list->Remove(shared);
}
+ } else {
+ // Remove shared function info from root array.
+ Object* list = isolate->heap()->noscript_shared_function_infos();
+ CHECK(WeakFixedArray::cast(list)->Remove(shared));
}
- // Add shared function info to new script's list.
- if (script_object->IsScript()) {
- Handle<Script> script = Handle<Script>::cast(script_object);
- Handle<Object> list(script->shared_function_infos(), shared->GetIsolate());
-#ifdef DEBUG
- {
- WeakFixedArray::Iterator iterator(*list);
- SharedFunctionInfo* next;
- while ((next = iterator.Next<SharedFunctionInfo>())) {
- DCHECK_NE(next, *shared);
- }
- }
-#endif // DEBUG
- list = WeakFixedArray::Add(list, shared);
- script->set_shared_function_infos(*list);
- }
+
// Finally set new script.
shared->set_script(*script_object);
}
@@ -11178,19 +12130,56 @@ int SharedFunctionInfo::SourceSize() {
}
-int SharedFunctionInfo::CalculateInstanceSize() {
- int instance_size =
- JSObject::kHeaderSize +
- expected_nof_properties() * kPointerSize;
- if (instance_size > JSObject::kMaxInstanceSize) {
- instance_size = JSObject::kMaxInstanceSize;
- }
- return instance_size;
+namespace {
+
+void CalculateInstanceSizeHelper(InstanceType instance_type,
+ int requested_internal_fields,
+ int requested_in_object_properties,
+ int* instance_size,
+ int* in_object_properties) {
+ int header_size = JSObject::GetHeaderSize(instance_type);
+ DCHECK_LE(requested_internal_fields,
+ (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
+ *instance_size =
+ Min(header_size +
+ ((requested_internal_fields + requested_in_object_properties)
+ << kPointerSizeLog2),
+ JSObject::kMaxInstanceSize);
+ *in_object_properties = ((*instance_size - header_size) >> kPointerSizeLog2) -
+ requested_internal_fields;
}
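+// Worked example (illustrative; the header size depends on the instance
+// type): with header_size == 3 * kPointerSize, requested_internal_fields
+// == 0 and requested_in_object_properties == 4:
+//   *instance_size        = min((3 + 0 + 4) * kPointerSize,
+//                               JSObject::kMaxInstanceSize)
+//                         = 7 * kPointerSize
+//   *in_object_properties = (7 - 3) - 0 = 4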
+} // namespace
-int SharedFunctionInfo::CalculateInObjectProperties() {
- return (CalculateInstanceSize() - JSObject::kHeaderSize) / kPointerSize;
+
+void JSFunction::CalculateInstanceSize(InstanceType instance_type,
+ int requested_internal_fields,
+ int* instance_size,
+ int* in_object_properties) {
+ CalculateInstanceSizeHelper(instance_type, requested_internal_fields,
+ shared()->expected_nof_properties(),
+ instance_size, in_object_properties);
+}
+
+
+void JSFunction::CalculateInstanceSizeForDerivedClass(
+ InstanceType instance_type, int requested_internal_fields,
+ int* instance_size, int* in_object_properties) {
+ Isolate* isolate = GetIsolate();
+ int expected_nof_properties = 0;
+ for (PrototypeIterator iter(isolate, this,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(); iter.Advance()) {
+ JSFunction* func = iter.GetCurrent<JSFunction>();
+ SharedFunctionInfo* shared = func->shared();
+ expected_nof_properties += shared->expected_nof_properties();
+ if (!IsSubclassConstructor(shared->kind())) {
+ break;
+ }
+ }
+ CalculateInstanceSizeHelper(instance_type, requested_internal_fields,
+ expected_nof_properties, instance_size,
+ in_object_properties);
}
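+// Illustrative sketch: for
+//   class A { constructor() { this.a = 0; } }
+//   class B extends A { constructor() { super(); this.b = 0; } }
+// the loop above walks B -> A, stopping after the first non-subclass
+// constructor, and sums their expected_nof_properties so that instances of
+// B get in-object slots for both .a and .b.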
@@ -11812,13 +12801,11 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
void SharedFunctionInfo::ClearTypeFeedbackInfo() {
feedback_vector()->ClearSlots(this);
- feedback_vector()->ClearICSlots(this);
}
void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
feedback_vector()->ClearSlotsAtGCTime(this);
- feedback_vector()->ClearICSlotsAtGCTime(this);
}
@@ -12194,7 +13181,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
case Translation::DOUBLE_REGISTER: {
int reg_code = iterator.Next();
- os << "{input=" << DoubleRegister::AllocationIndexToString(reg_code)
+ os << "{input=" << DoubleRegister::from_code(reg_code).ToString()
<< "}";
break;
}
@@ -12311,8 +13298,6 @@ const char* Code::ICState2String(InlineCacheState state) {
case MEGAMORPHIC: return "MEGAMORPHIC";
case GENERIC: return "GENERIC";
case DEBUG_STUB: return "DEBUG_STUB";
- case DEFAULT:
- return "DEFAULT";
}
UNREACHABLE();
return NULL;
@@ -12364,6 +13349,11 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
if ((name != NULL) && (name[0] != '\0')) {
os << "name = " << name << "\n";
+ } else if (kind() == BUILTIN) {
+ name = GetIsolate()->builtins()->Lookup(instruction_start());
+ if (name != NULL) {
+ os << "name = " << name << "\n";
+ }
}
if (kind() == OPTIMIZED_FUNCTION) {
os << "stack_slots = " << stack_slots() << "\n";
@@ -12504,6 +13494,16 @@ void BytecodeArray::Disassemble(std::ostream& os) {
SNPrintF(buf, "%p", bytecode_start);
os << buf.start() << " : ";
interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
+ if (interpreter::Bytecodes::IsJump(bytecode)) {
+ int offset = static_cast<int8_t>(bytecode_start[1]);
+ SNPrintF(buf, " (%p)", bytecode_start + offset);
+ os << buf.start();
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+ int index = static_cast<int>(bytecode_start[1]);
+ int offset = Smi::cast(constant_pool()->get(index))->value();
+ SNPrintF(buf, " (%p)", bytecode_start + offset);
+ os << buf.start();
+ }
os << "\n";
}
@@ -12929,25 +13929,99 @@ Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
}
-MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
- Handle<Object> value,
- bool from_javascript) {
+Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
+ Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw) {
+ if (!object->IsJSObject()) return Just(false);
+ // TODO(neis): Deal with proxies.
+ return JSObject::SetPrototype(Handle<JSObject>::cast(object), value,
+ from_javascript, should_throw);
+}
+
+
+Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
+ Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw) {
+ Isolate* isolate = object->GetIsolate();
+
+ const bool observed = from_javascript && object->map()->is_observed();
+ Handle<Object> old_value;
+ if (observed) {
+ old_value = Object::GetPrototype(isolate, object);
+ }
+
+ Maybe<bool> result =
+ SetPrototypeUnobserved(object, value, from_javascript, should_throw);
+ MAYBE_RETURN(result, Nothing<bool>());
+
+ if (result.FromJust() && observed) {
+ Handle<Object> new_value = Object::GetPrototype(isolate, object);
+ if (!new_value->SameValue(*old_value)) {
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::EnqueueChangeRecord(
+ object, "setPrototype",
+ isolate->factory()->proto_string(), old_value),
+ Nothing<bool>());
+ }
+ }
+
+ return result;
+}
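+// Illustrative sketch of the observed path above (pre-ES7 Object.observe):
+//
+//   var p = {}, o = {};
+//   Object.observe(o, function(records) { /* ... */ });
+//   Object.setPrototypeOf(o, p);
+//   // enqueues a change record of type "setPrototype" with name
+//   // "__proto__" and the old prototype as oldValue, but only when the
+//   // prototype actually changed (the SameValue check above).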
+
+
+Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw) {
#ifdef DEBUG
int size = object->Size();
#endif
Isolate* isolate = object->GetIsolate();
+
+ if (from_javascript) {
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(!object->IsAccessCheckNeeded());
+ }
+
// Strong objects may not have their prototype set via __proto__ or
// setPrototypeOf.
if (from_javascript && object->map()->is_strong()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongSetProto, object),
- Object);
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrongSetProto, object));
}
Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSReceiver or null.
// SpiderMonkey behaves this way.
- if (!value->IsJSReceiver() && !value->IsNull()) return value;
+ if (!value->IsJSReceiver() && !value->IsNull()) return Just(true);
+
+ bool dictionary_elements_in_chain =
+ object->map()->DictionaryElementsInPrototypeChainOnly();
+
+ bool all_extensible = object->map()->is_extensible();
+ Handle<JSObject> real_receiver = object;
+ if (from_javascript) {
+ // Find the first object in the chain whose prototype object is not
+ // hidden.
+ PrototypeIterator iter(isolate, real_receiver);
+ while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ // Casting to JSObject is fine because hidden prototypes are never
+ // JSProxies.
+ real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
+ iter.Advance();
+ all_extensible = all_extensible && real_receiver->map()->is_extensible();
+ }
+ }
+ Handle<Map> map(real_receiver->map());
+
+ // Nothing to do if prototype is already set.
+ if (map->prototype() == *value) return Just(true);
// From 8.6.2 Object Internal Methods
// ...
@@ -12957,50 +14031,25 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// Implementation specific extensions that modify [[Class]], [[Prototype]]
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
- if (!object->map()->is_extensible()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kNonExtensibleProto, object),
- Object);
+ if (!all_extensible) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNonExtensibleProto, object));
}
- // Before we can set the prototype we need to be sure
- // prototype cycles are prevented.
- // It is sufficient to validate that the receiver is not in the new prototype
- // chain.
+ // Before we can set the prototype we need to be sure prototype cycles are
+ // prevented. It is sufficient to validate that the receiver is not in the
+ // new prototype chain.
for (PrototypeIterator iter(isolate, *value,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent<JSReceiver>() == *object) {
// Cycle detected.
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCyclicProto),
- Object);
- }
- }
-
- bool dictionary_elements_in_chain =
- object->map()->DictionaryElementsInPrototypeChainOnly();
- Handle<JSObject> real_receiver = object;
-
- if (from_javascript) {
- // Find the first object in the chain whose prototype object is not
- // hidden and set the new prototype on that object.
- PrototypeIterator iter(isolate, real_receiver);
- while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
- iter.Advance();
- if (!real_receiver->map()->is_extensible()) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kNonExtensibleProto, object),
- Object);
- }
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kCyclicProto));
}
}
// Set the new prototype of the object.
- Handle<Map> map(real_receiver->map());
-
- // Nothing to do if prototype is already set.
- if (map->prototype() == *value) return value;
isolate->UpdateArrayProtectorOnSetPrototype(real_receiver);
@@ -13020,7 +14069,7 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
heap->ClearInstanceofCache();
DCHECK(size == object->Size());
- return value;
+ return Just(true);
}
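// A minimal standalone sketch (illustrative, not V8 source) of the cycle
// check performed above: walk the candidate prototype chain and fail if the
// receiver itself is encountered anywhere in it.
struct SketchObj { SketchObj* prototype; };
bool WouldCreateCycle(SketchObj* receiver, SketchObj* new_prototype) {
  for (SketchObj* o = new_prototype; o != nullptr; o = o->prototype) {
    if (o == receiver) return true;  // receiver found in new chain: cycle
  }
  return false;
}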
@@ -13145,6 +14194,17 @@ MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes) {
+ MAYBE_RETURN_NULL(
+ AddDataElement(object, index, value, attributes, THROW_ON_ERROR));
+ return value;
+}
+
+
+// static
+Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ ShouldThrow should_throw) {
DCHECK(object->map()->is_extensible());
Isolate* isolate = object->GetIsolate();
@@ -13205,30 +14265,33 @@ MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<String> name = isolate->factory()->Uint32ToString(index);
- RETURN_ON_EXCEPTION(isolate, BeginPerformSplice(array), Object);
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(isolate, BeginPerformSplice(array),
+ Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(
isolate, EnqueueChangeRecord(array, "add", name,
isolate->factory()->the_hole_value()),
- Object);
- RETURN_ON_EXCEPTION(isolate,
- EnqueueChangeRecord(array, "update",
- isolate->factory()->length_string(),
- old_length_handle),
- Object);
- RETURN_ON_EXCEPTION(isolate, EndPerformSplice(array), Object);
+ Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, EnqueueChangeRecord(array, "update",
+ isolate->factory()->length_string(),
+ old_length_handle),
+ Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(isolate, EndPerformSplice(array),
+ Nothing<bool>());
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- RETURN_ON_EXCEPTION(isolate, EnqueueSpliceRecord(array, old_length, deleted,
- new_length - old_length),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ EnqueueSpliceRecord(array, old_length, deleted,
+ new_length - old_length),
+ Nothing<bool>());
} else if (object->map()->is_observed()) {
Handle<String> name = isolate->factory()->Uint32ToString(index);
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(
isolate, EnqueueChangeRecord(object, "add", name,
isolate->factory()->the_hole_value()),
- Object);
+ Nothing<bool>());
}
- return value;
+ return Just(true);
}
@@ -13431,16 +14494,6 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
}
-MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
- Isolate* isolate = array->GetIsolate();
- Handle<Name> length = isolate->factory()->length_string();
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kStrictReadOnlyProperty, length, array),
- Object);
-}
-
-
template <typename BackingStore>
static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
int limit = object->IsJSArray()
@@ -13497,14 +14550,13 @@ void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
if (this->IsKey(k)) {
- os << " ";
+ os << "\n ";
if (k->IsString()) {
String::cast(k)->StringPrint(os);
} else {
os << Brief(k);
}
- os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i)
- << "\n";
+ os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i);
}
}
}
@@ -13648,9 +14700,7 @@ Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
name->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- if (!maybe_result.IsJust()) return Nothing<bool>();
- return Just(it.IsFound());
+ return HasProperty(&it);
}
@@ -13659,9 +14709,7 @@ Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
LookupIterator it(isolate, object, index,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- if (!maybe_result.IsJust()) return Nothing<bool>();
- return Just(it.IsFound());
+ return HasProperty(&it);
}
@@ -13679,12 +14727,14 @@ int JSObject::NumberOfOwnProperties(PropertyAttributes filter) {
if (HasFastProperties()) {
Map* map = this->map();
if (filter == NONE) return map->NumberOfOwnDescriptors();
- if (filter & DONT_ENUM) {
+ if (filter == DONT_SHOW) {
+ // The cached enum length was computed with filter == DONT_SHOW, so
+ // that's the only filter for which it's valid to retrieve it.
int result = map->EnumLength();
if (result != kInvalidEnumCacheSentinel) return result;
}
return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
return global_dictionary()->NumberOfElementsFilterAttributes(filter);
} else {
return property_dictionary()->NumberOfElementsFilterAttributes(filter);
@@ -13817,12 +14867,12 @@ int JSObject::GetOwnPropertyNames(FixedArray* storage, int index,
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < real_size; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- !FilterKey(descs->GetKey(i), filter)) {
+ !descs->GetKey(i)->FilterKey(filter)) {
storage->set(index++, descs->GetKey(i));
}
}
return index - start_index;
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
return global_dictionary()->CopyKeysTo(storage, index, filter,
GlobalDictionary::UNSORTED);
} else {
@@ -13832,31 +14882,76 @@ int JSObject::GetOwnPropertyNames(FixedArray* storage, int index,
}
-int JSObject::NumberOfOwnElements(PropertyAttributes filter) {
- return GetOwnElementKeys(NULL, filter);
+int JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
+ PropertyAttributes filter) {
+ if (HasFastProperties()) {
+ int nof_keys = keys->length();
+ int real_size = map()->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(map()->instance_descriptors());
+ for (int i = 0; i < real_size; i++) {
+ if ((descs->GetDetails(i).attributes() & filter) != 0) continue;
+ Name* key = descs->GetKey(i);
+ if (key->FilterKey(filter)) continue;
+ keys->AddKey(key);
+ }
+ return nof_keys - keys->length();
+ } else if (IsJSGlobalObject()) {
+ return global_dictionary()->CollectKeysTo(keys, filter);
+ } else {
+ return property_dictionary()->CollectKeysTo(keys, filter);
+ }
}
-int JSObject::NumberOfEnumElements() {
+int JSObject::NumberOfOwnElements(PropertyAttributes filter) {
// Fast case for objects with no elements.
- if (!IsJSValue() && HasFastObjectElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ if (!IsJSValue() && HasFastElements()) {
+ uint32_t length =
+ IsJSArray()
+ ? static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(this)->length())->value())
+ : static_cast<uint32_t>(FixedArrayBase::cast(elements())->length());
if (length == 0) return 0;
}
// Compute the number of enumerable elements.
+ return GetOwnElementKeys(NULL, filter);
+}
+
+
+int JSObject::NumberOfEnumElements() {
return NumberOfOwnElements(static_cast<PropertyAttributes>(DONT_ENUM));
}
+void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ PropertyAttributes filter) {
+ uint32_t string_keys = 0;
+
+ // If this is a String wrapper, add the string indices first,
+ // as they're guaranteed to precede the elements in numerical order
+ // and ascending order is required by ECMA-262, 6th, 9.1.12.
+ if (object->IsJSValue()) {
+ Object* val = JSValue::cast(*object)->value();
+ if (val->IsString()) {
+ String* str = String::cast(val);
+ string_keys = str->length();
+ for (uint32_t i = 0; i < string_keys; i++) {
+ keys->AddKey(i);
+ }
+ }
+ }
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->CollectElementIndices(object, keys, kMaxUInt32, filter, 0);
+}
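// Simplified sketch (illustrative, not V8 source) of the ordering rule in
// CollectOwnElementKeys: indices of a wrapped string come first, then the
// remaining element indices ascending, per ES6 9.1.12. The overlap skip is
// an assumption made here to keep the sketch duplicate-free.
#include <cstdint>
#include <set>
#include <vector>
std::vector<uint32_t> OwnElementKeys(uint32_t wrapped_string_length,
                                     const std::set<uint32_t>& element_indices) {
  std::vector<uint32_t> keys;
  for (uint32_t i = 0; i < wrapped_string_length; i++) keys.push_back(i);
  for (uint32_t index : element_indices) {
    if (index >= wrapped_string_length) keys.push_back(index);  // assumed skip
  }
  return keys;
}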
+
+
int JSObject::GetOwnElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
// If this is a String wrapper, add the string indices first,
- // as they're guaranteed to preced the elements in numerical order
+ // as they're guaranteed to precede the elements in numerical order
// and ascending order is required by ECMA-262, 6th, 9.1.12.
if (IsJSValue()) {
Object* val = JSValue::cast(this)->value();
@@ -13981,11 +15076,6 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
}
-int JSObject::GetEnumElementKeys(FixedArray* storage) {
- return GetOwnElementKeys(storage, static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
const char* Symbol::PrivateSymbolToName() const {
Heap* heap = GetIsolate()->heap();
#define SYMBOL_CHECK_AND_PRINT(name) \
@@ -14860,8 +15950,8 @@ size_t JSTypedArray::element_size() {
}
-void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
- Handle<Name> name) {
+void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
+ Handle<Name> name) {
DCHECK(!global->HasFastProperties());
auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
@@ -14871,8 +15961,8 @@ void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
// TODO(ishell): rename to EnsureEmptyPropertyCell or something.
-Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject> global, Handle<Name> name) {
+Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject> global, Handle<Name> name) {
DCHECK(!global->HasFastProperties());
auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
@@ -15509,7 +16599,7 @@ int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, filter)) {
+ if (this->IsKey(k) && !k->FilterKey(filter)) {
if (this->IsDeleted(i)) continue;
PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
@@ -15525,7 +16615,7 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, NONE)) {
+ if (this->IsKey(k) && !k->FilterKey(NONE)) {
if (this->IsDeleted(i)) continue;
PropertyDetails details = this->DetailsAt(i);
if (details.type() == ACCESSOR_CONSTANT) return true;
@@ -15584,12 +16674,12 @@ int Dictionary<Derived, Shape, Key>::CopyKeysTo(
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, filter)) {
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) storage->set(index++, k);
- }
+ if (!this->IsKey(k) || k->FilterKey(filter)) continue;
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) continue;
+ storage->set(index++, k);
}
if (sort_mode == Dictionary::SORTED) {
storage->SortPairs(storage, index);
@@ -15599,6 +16689,24 @@ int Dictionary<Derived, Shape, Key>::CopyKeysTo(
}
+template <typename Derived, typename Shape, typename Key>
+int Dictionary<Derived, Shape, Key>::CollectKeysTo(KeyAccumulator* keys,
+ PropertyAttributes filter) {
+ int capacity = this->Capacity();
+ int keyLength = keys->length();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = this->KeyAt(i);
+ if (!this->IsKey(k) || k->FilterKey(filter)) continue;
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) continue;
+ keys->AddKey(k);
+ }
+ return keyLength - keys->length();
+}
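// Sketch (illustrative, not V8 source) of the attribute test used in
// CollectKeysTo above: a key survives only if none of its attribute bits
// overlap the filter mask. The flag values mirror v8.h's PropertyAttribute.
enum SketchAttributes { kNone = 0, kReadOnly = 1, kDontEnum = 2, kDontDelete = 4 };
bool PassesFilter(int attributes, int filter) {
  // e.g. filter == kDontEnum keeps only enumerable keys
  return (attributes & filter) == 0;
}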
+
+
// Backwards lookup (slow).
template<typename Derived, typename Shape, typename Key>
Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
@@ -16106,7 +17214,6 @@ void JSMap::Clear(Handle<JSMap> map) {
void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
Isolate* isolate) {
- DCHECK_EQ(0, weak_collection->map()->GetInObjectProperties());
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
weak_collection->set_table(*table);
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 225a7db42e..93f57333a1 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -62,9 +62,7 @@
// - JSFunction
// - JSGeneratorObject
// - JSModule
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
+// - JSGlobalObject
// - JSGlobalProxy
// - JSValue
// - JSDate
@@ -77,6 +75,7 @@
// - FixedArray
// - DescriptorArray
// - LiteralsArray
+// - BindingsArray
// - HashTable
// - Dictionary
// - StringTable
@@ -87,6 +86,7 @@
// - OrderedHashSet
// - OrderedHashMap
// - Context
+// - TypeFeedbackMetadata
// - TypeFeedbackVector
// - ScopeInfo
// - TransitionArray
@@ -421,7 +421,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_GENERATOR_OBJECT_TYPE) \
V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
@@ -721,7 +720,6 @@ enum InstanceType {
JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_TYPE,
JS_GLOBAL_OBJECT_TYPE,
- JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
@@ -850,14 +848,15 @@ class ConsString;
class ElementsAccessor;
class FixedArrayBase;
class FunctionLiteral;
-class GlobalObject;
-class JSBuiltinsObject;
+class JSGlobalObject;
+class KeyAccumulator;
class LayoutDescriptor;
class LiteralsArray;
class LookupIterator;
class ObjectHashTable;
class ObjectVisitor;
class PropertyCell;
+class PropertyDescriptor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
@@ -941,8 +940,10 @@ template <class C> inline bool Is(Object* obj);
V(LayoutDescriptor) \
V(Map) \
V(DescriptorArray) \
+ V(BindingsArray) \
V(TransitionArray) \
V(LiteralsArray) \
+ V(TypeFeedbackMetadata) \
V(TypeFeedbackVector) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
@@ -991,9 +992,7 @@ template <class C> inline bool Is(Object* obj);
V(PolymorphicCodeCacheHashTable) \
V(MapCache) \
V(Primitive) \
- V(GlobalObject) \
V(JSGlobalObject) \
- V(JSBuiltinsObject) \
V(JSGlobalProxy) \
V(UndetectableObject) \
V(AccessCheckNeeded) \
@@ -1027,6 +1026,25 @@ class Object {
CERTAINLY_NOT_STORE_FROM_KEYED
};
+ enum ShouldThrow { THROW_ON_ERROR, DONT_THROW };
+
+#define RETURN_FAILURE(isolate, should_throw, call) \
+ do { \
+ if ((should_throw) == DONT_THROW) { \
+ return Just(false); \
+ } else { \
+ isolate->Throw(*isolate->factory()->call); \
+ return Nothing<bool>(); \
+ } \
+ } while (false)
+
+#define MAYBE_RETURN(call, value) \
+ do { \
+ if ((call).IsNothing()) return value; \
+ } while (false)
+
+#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
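// A minimal standalone sketch (illustrative, not V8 source) of the
// Maybe<bool>/ShouldThrow protocol these macros implement: DONT_THROW maps a
// failed operation to Just(false), while THROW_ON_ERROR schedules an
// exception and yields Nothing<bool>(). std::optional stands in for Maybe.
#include <cassert>
#include <optional>
enum SketchShouldThrow { kThrowOnError, kDontThrow };
using SketchMaybeBool = std::optional<bool>;  // nullopt plays Nothing's role
SketchMaybeBool SetOnExtensible(bool extensible, SketchShouldThrow mode) {
  if (!extensible) {
    if (mode == kDontThrow) return false;  // Just(false): quiet failure
    return std::nullopt;                   // Nothing: exception pending
  }
  return true;                             // Just(true): success
}
int main() {
  assert(SetOnExtensible(false, kDontThrow) == false);
  assert(!SetOnExtensible(false, kThrowOnError).has_value());
  assert(SetOnExtensible(true, kThrowOnError) == true);
}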
+
INLINE(bool IsFixedArrayBase() const);
INLINE(bool IsExternal() const);
INLINE(bool IsAccessorInfo() const);
@@ -1084,6 +1102,8 @@ class Object {
// 1 all refer to the same property, so this helper will return true.
inline bool KeyEquals(Object* other);
+ inline bool FilterKey(PropertyAttributes filter);
+
Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
inline static Handle<Object> NewStorageFor(Isolate* isolate,
@@ -1211,17 +1231,23 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
LookupIterator* it, LanguageMode language_mode = SLOPPY);
- // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
+ // ES6 [[Set]] (when passed DONT_THROW)
+ // Invariants for this and related functions (unless stated otherwise):
+ // 1) When the result is Nothing, an exception is pending.
+ // 2) When passed THROW_ON_ERROR, the result is never Just(false).
+ // In some cases, an exception is thrown regardless of the ShouldThrow
+ // argument. These cases are either in accordance with the spec or not
+ // covered by it (e.g., concerning API callbacks).
+ MUST_USE_RESULT static Maybe<bool> SetProperty(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode);
-
- MUST_USE_RESULT static MaybeHandle<Object> SetSuperProperty(
+ MUST_USE_RESULT static Maybe<bool> SetSuperProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode);
@@ -1230,22 +1256,28 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
+ MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
- Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> RedefineNonconfigurableProperty(
+ Handle<Object> value, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
+ Handle<Object> value, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> RedefineIncompatibleProperty(
Isolate* isolate, Handle<Object> name, Handle<Object> value,
- LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetDataProperty(
- LookupIterator* it, Handle<Object> value);
- MUST_USE_RESULT static MaybeHandle<Object> AddDataProperty(
+ ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> SetDataProperty(LookupIterator* it,
+ Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- LanguageMode language_mode, StoreFromKeyed store_mode);
+ ShouldThrow should_throw, StoreFromKeyed store_mode);
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
+ Handle<JSReceiver> holder, Handle<Name> name, Handle<Object> receiver,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<Object> object, const char* key,
LanguageMode language_mode = SLOPPY);
@@ -1255,16 +1287,15 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
LookupIterator* it, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithAccessor(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithAccessor(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
Handle<Object> receiver,
Handle<JSReceiver> getter);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithDefinedSetter(
- Handle<Object> receiver,
- Handle<JSReceiver> setter,
- Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithDefinedSetter(
+ Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
+ ShouldThrow should_throw);
MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
Isolate* isolate, Handle<Object> object, uint32_t index,
@@ -1274,8 +1305,9 @@ class Object {
Isolate* isolate, Handle<Object> object, uint32_t index,
Handle<Object> value, LanguageMode language_mode);
- static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
- Isolate* isolate, Handle<Object> receiver);
+ // Get the first non-hidden prototype.
+ static inline Handle<Object> GetPrototype(Isolate* isolate,
+ Handle<Object> receiver);
bool HasInPrototypeChain(Isolate* isolate, Object* object);
@@ -1357,7 +1389,8 @@ class Object {
Map* GetRootMap(Isolate* isolate);
// Helper for SetProperty and SetSuperProperty.
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyInternal(
+ // Return value is only meaningful if [found] is set to true on return.
+ MUST_USE_RESULT static Maybe<bool> SetPropertyInternal(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode, bool* found);
@@ -1604,46 +1637,78 @@ class HeapObject: public Object {
};
+// This is the base class for object's body descriptors.
+class BodyDescriptorBase {
+ protected:
+ static inline void IterateBodyImpl(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ static inline void IterateBodyImpl(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset);
+
+ static inline void IteratePointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ static inline void IteratePointers(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset);
+};
+
+
// This class describes a body of an object of a fixed size
// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
-template<int start_offset, int end_offset, int size>
-class FixedBodyDescriptor {
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
static const int kEndOffset = end_offset;
static const int kSize = size;
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);
+ static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ IterateBodyImpl(obj, start_offset, end_offset, v);
+ }
- template<typename StaticVisitor>
+ template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj) {
- StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
+ Heap* heap = obj->GetHeap();
+ IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, end_offset);
}
};
-// This class describes a body of an object of a variable size
+// This base class describes a body of an object of a variable size
// in which all pointer fields are located in the [start_offset, object_size)
// interval.
-template<int start_offset>
-class FlexibleBodyDescriptor {
+template <int start_offset>
+class FlexibleBodyDescriptorBase : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
- static inline void IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v);
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBodyImpl(obj, start_offset, object_size, v);
+ }
- template<typename StaticVisitor>
+ template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
- StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, object_size));
+ Heap* heap = obj->GetHeap();
+ IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, object_size);
}
};
+// This class describes a body of an object of a variable size
+// in which all pointer fields are located in the [start_offset, object_size)
+// interval. The size of the object is taken from the map.
+template <int start_offset>
+class FlexibleBodyDescriptor : public FlexibleBodyDescriptorBase<start_offset> {
+ public:
+ static inline int SizeOf(Map* map, HeapObject* object);
+};
+
+
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer)
class HeapNumber: public HeapObject {
@@ -1775,6 +1840,12 @@ enum AccessorComponent {
};
+enum KeyFilter { SKIP_SYMBOLS, INCLUDE_SYMBOLS };
+
+
+enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
+
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
@@ -1789,6 +1860,7 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(Handle<JSReceiver>,
@@ -1811,6 +1883,40 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static Object* DefineProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> name,
+ Handle<Object> attributes);
+ MUST_USE_RESULT static Object* DefineProperties(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> properties);
+
+ // "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
+ static bool DefineOwnProperty(Isolate* isolate, Handle<JSReceiver> object,
+ Handle<Object> key, PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ static bool OrdinaryDefineOwnProperty(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+ static bool OrdinaryDefineOwnProperty(LookupIterator* it,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ static bool GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc);
+ static bool GetOwnPropertyDescriptor(LookupIterator* it,
+ PropertyDescriptor* desc);
+
+ // Disallow further properties to be added to the object. This is
+ // ES6's [[PreventExtensions]] when passed DONT_THROW.
+ MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSReceiver> object, ShouldThrow should_throw);
+
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1834,6 +1940,12 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
LookupIterator* it);
+ // Set the object's prototype (only JSReceiver and null are allowed values).
+ MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSReceiver> object,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw);
+
static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
Handle<Name> name);
@@ -1854,8 +1966,9 @@ class JSReceiver: public HeapObject {
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
- Handle<JSReceiver> object,
- KeyCollectionType type);
+ Handle<JSReceiver> object, KeyCollectionType type,
+ KeyFilter filter = SKIP_SYMBOLS,
+ GetKeysConversion getConversion = KEEP_NUMBERS);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
@@ -1954,7 +2067,7 @@ class JSObject: public JSReceiver {
static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
uint32_t limit);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithInterceptor(
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithInterceptor(
LookupIterator* it, Handle<Object> value);
// SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to
@@ -1965,6 +2078,11 @@ class JSObject: public JSReceiver {
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ MUST_USE_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ ShouldThrow should_throw,
+ ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+
MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes,
@@ -1991,6 +2109,9 @@ class JSObject: public JSReceiver {
static void AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, PropertyAttributes attributes);
+ MUST_USE_RESULT static Maybe<bool> AddDataElement(
+ Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, ShouldThrow should_throw);
MUST_USE_RESULT static MaybeHandle<Object> AddDataElement(
Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
PropertyAttributes attributes);
@@ -2057,6 +2178,10 @@ class JSObject: public JSReceiver {
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes);
+ static MaybeHandle<Object> DefineAccessor(LookupIterator* it,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
// Defines an AccessorInfo property on the given object.
MUST_USE_RESULT static MaybeHandle<Object> SetAccessor(
@@ -2157,8 +2282,10 @@ class JSObject: public JSReceiver {
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
+ static inline int GetHeaderSize(InstanceType instance_type);
inline int GetHeaderSize();
+ static inline int GetInternalFieldCount(Map* map);
inline int GetInternalFieldCount();
inline int GetInternalFieldOffset(int index);
inline Object* GetInternalField(int index);
@@ -2172,6 +2299,8 @@ class JSObject: public JSReceiver {
// index. Returns the number of properties added.
int GetOwnPropertyNames(FixedArray* storage, int index,
PropertyAttributes filter = NONE);
+ int CollectOwnPropertyNames(KeyAccumulator* keys,
+ PropertyAttributes filter = NONE);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
@@ -2181,6 +2310,9 @@ class JSObject: public JSReceiver {
// Returns the number of elements on this object filtering out elements
// with the specified attributes (ignoring interceptors).
int GetOwnElementKeys(FixedArray* storage, PropertyAttributes filter);
+ static void CollectOwnElementKeys(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ PropertyAttributes filter);
// Count and fill in the enumerable elements into storage.
// (storage->length() == NumberOfEnumElements()).
// If storage is NULL, will count the elements without adding
@@ -2247,8 +2379,10 @@ class JSObject: public JSReceiver {
= UPDATE_WRITE_BARRIER);
// Set the object's prototype (only JSReceiver and null are allowed values).
- MUST_USE_RESULT static MaybeHandle<Object> SetPrototype(
- Handle<JSObject> object, Handle<Object> value, bool from_javascript);
+ MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw);
// Initializes the body after properties slot, properties slot is
// initialized by set_properties. Fill the pre-allocated fields with
@@ -2262,11 +2396,10 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Disalow further properties to be added to the oject.
- MUST_USE_RESULT static MaybeHandle<Object> PreventExtensions(
- Handle<JSObject> object);
+ MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSObject> object, ShouldThrow should_throw);
- bool IsExtensible();
+ static bool IsExtensible(Handle<JSObject> object);
// ES5 Object.seal
MUST_USE_RESULT static MaybeHandle<Object> Seal(Handle<JSObject> object);
@@ -2359,10 +2492,6 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // Note that Page::kMaxRegularHeapObjectSize puts a limit on
- // permissible values (see the DCHECK in heap.cc).
- static const int kInitialMaxFastElementArray = 100000;
-
// This constant applies only to the initial map of "global.Object" and
// not to arbitrary other JSObject maps.
static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
@@ -2380,10 +2509,7 @@ class JSObject: public JSReceiver {
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
- class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
- };
+ typedef FlexibleBodyDescriptor<kPropertiesOffset> BodyDescriptor;
Context* GetCreationContext();
@@ -2415,8 +2541,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
LookupIterator* it);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
@@ -2456,8 +2582,12 @@ class JSObject: public JSReceiver {
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
template <PropertyAttributes attrs>
- MUST_USE_RESULT static MaybeHandle<Object> PreventExtensionsWithTransition(
- Handle<JSObject> object);
+ MUST_USE_RESULT static Maybe<bool> PreventExtensionsWithTransition(
+ Handle<JSObject> object, ShouldThrow should_throw);
+
+ MUST_USE_RESULT static Maybe<bool> SetPrototypeUnobserved(
+ Handle<JSObject> object, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2519,8 +2649,6 @@ class FixedArray: public FixedArrayBase {
// Shrink length and insert filler objects.
void Shrink(int length);
- enum KeyFilter { ALL_KEYS, NON_SYMBOL_KEYS };
-
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -2560,7 +2688,7 @@ class FixedArray: public FixedArrayBase {
// object, the prefix of this array is sorted.
void SortPairs(FixedArray* numbers, uint32_t len);
- class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
+ class BodyDescriptor : public FlexibleBodyDescriptorBase<kHeaderSize> {
public:
static inline int SizeOf(Map* map, HeapObject* object);
};
@@ -2768,9 +2896,9 @@ class DescriptorArray: public FixedArray {
// Initialize or change the enum cache,
// using the supplied storage for the small "bridge".
- void SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache,
- Object* new_index_cache);
+ static void SetEnumCache(Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<FixedArray> new_cache,
+ Handle<FixedArray> new_index_cache);
bool CanHoldValue(int descriptor, Object* value);
@@ -3319,6 +3447,8 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Returns the number of properties added.
int CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
SortMode sort_mode);
+ // Collect the unsorted keys into the given KeyAccumulator.
+ int CollectKeysTo(KeyAccumulator* keys, PropertyAttributes filter);
// Copies enumerable keys to preallocated fixed array.
void CopyEnumKeysTo(FixedArray* storage);
@@ -4336,7 +4466,6 @@ class FreeSpace: public HeapObject {
// Accessors for the next field.
inline FreeSpace* next();
- inline FreeSpace** next_address();
inline void set_next(FreeSpace* next);
inline static FreeSpace* cast(HeapObject* obj);
@@ -4605,6 +4734,48 @@ class LiteralsArray : public FixedArray {
};
+// A bindings array contains the bindings for a bound function. It also holds
+// the type feedback vector.
+class BindingsArray : public FixedArray {
+ public:
+ inline TypeFeedbackVector* feedback_vector() const;
+ inline void set_feedback_vector(TypeFeedbackVector* vector);
+
+ inline JSReceiver* bound_function() const;
+ inline void set_bound_function(JSReceiver* function);
+ inline Object* bound_this() const;
+ inline void set_bound_this(Object* bound_this);
+
+ inline Object* binding(int binding_index) const;
+ inline void set_binding(int binding_index, Object* binding);
+ inline int bindings_count() const;
+
+ static Handle<BindingsArray> New(Isolate* isolate,
+ Handle<TypeFeedbackVector> vector,
+ Handle<JSReceiver> bound_function,
+ Handle<Object> bound_this,
+ int number_of_bindings);
+
+ static Handle<JSArray> CreateBoundArguments(Handle<BindingsArray> bindings);
+ static Handle<JSArray> CreateRuntimeBindings(Handle<BindingsArray> bindings);
+
+ DECLARE_CAST(BindingsArray)
+
+ private:
+ static const int kVectorIndex = 0;
+ static const int kBoundFunctionIndex = 1;
+ static const int kBoundThisIndex = 2;
+ static const int kFirstBindingIndex = 3;
+
+ inline Object* get(int index) const;
+ inline void set(int index, Object* value);
+ inline void set(int index, Smi* value);
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+
+ inline int length() const;
+};
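// Layout sketch (illustrative, derived from the constants above): slot 0
// holds the feedback vector, slots 1 and 2 the bound function and bound
// receiver, and the bound arguments start at slot 3.
inline int BindingsArraySlot(int binding_index) {
  const int kFirstBindingIndexSketch = 3;  // mirrors kFirstBindingIndex
  return kFirstBindingIndexSketch + binding_index;
}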
+
+
// HandlerTable is a fixed array containing entries for exception handlers in
// the code object it is associated with. The table comes in two flavors:
// 1) Based on ranges: Used for unoptimized code. Contains one entry per
@@ -4705,9 +4876,9 @@ class Code: public HeapObject {
NUMBER_OF_KINDS
};
- // No more than 16 kinds. The value is currently encoded in four bits in
+ // No more than 32 kinds. The value is currently encoded in five bits in
// Flags.
- STATIC_ASSERT(NUMBER_OF_KINDS <= 16);
+ STATIC_ASSERT(NUMBER_OF_KINDS <= 32);
static const char* Kind2String(Kind kind);
@@ -4809,6 +4980,7 @@ class Code: public HeapObject {
inline bool is_to_boolean_ic_stub();
inline bool is_keyed_stub();
inline bool is_optimized_code();
+ inline bool is_interpreter_entry_trampoline();
inline bool embeds_maps_weakly();
inline bool IsCodeStubOrIC();
@@ -5133,10 +5305,10 @@ class Code: public HeapObject {
class ProfilerTicksField : public BitField<int, 4, 28> {};
// Flags layout. BitField<type, shift, size>.
- class ICStateField : public BitField<InlineCacheState, 0, 4> {};
- class TypeField : public BitField<StubType, 4, 1> {};
- class CacheHolderField : public BitField<CacheHolderFlag, 5, 2> {};
- class KindField : public BitField<Kind, 7, 4> {};
+ class ICStateField : public BitField<InlineCacheState, 0, 3> {};
+ class TypeField : public BitField<StubType, 3, 1> {};
+ class CacheHolderField : public BitField<CacheHolderFlag, 4, 2> {};
+ class KindField : public BitField<Kind, 6, 5> {};
class ExtraICStateField: public BitField<ExtraICState, 11,
PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
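// Minimal sketch (illustrative, not V8 source) of the BitField template this
// flags layout relies on: a |size|-bit value stored starting at bit |shift|.
// Under the new layout Kind occupies bits 6..10, i.e. five bits for up to 32
// kinds, matching the STATIC_ASSERT above.
#include <cstdint>
template <typename T, int shift, int size>
struct SketchBitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};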
@@ -5367,6 +5539,8 @@ class Map: public HeapObject {
static const int kNoConstructorFunctionIndex = 0;
inline int GetConstructorFunctionIndex();
inline void SetConstructorFunctionIndex(int value);
+ static MaybeHandle<JSFunction> GetConstructorFunction(
+ Handle<Map> map, Handle<Context> native_context);
// Instance type.
inline InstanceType instance_type();
@@ -5414,6 +5588,10 @@ class Map: public HeapObject {
static const int kRetainingCounterStart = kSlackTrackingCounterEnd - 1;
static const int kRetainingCounterEnd = 0;
+ // Completes inobject slack tracking for the transition tree starting at this
+ // initial map.
+ void CompleteInobjectSlackTracking();
+
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
@@ -5653,6 +5831,10 @@ class Map: public HeapObject {
// gathering type feedback. Use TryUpdate in those cases instead.
static Handle<Map> Update(Handle<Map> map);
+ static inline Handle<Map> CopyInitialMap(Handle<Map> map);
+ static Handle<Map> CopyInitialMap(Handle<Map> map, int instance_size,
+ int in_object_properties,
+ int unused_property_fields);
static Handle<Map> CopyDropDescriptors(Handle<Map> map);
static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
Descriptor* descriptor,
@@ -5779,6 +5961,7 @@ class Map: public HeapObject {
inline bool CanTransition();
+ inline bool IsBooleanMap();
inline bool IsPrimitiveMap();
inline bool IsJSObjectMap();
inline bool IsJSArrayMap();
@@ -5787,7 +5970,7 @@ class Map: public HeapObject {
inline bool IsJSProxyMap();
inline bool IsJSGlobalProxyMap();
inline bool IsJSGlobalObjectMap();
- inline bool IsGlobalObjectMap();
+ inline bool IsJSTypedArrayMap();
inline bool CanOmitMapChecks();
@@ -6385,9 +6568,8 @@ class SharedFunctionInfo: public HeapObject {
Handle<Object> script_object);
// Layout description of the optimized code map.
- static const int kNextMapIndex = 0;
- static const int kSharedCodeIndex = 1;
- static const int kEntriesStart = 2;
+ static const int kSharedCodeIndex = 0;
+ static const int kEntriesStart = 1;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
@@ -6683,12 +6865,9 @@ class SharedFunctionInfo: public HeapObject {
// Source size of this function.
int SourceSize();
- // Calculate the instance size.
- int CalculateInstanceSize();
-
- // Calculate the number of in-object properties.
- int CalculateInObjectProperties();
-
+ // Returns `false` if formal parameters include rest parameters, optional
+ // parameters, or destructuring parameters.
+ // TODO(caitp): make this a flag set during parsing
inline bool has_simple_parameters();
// Initialize a SharedFunctionInfo from a parsed function literal.
@@ -6701,9 +6880,7 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
- // Iterate over all shared function infos that are created from a script.
- // That excludes shared function infos created for API functions and C++
- // builtins.
+ // Iterate over all shared function infos.
class Iterator {
public:
explicit Iterator(Isolate* isolate);
@@ -6776,15 +6953,14 @@ class SharedFunctionInfo: public HeapObject {
// Total size.
static const int kSize = kProfilerTicksOffset + kPointerSize;
#else
- // The only reason to use smi fields instead of int fields
- // is to allow iteration without maps decoding during
- // garbage collections.
- // To avoid wasting space on 64-bit architectures we use
- // the following trick: we group integer fields into pairs
-// The least significant integer in each pair is shifted left by 1.
-// By doing this we guarantee that LSB of each kPointerSize aligned
-// word is not set and thus this word cannot be treated as pointer
-// to HeapObject during old space traversal.
+// The only reason to use smi fields instead of int fields is to allow
+// iteration without maps decoding during garbage collections.
+// To avoid wasting space on 64-bit architectures we use the following trick:
+// we group integer fields into pairs.
+// The least significant integer in each pair is shifted left by 1. By doing
+// this we guarantee that LSB of each kPointerSize aligned word is not set and
+// thus this word cannot be treated as pointer to HeapObject during old space
+// traversal.
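// Worked sketch (illustrative, not V8 source) of the trick described above:
// the low int of each pair is stored shifted left by one, so its least
// significant bit is always clear and an aligned word holding such a pair
// can never be mistaken for a tagged HeapObject pointer.
#include <cstdint>
inline int32_t EncodeLowSmiField(int32_t value) { return value << 1; }
inline int32_t DecodeLowSmiField(int32_t field) { return field >> 1; }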
#if V8_TARGET_LITTLE_ENDIAN
static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
static const int kFormalParameterCountOffset =
@@ -6866,6 +7042,7 @@ class SharedFunctionInfo: public HeapObject {
// Bit positions in compiler_hints.
enum CompilerHints {
+ // byte 0
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kOptimizationDisabled,
@@ -6874,6 +7051,7 @@ class SharedFunctionInfo: public HeapObject {
kStrongModeFunction,
kUsesArguments,
kNeedsHomeObject,
+ // byte 1
kHasDuplicateParameters,
kForceInline,
kBoundFunction,
@@ -6882,21 +7060,39 @@ class SharedFunctionInfo: public HeapObject {
kIsFunction,
kDontCrankshaft,
kDontFlush,
- kIsArrow,
+ // byte 2
+ kFunctionKind,
+ kIsArrow = kFunctionKind,
kIsGenerator,
kIsConciseMethod,
kIsAccessorFunction,
kIsDefaultConstructor,
kIsSubclassConstructor,
kIsBaseConstructor,
- kInClassLiteral,
+ kIsInObjectLiteral,
+ // byte 3
kIsAsmFunction,
kDeserialized,
kNeverCompiled,
- kCompilerHintsCount // Pseudo entry
+ kCompilerHintsCount, // Pseudo entry
};
// Add hints for other modes when they're added.
STATIC_ASSERT(LANGUAGE_END == 3);
+ // kFunctionKind has to be byte-aligned
+ STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
+// Make sure that FunctionKind and byte 2 are in sync:
+#define ASSERT_FUNCTION_KIND_ORDER(functionKind, compilerFunctionKind) \
+ STATIC_ASSERT(FunctionKind::functionKind == \
+ 1 << (compilerFunctionKind - kFunctionKind))
+ ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
+ ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
+ ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
+ ASSERT_FUNCTION_KIND_ORDER(kAccessorFunction, kIsAccessorFunction);
+ ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
+ ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
+ ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
+ ASSERT_FUNCTION_KIND_ORDER(kInObjectLiteral, kIsInObjectLiteral);
+#undef ASSERT_FUNCTION_KIND_ORDER
class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
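// Worked sketch of the correspondence asserted above, with a value assumed
// for illustration: if byte 2 starts at bit 16 (kFunctionKind == 16), then
// kIsGenerator == 17 and FunctionKind::kGeneratorFunction must equal
// 1 << (17 - 16) == 2.
constexpr int kFunctionKindSketch = 16;  // assumption: byte 2 begins at bit 16
constexpr int kIsGeneratorSketch = kFunctionKindSketch + 1;
static_assert((1 << (kIsGeneratorSketch - kFunctionKindSketch)) == 2,
              "generator bit is the second FunctionKind flag");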
@@ -6923,46 +7119,47 @@ class SharedFunctionInfo: public HeapObject {
public:
// Constants for optimizing codegen for strict mode function and
+ // native tests when using integer-width instructions.
+ static const int kStrictModeBit =
+ kStrictModeFunction + kCompilerHintsSmiTagSize;
+ static const int kStrongModeBit =
+ kStrongModeFunction + kCompilerHintsSmiTagSize;
+ static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
+ static const int kBoundBit = kBoundFunction + kCompilerHintsSmiTagSize;
+
+ static const int kClassConstructorBits =
+ FunctionKind::kClassConstructor
+ << (kFunctionKind + kCompilerHintsSmiTagSize);
+
+ // Constants for optimizing codegen for strict mode function and
// native tests.
// Allows the use of byte-width instructions.
- static const int kStrictModeBitWithinByte =
- (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
- static const int kStrongModeBitWithinByte =
- (kStrongModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
- static const int kNativeBitWithinByte =
- (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
+ static const int kStrongModeBitWithinByte = kStrongModeBit % kBitsPerByte;
+ static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
+ static const int kBoundBitWithinByte = kBoundBit % kBitsPerByte;
- static const int kBoundBitWithinByte =
- (kBoundFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kClassConstructorBitsWithinByte =
+ FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
+ STATIC_ASSERT(kClassConstructorBitsWithinByte < (1 << kBitsPerByte));
#if defined(V8_TARGET_LITTLE_ENDIAN)
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kStrongModeByteOffset =
- kCompilerHintsOffset +
- (kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kNativeByteOffset = kCompilerHintsOffset +
- (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kBoundByteOffset =
- kCompilerHintsOffset +
- (kBoundFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+#define BYTE_OFFSET(compiler_hint) \
+ kCompilerHintsOffset + \
+ (compiler_hint + kCompilerHintsSmiTagSize) / kBitsPerByte
#elif defined(V8_TARGET_BIG_ENDIAN)
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kStrongModeByteOffset =
- kCompilerHintsOffset + (kCompilerHintsSize - 1) -
- ((kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kNativeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kBoundByteOffset =
- kCompilerHintsOffset + (kCompilerHintsSize - 1) -
- ((kBoundFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+#define BYTE_OFFSET(compiler_hint) \
+ kCompilerHintsOffset + (kCompilerHintsSize - 1) - \
+ ((compiler_hint + kCompilerHintsSmiTagSize) / kBitsPerByte)
#else
#error Unknown byte ordering
#endif
+ static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
+ static const int kStrongModeByteOffset = BYTE_OFFSET(kStrongModeFunction);
+ static const int kNativeByteOffset = BYTE_OFFSET(kNative);
+ static const int kBoundByteOffset = BYTE_OFFSET(kBoundFunction);
+ static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
+#undef BYTE_OFFSET
private:
// Returns entry from optimized code map for specified context and OSR entry.
@@ -7087,18 +7284,9 @@ class JSFunction: public JSObject {
inline void set_code_no_write_barrier(Code* code);
inline void ReplaceCode(Code* code);
- // Tells whether this function is builtin.
- inline bool IsBuiltin();
-
// Tells whether this function inlines the given shared function info.
bool Inlines(SharedFunctionInfo* candidate);
- // Tells whether this function should be subject to debugging.
- inline bool IsSubjectToDebugging();
-
- // Tells whether or not the function needs arguments adaption.
- inline bool NeedsArgumentsAdaption();
-
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
@@ -7175,8 +7363,8 @@ class JSFunction: public JSObject {
inline LiteralsArray* literals();
inline void set_literals(LiteralsArray* literals);
- inline FixedArray* function_bindings();
- inline void set_function_bindings(FixedArray* bindings);
+ inline BindingsArray* function_bindings();
+ inline void set_function_bindings(BindingsArray* bindings);
// The initial map for an object created by this constructor.
inline Map* initial_map();
@@ -7184,6 +7372,11 @@ class JSFunction: public JSObject {
Handle<Object> prototype);
inline bool has_initial_map();
static void EnsureHasInitialMap(Handle<JSFunction> function);
+ // Ensures that the |original_constructor| has a correct initial map and
+ // returns it. If the |original_constructor| is not a subclass constructor,
+ // its initial map is left unmodified.
+ static Handle<Map> EnsureDerivedHasInitialMap(
+ Handle<JSFunction> original_constructor, Handle<JSFunction> constructor);
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
@@ -7202,25 +7395,9 @@ class JSFunction: public JSObject {
// [[Construct]] from this function will not be allowed.
bool RemovePrototype();
- // Accessor for this function's initial map's [[class]]
- // property. This is primarily used by ECMA native functions. This
- // method sets the class_name field of this function's initial map
- // to a given value. It creates an initial map if this function does
- // not have one. Note that this method does not copy the initial map
- // if it has one already, but simply replaces it with the new value.
- // Instances created afterwards will have a map whose [[class]] is
- // set to 'value', but there is no guarantees on instances created
- // before.
- void SetInstanceClassName(String* name);
-
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
- // Returns `false` if formal parameters include rest parameters, optional
- // parameters, or destructuring parameters.
- // TODO(caitp): make this a flag set during parsing
- inline bool has_simple_parameters();
-
// [next_function_link]: Links functions into various lists, e.g. the list
// of optimized functions hanging off the native_context. The CodeFlusher
// uses this link to chain together flushing candidates. Treated weakly
@@ -7232,9 +7409,35 @@ class JSFunction: public JSObject {
DECLARE_CAST(JSFunction)
- // Iterates the objects, including code objects indirectly referenced
- // through pointers to the first instruction in the code object.
- void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
+ // Calculate the instance size and in-object properties count.
+ void CalculateInstanceSize(InstanceType instance_type,
+ int requested_internal_fields, int* instance_size,
+ int* in_object_properties);
+ void CalculateInstanceSizeForDerivedClass(InstanceType instance_type,
+ int requested_internal_fields,
+ int* instance_size,
+ int* in_object_properties);
+
+ // Visiting policy flags define whether the code entry or next function
+ // should be visited or not.
+ enum BodyVisitingPolicy {
+ kVisitCodeEntry = 1 << 0,
+ kVisitNextFunction = 1 << 1,
+
+ kSkipCodeEntryAndNextFunction = 0,
+ kVisitCodeEntryAndNextFunction = kVisitCodeEntry | kVisitNextFunction
+ };
+ // Iterates the function object according to the visiting policy.
+ template <BodyVisitingPolicy>
+ class BodyDescriptorImpl;
+
+ // Visit the whole object.
+ typedef BodyDescriptorImpl<kVisitCodeEntryAndNextFunction> BodyDescriptor;
+
+ // Don't visit next function.
+ typedef BodyDescriptorImpl<kVisitCodeEntry> BodyDescriptorStrongCode;
+ typedef BodyDescriptorImpl<kSkipCodeEntryAndNextFunction>
+ BodyDescriptorWeakCode;
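// Sketch (illustrative, not V8 source) of how a BodyVisitingPolicy is
// queried: the policy is a bit mask, so each descriptor tests membership
// against the flag values declared above (kVisitCodeEntry == 1 << 0,
// kVisitNextFunction == 1 << 1).
inline bool PolicyVisitsCodeEntry(int policy) { return (policy & 1) != 0; }
inline bool PolicyVisitsNextFunction(int policy) { return (policy & 2) != 0; }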
// Dispatched behavior.
DECLARE_PRINTER(JSFunction)
@@ -7252,22 +7455,16 @@ class JSFunction: public JSObject {
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
- static const int kCodeEntryOffset = JSObject::kHeaderSize;
- static const int kPrototypeOrInitialMapOffset =
- kCodeEntryOffset + kPointerSize;
+ static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
static const int kLiteralsOffset = kContextOffset + kPointerSize;
static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
- static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
+ static const int kCodeEntryOffset = kNonWeakFieldsEndOffset;
+ static const int kNextFunctionLinkOffset = kCodeEntryOffset + kPointerSize;
static const int kSize = kNextFunctionLinkOffset + kPointerSize;
- // Layout of the bound-function binding array.
- static const int kBoundFunctionIndex = 0;
- static const int kBoundThisIndex = 1;
- static const int kBoundArgumentsStartIndex = 2;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
};
@@ -7292,7 +7489,7 @@ class JSGlobalProxy : public JSObject {
DECLARE_CAST(JSGlobalProxy)
- inline bool IsDetachedFrom(GlobalObject* global) const;
+ inline bool IsDetachedFrom(JSGlobalObject* global) const;
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalProxy)
@@ -7308,41 +7505,22 @@ class JSGlobalProxy : public JSObject {
};
-// Common super class for JavaScript global objects and the special
-// builtins global objects.
-class GlobalObject: public JSObject {
+// JavaScript global object.
+class JSGlobalObject : public JSObject {
public:
- // [builtins]: the object holding the runtime routines written in JS.
- DECL_ACCESSORS(builtins, JSBuiltinsObject)
-
// [native context]: the natives corresponding to this global object.
DECL_ACCESSORS(native_context, Context)
// [global proxy]: the global proxy object of the context
DECL_ACCESSORS(global_proxy, JSObject)
- DECLARE_CAST(GlobalObject)
- static void InvalidatePropertyCell(Handle<GlobalObject> object,
+ static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
// Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsurePropertyCell(Handle<GlobalObject> global,
+ static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name);
- // Layout description.
- static const int kBuiltinsOffset = JSObject::kHeaderSize;
- static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
- static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
- static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
-};
-
-
-// JavaScript global object.
-class JSGlobalObject: public GlobalObject {
- public:
DECLARE_CAST(JSGlobalObject)
inline bool IsDetached();
@@ -7352,31 +7530,16 @@ class JSGlobalObject: public GlobalObject {
DECLARE_VERIFIER(JSGlobalObject)
// Layout description.
- static const int kSize = GlobalObject::kHeaderSize;
+ static const int kNativeContextOffset = JSObject::kHeaderSize;
+ static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
+ static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
+ static const int kSize = kHeaderSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
};
-// Builtins global object which holds the runtime routines written in
-// JavaScript.
-class JSBuiltinsObject: public GlobalObject {
- public:
- DECLARE_CAST(JSBuiltinsObject)
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSBuiltinsObject)
- DECLARE_VERIFIER(JSBuiltinsObject)
-
- // Layout description.
- static const int kSize = GlobalObject::kHeaderSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
-};
-
-
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
class JSValue: public JSObject {
public:
@@ -7591,6 +7754,8 @@ class JSRegExp: public JSObject {
};
DECL_ACCESSORS(data, Object)
+ DECL_ACCESSORS(flags, Object)
+ DECL_ACCESSORS(source, Object)
inline Type TypeTag();
inline int CaptureCount();
@@ -7619,10 +7784,13 @@ class JSRegExp: public JSObject {
DECLARE_CAST(JSRegExp)
// Dispatched behavior.
+ DECLARE_PRINTER(JSRegExp)
DECLARE_VERIFIER(JSRegExp)
static const int kDataOffset = JSObject::kHeaderSize;
- static const int kSize = kDataOffset + kPointerSize;
+ static const int kSourceOffset = kDataOffset + kPointerSize;
+ static const int kFlagsOffset = kSourceOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
// Indices in the data array.
static const int kTagIndex = 0;
@@ -7671,12 +7839,8 @@ class JSRegExp: public JSObject {
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
// In-object fields.
- static const int kSourceFieldIndex = 0;
- static const int kGlobalFieldIndex = 1;
- static const int kIgnoreCaseFieldIndex = 2;
- static const int kMultilineFieldIndex = 3;
- static const int kLastIndexFieldIndex = 4;
- static const int kInObjectFieldCount = 5;
+ static const int kLastIndexFieldIndex = 0;
+ static const int kInObjectFieldCount = 1;
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
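
The shrunken in-object field count mirrors ES6's move of the regexp flags from
own data properties to accessors on RegExp.prototype; only lastIndex remains a
real own property. A sketch of the resulting observable behaviour (per ES6
semantics, not verified against this exact revision):

    const re = /a/g;
    Object.getOwnPropertyNames(re);   // ["lastIndex"]
    re.hasOwnProperty("global");      // false: `global` is now a prototype getter
    typeof Object.getOwnPropertyDescriptor(RegExp.prototype, "source").get;
                                      // "function"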
@@ -8374,6 +8538,11 @@ class Symbol: public Name {
// be used to designate own properties of objects.
DECL_BOOLEAN_ACCESSORS(is_private)
+ // [is_well_known_symbol]: Whether this is a spec-defined well-known symbol,
+ // or not. Well-known symbols do not throw when an access check fails during
+ // a load.
+ DECL_BOOLEAN_ACCESSORS(is_well_known_symbol)
+
DECLARE_CAST(Symbol)
// Dispatched behavior.
@@ -8391,6 +8560,7 @@ class Symbol: public Name {
private:
static const int kPrivateBit = 0;
+ static const int kWellKnownSymbolBit = 1;
const char* PrivateSymbolToName() const;
@@ -9410,20 +9580,21 @@ class JSProxy: public JSReceiver {
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
- // that is read-only, throw. In all these cases set '*done' to true,
- // otherwise set it to false.
+ // that is read-only, fail. In all these cases set '*done' to true.
+ // Otherwise set it to false, in which case the return value is not
+ // meaningful.
MUST_USE_RESULT
- static MaybeHandle<Object> SetPropertyViaPrototypesWithHandler(
+ static Maybe<bool> SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode, bool* done);
+ Handle<Object> value, ShouldThrow should_throw, bool* done);
MUST_USE_RESULT static Maybe<PropertyAttributes>
GetPropertyAttributesWithHandler(Handle<JSProxy> proxy,
Handle<Object> receiver,
Handle<Name> name);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode);
+ Handle<Object> value, ShouldThrow should_throw);
// Turn the proxy into an (empty) JSObject.
static void Fix(Handle<JSProxy> proxy);
@@ -9957,7 +10128,6 @@ class JSArray: public JSObject {
static bool HasReadOnlyLength(Handle<JSArray> array);
static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
- static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array);
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
@@ -9981,6 +10151,17 @@ class JSArray: public JSObject {
static inline void SetContent(Handle<JSArray> array,
Handle<FixedArrayBase> storage);
+ static bool DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
+ Handle<Object> name, PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ static bool AnythingToArrayLength(Isolate* isolate,
+ Handle<Object> length_object,
+ uint32_t* output);
+ static bool ArraySetLength(Isolate* isolate, Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
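
These declarations back the ES6 9.4.2 ordinary-array [[DefineOwnProperty]] and
ArraySetLength algorithms. In script terms, the paths they implement look
roughly like this (illustrative sketch):

    const a = [1, 2, 3];
    Object.defineProperty(a, "length", { value: 1 });  // ArraySetLength: truncates
    a.length;                                          // 1
    Object.defineProperty(a, "length", { writable: false });
    a.push(4);                                         // TypeError: length is frozen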
DECLARE_CAST(JSArray)
// Dispatched behavior.
@@ -9994,6 +10175,14 @@ class JSArray: public JSObject {
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
+ // 600 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h, which
+ // we do not want to include in objects.h.
+ // Note that Page::kMaxRegularHeapObjectSize has to be in sync with
+ // kInitialMaxFastElementArray, which is checked in a DCHECK in heap.cc.
+ static const int kInitialMaxFastElementArray =
+ (600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
+ kPointerSize;
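
As a sanity check of the arithmetic (the header sizes below are typical 64-bit
values, assumed for illustration rather than quoted from this revision): with
kPointerSize == 8, FixedArray::kHeaderSize == 16, JSArray kSize == 32 and
AllocationMemento::kSize == 16,

    (600 * 1024 - 16 - 32 - 16) / 8 = 614336 / 8 = 76792 elements.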
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
@@ -10162,6 +10351,7 @@ class AccessCheckInfo: public Struct {
public:
DECL_ACCESSORS(named_callback, Object)
DECL_ACCESSORS(indexed_callback, Object)
+ DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
DECLARE_CAST(AccessCheckInfo)
@@ -10172,7 +10362,8 @@ class AccessCheckInfo: public Struct {
static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
- static const int kDataOffset = kIndexedCallbackOffset + kPointerSize;
+ static const int kCallbackOffset = kIndexedCallbackOffset + kPointerSize;
+ static const int kDataOffset = kCallbackOffset + kPointerSize;
static const int kSize = kDataOffset + kPointerSize;
private:
@@ -10254,7 +10445,9 @@ class TemplateInfo: public Struct {
static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
static const int kPropertyAccessorsOffset =
kPropertyListOffset + kPointerSize;
- static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
+ static const int kPropertyIntrinsicsOffset =
+ kPropertyAccessorsOffset + kPointerSize;
+ static const int kHeaderSize = kPropertyIntrinsicsOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
@@ -10577,11 +10770,7 @@ class ObjectVisitor BASE_EMBEDDED {
};
-class StructBodyDescriptor : public
- FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
-};
+typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
// BooleanBit is a helper class for setting and getting a bit in an integer.
@@ -10602,28 +10791,7 @@ class BooleanBit : public AllStatic {
};
-class KeyAccumulator final BASE_EMBEDDED {
- public:
- explicit KeyAccumulator(Isolate* isolate) : isolate_(isolate), length_(0) {}
-
- void AddKey(Handle<Object> key, int check_limit);
- void AddKeys(Handle<FixedArray> array, FixedArray::KeyFilter filter);
- void AddKeys(Handle<JSObject> array, FixedArray::KeyFilter filter);
- void PrepareForComparisons(int count);
- Handle<FixedArray> GetKeys();
-
- int GetLength() { return length_; }
-
- private:
- void EnsureCapacity(int capacity);
- void Grow();
-
- Isolate* isolate_;
- Handle<FixedArray> keys_;
- Handle<OrderedHashSet> set_;
- int length_;
- DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
-};
-} } // namespace v8::internal
+} // NOLINT, false-positive due to second-order macros.
+} // NOLINT, false-positive due to second-order macros.
#endif // V8_OBJECTS_H_
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/optimizing-compile-dispatcher.cc
index 8e3e96ad00..7062db640d 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/optimizing-compile-dispatcher.cc
@@ -6,7 +6,6 @@
#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/v8.h"
diff --git a/deps/v8/src/optimizing-compile-dispatcher.h b/deps/v8/src/optimizing-compile-dispatcher.h
index ad09dfa734..9c4e4cb8df 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/optimizing-compile-dispatcher.h
@@ -131,7 +131,7 @@ class OptimizingCompileDispatcher {
// is not safe to access them directly.
int recompilation_delay_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OPTIMIZING_COMPILE_DISPATCHER_H_
diff --git a/deps/v8/src/parameter-initializer-rewriter.cc b/deps/v8/src/parameter-initializer-rewriter.cc
new file mode 100644
index 0000000000..28f741c1f6
--- /dev/null
+++ b/deps/v8/src/parameter-initializer-rewriter.cc
@@ -0,0 +1,82 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parameter-initializer-rewriter.h"
+
+#include "src/ast.h"
+#include "src/ast-expression-visitor.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+
+class Rewriter final : public AstExpressionVisitor {
+ public:
+ Rewriter(uintptr_t stack_limit, Expression* initializer, Scope* old_scope,
+ Scope* new_scope)
+ : AstExpressionVisitor(stack_limit, initializer),
+ old_scope_(old_scope),
+ new_scope_(new_scope) {}
+
+ private:
+ void VisitExpression(Expression* expr) override {}
+
+ void VisitFunctionLiteral(FunctionLiteral* expr) override;
+ void VisitClassLiteral(ClassLiteral* expr) override;
+ void VisitVariableProxy(VariableProxy* expr) override;
+
+ Scope* old_scope_;
+ Scope* new_scope_;
+};
+
+
+void Rewriter::VisitFunctionLiteral(FunctionLiteral* function_literal) {
+ function_literal->scope()->ReplaceOuterScope(new_scope_);
+}
+
+
+void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
+ class_literal->scope()->ReplaceOuterScope(new_scope_);
+ if (class_literal->extends() != nullptr) {
+ Visit(class_literal->extends());
+ }
+ // No need to visit the constructor since it will have the class
+ // scope on its scope chain.
+ ZoneList<ObjectLiteralProperty*>* props = class_literal->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ Visit(prop->key());
+ }
+ // No need to visit the values, since all values are functions with
+ // the class scope on their scope chain.
+ DCHECK(prop->value()->IsFunctionLiteral());
+ }
+}
+
+
+void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
+ DCHECK(!proxy->is_resolved());
+ if (old_scope_->RemoveUnresolved(proxy)) {
+ new_scope_->AddUnresolved(proxy);
+ }
+}
+
+
+} // anonymous namespace
+
+
+void RewriteParameterInitializerScope(uintptr_t stack_limit,
+ Expression* initializer, Scope* old_scope,
+ Scope* new_scope) {
+ Rewriter rewriter(stack_limit, initializer, old_scope, new_scope);
+ rewriter.Run();
+}
+
+
+} // namespace internal
+} // namespace v8
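
To see what the rewriter is for: a default-parameter initializer is parsed
while the enclosing scope is still current, so any unresolved references and
inner function scopes must afterwards be re-parented onto the parameter scope.
An illustrative sketch in JavaScript:

    let x = 1;
    function f(a = x + 1,       // the unresolved `x` migrates to the new scope
               b = () => a) {   // the arrow's outer scope is re-pointed as well
      return b();
    }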
diff --git a/deps/v8/src/parameter-initializer-rewriter.h b/deps/v8/src/parameter-initializer-rewriter.h
new file mode 100644
index 0000000000..a195cb600f
--- /dev/null
+++ b/deps/v8/src/parameter-initializer-rewriter.h
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARAMETER_EXPRESSION_REWRITER_H_
+#define V8_PARAMETER_EXPRESSION_REWRITER_H_
+
+#include "src/ast.h"
+
+namespace v8 {
+namespace internal {
+
+
+void RewriteParameterInitializerScope(uintptr_t stack_limit,
+ Expression* initializer, Scope* old_scope,
+ Scope* new_scope);
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARAMETER_EXPRESSION_REWRITER_H_
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 60a6024608..2704db3d7c 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -14,7 +14,9 @@
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/messages.h"
+#include "src/parameter-initializer-rewriter.h"
#include "src/preparser.h"
+#include "src/rewriter.h"
#include "src/runtime/runtime.h"
#include "src/scanner-character-streams.h"
#include "src/scopeinfo.h"
@@ -344,8 +346,8 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
: FunctionKind::kDefaultBaseConstructor;
Scope* function_scope = NewScope(scope, FUNCTION_SCOPE, kind);
- function_scope->SetLanguageMode(
- static_cast<LanguageMode>(language_mode | STRICT));
+ SetLanguageMode(function_scope,
+ static_cast<LanguageMode>(language_mode | STRICT));
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
@@ -357,7 +359,6 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
kind, &function_factory);
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
- AddAssertIsConstruct(body, pos);
if (call_super) {
// %_DefaultConstructorCallSuper(new.target, %GetPrototype(<this-fun>))
ZoneList<Expression*>* args =
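
For context, DefaultConstructor synthesizes the implicit constructor of a
class. Per the ES6 spec the two shapes are approximately (a sketch, not the
literal code V8 generates):

    class Base {}                  // as if: constructor() {}
    class Derived extends Base {}  // as if: constructor(...args) { super(...args); }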
@@ -913,18 +914,15 @@ Parser::Parser(ParseInfo* info)
DCHECK(!info->script().is_null() || info->source_stream() != NULL);
set_allow_lazy(info->allow_lazy_parsing());
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
- set_allow_harmony_arrow_functions(FLAG_harmony_arrow_functions);
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
set_allow_harmony_rest_parameters(FLAG_harmony_rest_parameters);
set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
- set_allow_harmony_spread_calls(FLAG_harmony_spread_calls);
set_allow_harmony_destructuring(FLAG_harmony_destructuring);
- set_allow_harmony_spread_arrays(FLAG_harmony_spread_arrays);
- set_allow_harmony_new_target(FLAG_harmony_new_target);
set_allow_strong_mode(FLAG_strong_mode);
set_allow_legacy_const(FLAG_legacy_const);
+ set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -1050,6 +1048,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
FunctionState function_state(&function_state_, &scope_, scope,
kNormalFunction, &function_factory);
+ // Don't count the mode in the use counters--give the program a chance
+ // to enable script/module-wide strict/strong mode below.
scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
@@ -1067,7 +1067,15 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
- if (ok && (is_strict(language_mode()) || allow_harmony_sloppy())) {
+ if (ok && is_sloppy(language_mode()) && allow_harmony_sloppy_function()) {
+ // TODO(littledan): Function bindings on the global object that modify
+ // pre-existing bindings should be made writable, enumerable and
+ // nonconfigurable if possible, whereas this code will leave attributes
+ // unchanged if the property already exists.
+ InsertSloppyBlockFunctionVarBindings(scope, &ok);
+ }
+ if (ok && (is_strict(language_mode()) || allow_harmony_sloppy() ||
+ allow_harmony_destructuring())) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -1184,8 +1192,8 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
if (shared_info->is_arrow()) {
Scope* scope =
- NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- scope->SetLanguageMode(shared_info->language_mode());
+ NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+ SetLanguageMode(scope, shared_info->language_mode());
scope->set_start_position(shared_info->start_position());
ExpressionClassifier formals_classifier;
ParserFormalParameters formals(scope);
@@ -1211,8 +1219,10 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
if (ok) {
checkpoint.Restore(&formals.materialized_literals_count);
+ // Pass `accept_IN=true` to ParseArrowFunctionLiteral; this should not be
+ // observable, or else the preparser would have failed.
Expression* expression =
- ParseArrowFunctionLiteral(formals, formals_classifier, &ok);
+ ParseArrowFunctionLiteral(true, formals, formals_classifier, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -1328,13 +1338,11 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
// Strong mode implies strict mode. If there are several "use strict"
// / "use strong" directives, do the strict mode changes only once.
if (is_sloppy(scope_->language_mode())) {
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ RaiseLanguageMode(STRICT);
}
if (use_strong_found) {
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRONG));
+ RaiseLanguageMode(STRONG);
if (IsClassConstructor(function_state_->kind())) {
// "use strong" cannot occur in a class constructor body, to avoid
// unintuitive strong class object semantics.
@@ -1370,11 +1378,18 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
// incremented after parsing is done.
++use_counts_[v8::Isolate::kUseAsm];
scope_->SetAsmModule();
+ } else {
+ // Should not change mode, but will increment UseCounter
+ // if appropriate. Ditto usages below.
+ RaiseLanguageMode(SLOPPY);
}
} else {
// End of the directive prologue.
directive_prologue = false;
+ RaiseLanguageMode(SLOPPY);
}
+ } else {
+ RaiseLanguageMode(SLOPPY);
}
body->Add(stat, zone());
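
Note that the directive-prologue handling above only inspects the leading run
of string-literal expression statements; anything after the first real
statement is ordinary code. A small sketch of what counts:

    "use strict";   // directive: raises the language mode
    "use strong";   // directive (only honored behind --strong-mode)
    var x = 0;      // first real statement ends the prologue
    "use strict";   // too late: just an expression statement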
@@ -1451,8 +1466,7 @@ void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
// ModuleItem*
DCHECK(scope_->is_module_scope());
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ RaiseLanguageMode(STRICT);
while (peek() != Token::EOS) {
Statement* stat = ParseModuleItem(CHECK_OK);
@@ -1927,7 +1941,7 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
Statement* statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
- if (result) result->AddStatement(statement, zone());
+ if (result) result->statements()->Add(statement, zone());
return result;
}
}
@@ -2108,6 +2122,7 @@ Variable* Parser::Declare(Declaration* declaration,
var = new (zone()) Variable(declaration_scope, name, mode, kind,
declaration->initialization(), kNotAssigned);
var->AllocateTo(VariableLocation::LOOKUP, -1);
+ var->SetFromEval();
resolve = true;
}
@@ -2352,7 +2367,7 @@ Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
while (peek() != Token::RBRACE) {
Statement* stat = ParseStatement(NULL, CHECK_OK);
if (stat && !stat->IsEmpty()) {
- result->AddStatement(stat, zone());
+ result->statements()->Add(stat, zone());
}
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2381,7 +2396,7 @@ Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
while (peek() != Token::RBRACE) {
Statement* stat = ParseStatementListItem(CHECK_OK);
if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat, zone());
+ body->statements()->Add(stat, zone());
}
}
}
@@ -2519,6 +2534,7 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (!first_declaration) Consume(Token::COMMA);
Expression* pattern;
+ int decl_pos = peek_position();
{
ExpressionClassifier pattern_classifier;
Token::Value next = peek();
@@ -2526,6 +2542,10 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (!*ok) return;
ValidateBindingPattern(&pattern_classifier, ok);
if (!*ok) return;
+ if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
+ ValidateLetPattern(&pattern_classifier, ok);
+ if (!*ok) return;
+ }
if (!allow_harmony_destructuring() && !pattern->IsVariableProxy()) {
ReportUnexpectedToken(next);
*ok = false;
@@ -2533,6 +2553,8 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
}
}
+ bool is_pattern = pattern->IsObjectLiteral() || pattern->IsArrayLiteral();
+
Scanner::Location variable_loc = scanner()->location();
const AstRawString* single_name =
pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
@@ -2544,17 +2566,16 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
is_for_iteration_variable =
var_context == kForStatement &&
(peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
- if (is_for_iteration_variable && parsing_result->descriptor.mode == CONST) {
+ if (is_for_iteration_variable &&
+ (parsing_result->descriptor.mode == CONST ||
+ parsing_result->descriptor.mode == CONST_LEGACY)) {
parsing_result->descriptor.needs_init = false;
}
Expression* value = NULL;
// Harmony consts have non-optional initializers.
int initializer_position = RelocInfo::kNoPosition;
- if (peek() == Token::ASSIGN || (parsing_result->descriptor.mode == CONST &&
- !is_for_iteration_variable)) {
- Expect(Token::ASSIGN, ok);
- if (!*ok) return;
+ if (Check(Token::ASSIGN)) {
ExpressionClassifier classifier;
value = ParseAssignmentExpression(var_context != kForStatement,
&classifier, ok);
@@ -2579,6 +2600,15 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// End position of the initializer is after the assignment expression.
initializer_position = scanner()->location().end_pos;
} else {
+ if ((parsing_result->descriptor.mode == CONST || is_pattern) &&
+ !is_for_iteration_variable) {
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(decl_pos, scanner()->location().end_pos),
+ MessageTemplate::kDeclarationMissingInitializer,
+ is_pattern ? "destructuring" : "const");
+ *ok = false;
+ return;
+ }
// End position of the initializer is after the variable.
initializer_position = position();
}
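
The added check makes a missing initializer on a const or destructuring
declaration an early error, with the for-in/of head as the one exemption.
In script terms (sketch):

    const x;                    // SyntaxError: const needs an initializer
    let {a};                    // SyntaxError: destructuring needs an initializer
    let [b] = [1];              // fine
    for (const k in {p: 1}) {}  // fine: iteration variables are exempt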
@@ -2939,13 +2969,28 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
scope_->DeclarationScope()->RecordWithStatement();
Scope* with_scope = NewScope(scope_, WITH_SCOPE);
- Statement* stmt;
+ Block* body;
{ BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
- stmt = ParseSubStatement(labels, CHECK_OK);
+
+ // The body of the with statement must be enclosed in an additional
+ // lexical scope in case the body is a FunctionDeclaration.
+ body = factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ {
+ BlockState block_state(&scope_, block_scope);
+ Target target(&this->target_stack_, body);
+ Statement* stmt = ParseSubStatement(labels, CHECK_OK);
+ body->statements()->Add(stmt, zone());
+ block_scope->set_end_position(scanner()->location().end_pos);
+ block_scope = block_scope->FinalizeBlockScope();
+ body->set_scope(block_scope);
+ }
+
with_scope->set_end_position(scanner()->location().end_pos);
}
- return factory()->NewWithStatement(with_scope, expr, stmt, pos);
+ return factory()->NewWithStatement(with_scope, expr, body, pos);
}
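
The extra block scope matters when the with body is a function declaration,
which some sloppy-mode grammars accept in statement position (an Annex-B style
extension; shown here only as a sketch):

    with ({}) function f() {}  // `f` gets bound in the enclosing block scope,
                               // not resolved as a property of the with object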
@@ -3019,12 +3064,12 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
tag->position());
Statement* tag_statement =
factory()->NewExpressionStatement(tag_assign, RelocInfo::kNoPosition);
- switch_block->AddStatement(tag_statement, zone());
+ switch_block->statements()->Add(tag_statement, zone());
// make statement: undefined;
// This is needed so the tag isn't returned as the value, in case the switch
// statements don't have a value.
- switch_block->AddStatement(
+ switch_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
@@ -3054,7 +3099,7 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
cases->Add(clause, zone());
}
switch_statement->Initialize(tag_read, cases);
- cases_block->AddStatement(switch_statement, zone());
+ cases_block->statements()->Add(switch_statement, zone());
}
Expect(Token::RBRACE, CHECK_OK);
@@ -3062,7 +3107,7 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
cases_scope = cases_scope->FinalizeBlockScope();
cases_block->set_scope(cases_scope);
- switch_block->AddStatement(cases_block, zone());
+ switch_block->statements()->Add(cases_block, zone());
return switch_block;
}
@@ -3114,21 +3159,79 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
- const AstRawString* name = NULL;
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
catch_scope = NewScope(scope_, CATCH_SCOPE);
catch_scope->set_start_position(scanner()->location().beg_pos);
- name = ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ ExpressionClassifier pattern_classifier;
+ Expression* pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ ValidateBindingPattern(&pattern_classifier, CHECK_OK);
+
+ const AstRawString* name = ast_value_factory()->dot_catch_string();
+ bool is_simple = pattern->IsVariableProxy();
+ if (is_simple) {
+ auto proxy = pattern->AsVariableProxy();
+ scope_->RemoveUnresolved(proxy);
+ name = proxy->raw_name();
+ }
catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
Variable::NORMAL);
- BlockState block_state(&scope_, catch_scope);
- catch_block = ParseBlock(NULL, CHECK_OK);
+
+ Expect(Token::RPAREN, CHECK_OK);
+
+ {
+ BlockState block_state(&scope_, catch_scope);
+
+ // TODO(adamk): Make a version of ParseScopedBlock that takes a scope and
+ // a block.
+ catch_block =
+ factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ {
+ BlockState block_state(&scope_, block_scope);
+ Target target(&this->target_stack_, catch_block);
+
+ if (!is_simple) {
+ DeclarationDescriptor descriptor;
+ descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+ descriptor.parser = this;
+ descriptor.declaration_scope = scope_;
+ descriptor.scope = scope_;
+ descriptor.hoist_scope = nullptr;
+ descriptor.mode = LET;
+ descriptor.is_const = false;
+ descriptor.needs_init = true;
+ descriptor.declaration_pos = pattern->position();
+ descriptor.initialization_pos = pattern->position();
+ descriptor.init_op = Token::INIT_LET;
+
+ DeclarationParsingResult::Declaration decl(
+ pattern, pattern->position(),
+ factory()->NewVariableProxy(catch_variable));
+
+ PatternRewriter::DeclareAndInitializeVariables(
+ catch_block, &descriptor, &decl, nullptr, CHECK_OK);
+ }
+
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ Statement* stat = ParseStatementListItem(CHECK_OK);
+ if (stat && !stat->IsEmpty()) {
+ catch_block->statements()->Add(stat, zone());
+ }
+ }
+ Consume(Token::RBRACE);
+ }
+ block_scope->set_end_position(scanner()->location().end_pos);
+ block_scope = block_scope->FinalizeBlockScope();
+ catch_block->set_scope(block_scope);
+ }
catch_scope->set_end_position(scanner()->location().end_pos);
tok = peek();
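
With this change the catch parameter may be an arbitrary binding pattern: a
simple identifier is bound directly, while any other pattern is bound to the
internal .catch variable and then destructured through a let declaration at
the top of the catch block. Roughly:

    try {
      throw { code: 42, msg: "boom" };
    } catch ({ code, msg }) {     // as if: catch (.catch) {
      console.log(code, msg);     //   let { code, msg } = .catch; ... }
    }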
@@ -3153,7 +3256,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
factory()->NewTryCatchStatement(try_block, catch_scope, catch_variable,
catch_block, RelocInfo::kNoPosition);
try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- try_block->AddStatement(statement, zone());
+ try_block->statements()->Add(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -3323,16 +3426,18 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok) {
- // ES6 13.6.3.4 specifies that on each loop iteration the let variables are
- // copied into a new environment. After copying, the "next" statement of the
- // loop is executed to update the loop variables. The loop condition is
- // checked and the loop body is executed.
+ // ES6 13.7.4.8 specifies that on each loop iteration the let variables are
+ // copied into a new environment. Moreover, the "next" statement must be
+ // evaluated not in the environment of the just completed iteration but in
+ // that of the upcoming one. We achieve this with the following desugaring.
+ // Extra care is needed to preserve the completion value of the original loop.
//
- // We rewrite a for statement of the form
+ // We are given a for statement of the form
//
// labels: for (let/const x = i; cond; next) body
//
- // into
+ // and rewrite it as follows. Here we write {{ ... }} for init-blocks, i.e.,
+ // blocks whose ignore_completion_value_ flag is set.
//
// {
// let/const x = i;
@@ -3340,29 +3445,21 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// first = 1;
// undefined;
// outer: for (;;) {
- // { // This block's only function is to ensure that the statements it
- // // contains do not affect the normal completion value. This is
- // // accomplished by setting its ignore_completion_value bit.
- // // No new lexical scope is introduced, so lexically scoped variables
- // // declared here will be scoped to the outer for loop.
- // let/const x = temp_x;
- // if (first == 1) {
- // first = 0;
- // } else {
- // next;
- // }
- // flag = 1;
- // }
+ // let/const x = temp_x;
+ // {{ if (first == 1) {
+ // first = 0;
+ // } else {
+ // next;
+ // }
+ // flag = 1;
+ // if (!cond) break;
+ // }}
// labels: for (; flag == 1; flag = 0, temp_x = x) {
- // if (cond) {
- // body
- // } else {
- // break outer;
- // }
- // }
- // if (flag == 1) {
- // break;
+ // body
// }
+ // {{ if (flag == 1) // Body used break.
+ // break;
+ // }}
// }
// }
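
The observable payoff of this desugaring is the per-iteration copy of the
loop variables:

    let closures = [];
    for (let i = 0; i < 3; i++) closures.push(() => i);
    closures.map(f => f());  // [0, 1, 2]: each iteration sees its own `i`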
@@ -3370,11 +3467,11 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Scope* for_scope = scope_;
ZoneList<Variable*> temps(names->length(), zone());
- Block* outer_block = factory()->NewBlock(NULL, names->length() + 3, false,
+ Block* outer_block = factory()->NewBlock(NULL, names->length() + 4, false,
RelocInfo::kNoPosition);
// Add statement: let/const x = i.
- outer_block->AddStatement(init, zone());
+ outer_block->statements()->Add(init, zone());
const AstRawString* temp_name = ast_value_factory()->dot_for_string();
@@ -3388,7 +3485,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
Statement* assignment_statement = factory()->NewExpressionStatement(
assignment, RelocInfo::kNoPosition);
- outer_block->AddStatement(assignment_statement, zone());
+ outer_block->statements()->Add(assignment_statement, zone());
temps.Add(temp, zone());
}
@@ -3402,11 +3499,11 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, first_proxy, const1, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- outer_block->AddStatement(assignment_statement, zone());
+ outer_block->statements()->Add(assignment_statement, zone());
}
// make statement: undefined;
- outer_block->AddStatement(
+ outer_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
@@ -3419,7 +3516,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// in this function that looks up break targets.
ForStatement* outer_loop =
factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
- outer_block->AddStatement(outer_loop, zone());
+ outer_block->statements()->Add(outer_loop, zone());
outer_block->set_scope(for_scope);
scope_ = inner_scope;
@@ -3427,7 +3524,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Block* inner_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
Block* ignore_completion_block = factory()->NewBlock(
- NULL, names->length() + 2, true, RelocInfo::kNoPosition);
+ NULL, names->length() + 3, true, RelocInfo::kNoPosition);
ZoneList<Variable*> inner_vars(names->length(), zone());
// For each let variable x:
// make statement: let/const x = temp_x.
@@ -3446,7 +3543,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
DCHECK(init->position() != RelocInfo::kNoPosition);
proxy->var()->set_initializer_position(init->position());
- ignore_completion_block->AddStatement(assignment_statement, zone());
+ ignore_completion_block->statements()->Add(assignment_statement, zone());
}
// Make statement: if (first == 1) { first = 0; } else { next; }
@@ -3472,7 +3569,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
Statement* clear_first_or_next = factory()->NewIfStatement(
compare, clear_first, next, RelocInfo::kNoPosition);
- ignore_completion_block->AddStatement(clear_first_or_next, zone());
+ ignore_completion_block->statements()->Add(clear_first_or_next, zone());
}
Variable* flag = scope_->NewTemporary(temp_name);
@@ -3484,9 +3581,19 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- ignore_completion_block->AddStatement(assignment_statement, zone());
+ ignore_completion_block->statements()->Add(assignment_statement, zone());
}
- inner_block->AddStatement(ignore_completion_block, zone());
+
+ // Make statement: if (!cond) break.
+ if (cond) {
+ Statement* stop =
+ factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+ Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(
+ factory()->NewIfStatement(cond, noop, stop, cond->position()), zone());
+ }
+
+ inner_block->statements()->Add(ignore_completion_block, zone());
// Make cond expression for main loop: flag == 1.
Expression* flag_cond = NULL;
{
@@ -3524,23 +3631,14 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
compound_next, RelocInfo::kNoPosition);
}
- // Make statement: if (cond) { body; } else { break outer; }
- Statement* body_or_stop = body;
- if (cond) {
- Statement* stop =
- factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
- body_or_stop =
- factory()->NewIfStatement(cond, body, stop, cond->position());
- }
-
// Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
// Note that we re-use the original loop node, which retains its labels
// and ensures that any break or continue statements in body point to
// the right place.
- loop->Initialize(NULL, flag_cond, compound_next_statement, body_or_stop);
- inner_block->AddStatement(loop, zone());
+ loop->Initialize(NULL, flag_cond, compound_next_statement, body);
+ inner_block->statements()->Add(loop, zone());
- // Make statement: if (flag == 1) { break; }
+ // Make statement: {{if (flag == 1) break;}}
{
Expression* compare = NULL;
// Make compare expresion: flag == 1.
@@ -3555,7 +3653,10 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
Statement* if_flag_break =
factory()->NewIfStatement(compare, stop, empty, RelocInfo::kNoPosition);
- inner_block->AddStatement(if_flag_break, zone());
+ Block* ignore_completion_block =
+ factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(if_flag_break, zone());
+ inner_block->statements()->Add(ignore_completion_block, zone());
}
inner_scope->set_end_position(scanner()->location().end_pos);
@@ -3594,12 +3695,11 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
int num_decl = parsing_result.declarations.length();
bool accept_IN = num_decl >= 1;
- bool accept_OF = true;
ForEachStatement::VisitMode mode;
int each_beg_pos = scanner()->location().beg_pos;
int each_end_pos = scanner()->location().end_pos;
- if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
+ if (accept_IN && CheckInOrOf(&mode, ok)) {
if (!*ok) return nullptr;
if (num_decl != 1) {
const char* loop_type =
@@ -3636,7 +3736,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
each_beg_pos, each_end_pos);
init_block = factory()->NewBlock(
nullptr, 2, true, parsing_result.descriptor.declaration_pos);
- init_block->AddStatement(
+ init_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewAssignment(
Token::ASSIGN, single_var,
@@ -3699,8 +3799,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
CHECK_OK);
}
- body_block->AddStatement(each_initialization_block, zone());
- body_block->AddStatement(body, zone());
+ body_block->statements()->Add(each_initialization_block, zone());
+ body_block->statements()->Add(body, zone());
VariableProxy* temp_proxy =
factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
@@ -3736,7 +3836,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
for_scope = for_scope->FinalizeBlockScope();
// Parsed for-in loop w/ variable declarations.
if (init_block != nullptr) {
- init_block->AddStatement(loop, zone());
+ init_block->statements()->Add(loop, zone());
if (for_scope != nullptr) {
init_block->set_scope(for_scope);
}
@@ -3757,13 +3857,12 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* expression = ParseExpression(false, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
ForEachStatement::VisitMode mode;
- bool accept_OF = expression->IsVariableProxy();
is_let_identifier_expression =
expression->IsVariableProxy() &&
expression->AsVariableProxy()->raw_name() ==
ast_value_factory()->let_string();
- if (CheckInOrOf(accept_OF, &mode, ok)) {
+ if (CheckInOrOf(&mode, ok)) {
if (!*ok) return nullptr;
expression = this->CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, lhs_end_pos,
@@ -3776,12 +3875,28 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
+ // Make a block around the statement in case a lexical binding
+ // is introduced, e.g. by a FunctionDeclaration.
+ // This block must not use for_scope as its scope because if a
+ // lexical binding is introduced which overlaps with the for-in/of,
+ // expressions in the head of the loop should actually have variables
+ // resolved in the outer scope.
+ Scope* body_scope = NewScope(for_scope, BLOCK_SCOPE);
+ scope_ = body_scope;
+ Block* block =
+ factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
Statement* body = ParseSubStatement(NULL, CHECK_OK);
- InitializeForEachStatement(loop, expression, enumerable, body);
+ block->statements()->Add(body, zone());
+ InitializeForEachStatement(loop, expression, enumerable, block);
scope_ = saved_scope;
+ body_scope->set_end_position(scanner()->location().end_pos);
+ body_scope = body_scope->FinalizeBlockScope();
+ if (body_scope != nullptr) {
+ block->set_scope(body_scope);
+ }
for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
- DCHECK(for_scope == NULL);
+ DCHECK(for_scope == nullptr);
// Parsed for-in loop.
return loop;
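
As with the with-statement change above, the block keeps a lexical binding
introduced by the loop body in its own scope while names in the loop head
still resolve outside it; schematically:

    let target = [];
    for (target of [[1], [2]]) {  // `target` in the head resolves outside
      let target = 0;             // the body's `target` is a fresh binding
    }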
@@ -3851,11 +3966,21 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// const x = i;
// for (; c; n) b
// }
- DCHECK(init != NULL);
+ //
+ // or, desugar
+ // for (; c; n) b
+ // into
+ // {
+ // for (; c; n) b
+ // }
+ // just in case b introduces a lexical binding some other way, e.g., if b
+ // is a FunctionDeclaration.
Block* block =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
- block->AddStatement(init, zone());
- block->AddStatement(loop, zone());
+ if (init != nullptr) {
+ block->statements()->Add(init, zone());
+ }
+ block->statements()->Add(loop, zone());
block->set_scope(for_scope);
loop->Initialize(NULL, cond, next, body);
result = block;
@@ -3989,12 +4114,34 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
DCHECK(!assignment->is_compound());
initializer = assignment->value();
expr = assignment->target();
+
+ // TODO(adamk): Only call this if necessary.
+ RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
+ parser_->scope_, parameters->scope);
}
AddFormalParameter(parameters, expr, initializer, is_rest);
}
+DoExpression* Parser::ParseDoExpression(bool* ok) {
+ // AssignmentExpression ::
+ // do '{' StatementList '}'
+ int pos = peek_position();
+
+ Expect(Token::DO, CHECK_OK);
+ Variable* result =
+ scope_->NewTemporary(ast_value_factory()->dot_result_string());
+ Block* block = ParseScopedBlock(nullptr, CHECK_OK);
+ DoExpression* expr = factory()->NewDoExpression(block, result, pos);
+ if (!Rewriter::Rewrite(this, expr, ast_value_factory())) {
+ *ok = false;
+ return nullptr;
+ }
+ return expr;
+}
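
ParseDoExpression implements the flag-gated do-expression proposal: a
statement block in expression position whose completion value, captured in
the temporary .result variable, becomes the value of the expression. For
example, behind --harmony-do-expressions (compute() is a hypothetical helper):

    let x = do {
      let t = compute();
      t * t + 1;          // the block's completion value becomes `x`
    };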
+
+
void ParserTraits::ParseArrowFunctionFormalParameterList(
ParserFormalParameters* parameters, Expression* expr,
const Scanner::Location& params_loc,
@@ -4103,7 +4250,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
: NewScope(scope_, FUNCTION_SCOPE, kind);
- scope->SetLanguageMode(language_mode);
+ SetLanguageMode(scope, language_mode);
ZoneList<Statement*>* body = NULL;
int arity = -1;
int materialized_literal_count = -1;
@@ -4284,7 +4431,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
}
- if (is_strict(language_mode) || allow_harmony_sloppy()) {
+ if (is_strict(language_mode) || allow_harmony_sloppy() ||
+ allow_harmony_destructuring()) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
}
@@ -4335,7 +4483,7 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
- scope_->SetLanguageMode(entry.language_mode());
+ SetLanguageMode(scope_, entry.language_mode());
if (entry.uses_super_property()) scope_->RecordSuperPropertyUsage();
if (entry.calls_eval()) scope_->RecordEvalCall();
return;
@@ -4371,7 +4519,7 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
- scope_->SetLanguageMode(logger.language_mode());
+ SetLanguageMode(scope_, logger.language_mode());
if (logger.uses_super_property()) {
scope_->RecordSuperPropertyUsage();
}
@@ -4389,21 +4537,6 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
}
-void Parser::AddAssertIsConstruct(ZoneList<Statement*>* body, int pos) {
- ZoneList<Expression*>* arguments =
- new (zone()) ZoneList<Expression*>(0, zone());
- CallRuntime* construct_check = factory()->NewCallRuntime(
- Runtime::kInlineIsConstructCall, arguments, pos);
- CallRuntime* non_callable_error = factory()->NewCallRuntime(
- Runtime::kThrowConstructorNonCallableError, arguments, pos);
- IfStatement* if_statement = factory()->NewIfStatement(
- factory()->NewUnaryOperation(Token::NOT, construct_check, pos),
- factory()->NewReturnStatement(non_callable_error, pos),
- factory()->NewEmptyStatement(pos), pos);
- body->Add(if_statement, zone());
-}
-
-
Statement* Parser::BuildAssertIsCoercible(Variable* var) {
// if (var === null || var === undefined)
// throw /* type error kNonCoercible) */;
@@ -4534,11 +4667,11 @@ Block* Parser::BuildParameterInitializationBlock(
loop->Initialize(init, cond, next, body);
- init_block->AddStatement(
+ init_block->statements()->Add(
factory()->NewExpressionStatement(init_array, RelocInfo::kNoPosition),
zone());
- init_block->AddStatement(loop, zone());
+ init_block->statements()->Add(loop, zone());
descriptor.initialization_pos = pos;
}
@@ -4569,7 +4702,7 @@ Block* Parser::BuildParameterInitializationBlock(
if (param_scope != nullptr) {
CheckConflictingVarDeclarations(param_scope, CHECK_OK);
}
- init_block->AddStatement(param_block, zone());
+ init_block->statements()->Add(param_block, zone());
}
}
return init_block;
@@ -4597,12 +4730,6 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
result->Add(NULL, zone());
}
- // For concise constructors, check that they are constructed,
- // not called.
- if (IsClassConstructor(kind)) {
- AddAssertIsConstruct(result, pos);
- }
-
ZoneList<Statement*>* body = result;
Scope* inner_scope = scope_;
Block* inner_block = nullptr;
@@ -4664,7 +4791,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
if (!parameters.is_simple) {
DCHECK_NOT_NULL(inner_scope);
DCHECK_EQ(body, inner_block->statements());
- scope_->SetLanguageMode(inner_scope->language_mode());
+ SetLanguageMode(scope_, inner_scope->language_mode());
Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
DCHECK_NOT_NULL(init_block);
@@ -4672,6 +4799,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
inner_scope = inner_scope->FinalizeBlockScope();
if (inner_scope != nullptr) {
CheckConflictingVarDeclarations(inner_scope, CHECK_OK);
+ InsertShadowingVarBindingInitializers(inner_block);
}
result->Add(init_block, zone());
@@ -4684,12 +4812,9 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxies as is the case now.
- Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
- bool use_strict_const = is_strict(scope_->language_mode()) ||
- (!allow_legacy_const() && allow_harmony_sloppy());
- if (use_strict_const) {
- fvar_init_op = Token::INIT_CONST;
- }
+ const bool use_strict_const = is_strict(scope_->language_mode());
+ Token::Value fvar_init_op =
+ use_strict_const ? Token::INIT_CONST : Token::INIT_CONST_LEGACY;
VariableMode fvar_mode = use_strict_const ? CONST : CONST_LEGACY;
Variable* fvar = new (zone())
Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
@@ -4699,8 +4824,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
scope_->DeclareFunctionVar(fvar_declaration);
- VariableProxy* fproxy = scope_->NewUnresolved(factory(), function_name);
- fproxy->BindTo(fvar);
+ VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
result->Set(kFunctionNameAssignmentIndex,
factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op, fproxy,
@@ -4728,16 +4852,13 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_->set_allow_lazy(true);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(harmony_arrow_functions);
SET_ALLOW(harmony_sloppy);
SET_ALLOW(harmony_sloppy_let);
SET_ALLOW(harmony_rest_parameters);
SET_ALLOW(harmony_default_parameters);
- SET_ALLOW(harmony_spread_calls);
SET_ALLOW(harmony_destructuring);
- SET_ALLOW(harmony_spread_arrays);
- SET_ALLOW(harmony_new_target);
SET_ALLOW(strong_mode);
+ SET_ALLOW(harmony_do_expressions);
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4774,8 +4895,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, block_scope);
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ RaiseLanguageMode(STRICT);
scope_->SetScopeName(name);
VariableProxy* proxy = NULL;
@@ -4838,15 +4958,13 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
block_scope->language_mode());
}
+ // Note that we do not finalize this block scope because strong
+ // mode uses it as a sentinel value indicating an anonymous class.
block_scope->set_end_position(end_pos);
if (name != NULL) {
DCHECK_NOT_NULL(proxy);
proxy->var()->set_initializer_position(end_pos);
- } else {
- // Unnamed classes should not have scopes (the scope will be empty).
- DCHECK_EQ(block_scope->num_var_or_const(), 0);
- block_scope = nullptr;
}
return factory()->NewClassLiteral(name, block_scope, proxy, extends,
@@ -4929,8 +5047,7 @@ Literal* Parser::GetLiteralUndefined(int position) {
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != NULL) {
- // In harmony mode we treat conflicting variable bindinds as early
- // errors. See ES5 16 for a definition of early errors.
+ // In ES6, conflicting variable bindings are early errors.
const AstRawString* name = decl->proxy()->raw_name();
int position = decl->proxy()->position();
Scanner::Location location = position == RelocInfo::kNoPosition
@@ -4943,6 +5060,31 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
}
+void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
+ // For each var-binding that shadows a parameter, insert an assignment
+ // initializing the variable with the parameter.
+ Scope* inner_scope = inner_block->scope();
+ DCHECK(inner_scope->is_declaration_scope());
+ Scope* function_scope = inner_scope->outer_scope();
+ DCHECK(function_scope->is_function_scope());
+ ZoneList<Declaration*>* decls = inner_scope->declarations();
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ if (decl->mode() != VAR || !decl->IsVariableDeclaration()) continue;
+ const AstRawString* name = decl->proxy()->raw_name();
+ Variable* parameter = function_scope->LookupLocal(name);
+ if (parameter == nullptr) continue;
+ VariableProxy* to = inner_scope->NewUnresolved(factory(), name);
+ VariableProxy* from = factory()->NewVariableProxy(parameter);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, to, from, RelocInfo::kNoPosition);
+ Statement* statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
+ inner_block->statements()->InsertAt(0, statement, zone());
+ }
+}
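
The effect, in script terms: with a non-simple parameter list the body's vars
live in an inner scope, and a var that redeclares a parameter must start out
holding the parameter's value. Sketch:

    function f(x, y = 0) {  // non-simple list forces an inner var scope
      var x;                // shadows parameter x ...
      return x;             // ... but is initialized from it: f(7) === 7
    }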
+
+
void Parser::InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok) {
// For each variable which is used as a function declaration in a sloppy
// block,
@@ -6287,5 +6429,27 @@ Expression* Parser::SpreadCallNew(Expression* function,
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
}
+
+
+void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
+ v8::Isolate::UseCounterFeature feature;
+ if (is_sloppy(mode))
+ feature = v8::Isolate::kSloppyMode;
+ else if (is_strong(mode))
+ feature = v8::Isolate::kStrongMode;
+ else if (is_strict(mode))
+ feature = v8::Isolate::kStrictMode;
+ else
+ UNREACHABLE();
+ ++use_counts_[feature];
+ scope->SetLanguageMode(mode);
+}
+
+
+void Parser::RaiseLanguageMode(LanguageMode mode) {
+ SetLanguageMode(scope_,
+ static_cast<LanguageMode>(scope_->language_mode() | mode));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index cf4cdad66b..b674a9d2e2 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -796,6 +796,8 @@ class ParserTraits {
const Scanner::Location& params_loc,
Scanner::Location* duplicate_loc, bool* ok);
+ V8_INLINE DoExpression* ParseDoExpression(bool* ok);
+
void ReindexLiterals(const ParserFormalParameters& parameters);
// Temporary glue; these functions will move to ParserBase.
@@ -925,7 +927,6 @@ class Parser : public ParserBase<ParserTraits> {
void SetCachedData(ParseInfo* info);
- bool inside_with() const { return scope_->inside_with(); }
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
}
@@ -971,6 +972,7 @@ class Parser : public ParserBase<ParserTraits> {
Block* ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
bool* ok);
+ DoExpression* ParseDoExpression(bool* ok);
struct DeclarationDescriptor {
enum Kind { NORMAL, PARAMETER };
@@ -1031,7 +1033,7 @@ class Parser : public ParserBase<ParserTraits> {
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- virtual void Visit(AstNode* node) override;
+ void Visit(AstNode* node) override;
void RecurseIntoSubpattern(AstNode* pattern, Expression* value) {
Expression* old_value = current_value_;
@@ -1046,7 +1048,6 @@ class Parser : public ParserBase<ParserTraits> {
AstValueFactory* ast_value_factory() const {
return descriptor_->parser->ast_value_factory();
}
- bool inside_with() const { return descriptor_->parser->inside_with(); }
Zone* zone() const { return descriptor_->parser->zone(); }
Expression* pattern_;
@@ -1104,6 +1105,8 @@ class Parser : public ParserBase<ParserTraits> {
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok);
+ void RewriteDoExpression(Expression* expr, bool* ok);
+
FunctionLiteral* ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
@@ -1123,8 +1126,8 @@ class Parser : public ParserBase<ParserTraits> {
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
- // For harmony block scoping mode: Check if the scope has conflicting var/let
- // declarations from different scopes. It covers for example
+ // Check if the scope has conflicting var/let declarations from different
+ // scopes. This covers for example
//
// function f() { { { var x; } let x; } }
// function g() { { var x; let x; } }
@@ -1134,6 +1137,10 @@ class Parser : public ParserBase<ParserTraits> {
// hoisted over such a scope.
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+ // Insert initializer statements for var-bindings shadowing parameter bindings
+ // from a non-simple parameter list.
+ void InsertShadowingVarBindingInitializers(Block* block);
+
// Implement sloppy block-scoped functions, ES2015 Annex B 3.3
void InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok);
@@ -1147,7 +1154,6 @@ class Parser : public ParserBase<ParserTraits> {
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
- void AddAssertIsConstruct(ZoneList<Statement*>* body, int pos);
Statement* BuildAssertIsCoercible(Variable* var);
// Factory methods.
@@ -1193,6 +1199,9 @@ class Parser : public ParserBase<ParserTraits> {
Expression* SpreadCallNew(Expression* function,
ZoneList<v8::internal::Expression*>* args, int pos);
+ void SetLanguageMode(Scope* scope, LanguageMode mode);
+ void RaiseLanguageMode(LanguageMode mode);
+
Scanner scanner_;
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
@@ -1244,6 +1253,7 @@ ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
function_type, ok);
}
+
void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
bool* ok) {
parser_->CheckConflictingVarDeclarations(scope, ok);
@@ -1373,6 +1383,14 @@ void ParserTraits::AddParameterInitializationBlock(
}
}
}
-} } // namespace v8::internal
+
+
+DoExpression* ParserTraits::ParseDoExpression(bool* ok) {
+ return parser_->ParseDoExpression(ok);
+}
+
+
+} // namespace internal
+} // namespace v8
#endif // V8_PARSER_H_
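
Note: ParserTraits::ParseDoExpression above is pure forwarding through the traits object's parser back-pointer; this glue is what lets ParserBase stay generic over Parser and PreParser. A hedged standalone sketch of the delegation idiom (members and return types illustrative, not V8's signatures):

#include <iostream>

class Parser;  // forward declaration breaks the dependency cycle

class ParserTraits {
 public:
  explicit ParserTraits(Parser* parser) : parser_(parser) {}
  int ParseDoExpression();  // defined once Parser is a complete type
 private:
  Parser* parser_;
};

class Parser {
 public:
  int ParseDoExpression() { return 42; }  // stand-in for real parsing work
};

int ParserTraits::ParseDoExpression() { return parser_->ParseDoExpression(); }

int main() {
  Parser parser;
  ParserTraits traits(&parser);
  std::cout << traits.ParseDoExpression() << "\n";  // prints 42
  return 0;
}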
diff --git a/deps/v8/src/pattern-rewriter.cc b/deps/v8/src/pattern-rewriter.cc
index e4c602aa48..e96aef8ba2 100644
--- a/deps/v8/src/pattern-rewriter.cc
+++ b/deps/v8/src/pattern-rewriter.cc
@@ -4,6 +4,7 @@
#include "src/ast.h"
#include "src/messages.h"
+#include "src/parameter-initializer-rewriter.h"
#include "src/parser.h"
namespace v8 {
@@ -30,7 +31,7 @@ void Parser::PatternRewriter::DeclareAndInitializeVariables(
void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
Expression* value = current_value_;
- descriptor_->scope->RemoveUnresolved(pattern->AsVariableProxy());
+ descriptor_->scope->RemoveUnresolved(pattern);
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
@@ -157,7 +158,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// we're in a with. The initialization value should not
// necessarily be stored in the global object in that case,
// which is why we need to generate a separate assignment node.
- if (value != NULL && !inside_with()) {
+ if (value != NULL && !descriptor_->scope->inside_with()) {
arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
// Construct the call to Runtime_InitializeVarGlobal
@@ -171,11 +172,11 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
}
if (initialize != NULL) {
- block_->AddStatement(
+ block_->statements()->Add(
factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
zone());
}
- } else if (value != nullptr && (descriptor_->needs_init ||
+ } else if (value != nullptr && (descriptor_->mode == CONST_LEGACY ||
IsLexicalVariableMode(descriptor_->mode))) {
// Constant initializations always assign to the declared constant which
// is always at the function scope level. This is only relevant for
@@ -189,7 +190,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK_NOT_NULL(value);
Assignment* assignment = factory()->NewAssignment(
descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
- block_->AddStatement(
+ block_->statements()->Add(
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
zone());
value = NULL;
@@ -205,7 +206,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
Assignment* assignment = factory()->NewAssignment(
descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
- block_->AddStatement(
+ block_->statements()->Add(
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
zone());
}
@@ -220,7 +221,7 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
Token::ASSIGN, factory()->NewVariableProxy(temp), value,
RelocInfo::kNoPosition);
- block_->AddStatement(
+ block_->statements()->Add(
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
zone());
}
@@ -231,8 +232,8 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
auto temp = CreateTempVar(current_value_);
- block_->AddStatement(descriptor_->parser->BuildAssertIsCoercible(temp),
- zone());
+ block_->statements()->Add(descriptor_->parser->BuildAssertIsCoercible(temp),
+ zone());
for (ObjectLiteralProperty* property : *pattern->properties()) {
RecurseIntoSubpattern(
@@ -244,8 +245,13 @@ void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
- auto iterator = CreateTempVar(
- descriptor_->parser->GetIterator(current_value_, factory()));
+ auto temp = CreateTempVar(current_value_);
+
+ block_->statements()->Add(descriptor_->parser->BuildAssertIsCoercible(temp),
+ zone());
+
+ auto iterator = CreateTempVar(descriptor_->parser->GetIterator(
+ factory()->NewVariableProxy(temp), factory()));
auto done = CreateTempVar(
factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
auto result = CreateTempVar();
@@ -264,12 +270,13 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
// }
auto next_block =
factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
- next_block->AddStatement(factory()->NewExpressionStatement(
- descriptor_->parser->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator),
- result, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
+ next_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ descriptor_->parser->BuildIteratorNextResult(
+ factory()->NewVariableProxy(iterator), result,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
auto assign_to_done = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(done),
@@ -287,7 +294,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
- next_block->AddStatement(
+ next_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewAssignment(Token::ASSIGN,
factory()->NewVariableProxy(v), next_value,
@@ -301,7 +308,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
RelocInfo::kNoPosition),
next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
- block_->AddStatement(if_statement, zone());
+ block_->statements()->Add(if_statement, zone());
if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
@@ -334,8 +341,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
RelocInfo::kNoPosition),
factory()->NewEmptyStatement(RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
- block_->AddStatement(if_statement, zone());
-
+ block_->statements()->Add(if_statement, zone());
RecurseIntoSubpattern(spread->expression(),
factory()->NewVariableProxy(array));
@@ -354,23 +360,21 @@ void Parser::PatternRewriter::VisitAssignment(Assignment* node) {
Token::EQ_STRICT, factory()->NewVariableProxy(temp),
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
+ Expression* initializer = node->value();
+ if (descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
+ descriptor_->scope->is_arrow_scope()) {
+ // TODO(adamk): Only call this if necessary.
+ RewriteParameterInitializerScope(
+ descriptor_->parser->stack_limit(), initializer,
+ descriptor_->scope->outer_scope(), descriptor_->scope);
+ }
Expression* value = factory()->NewConditional(
- is_undefined, node->value(), factory()->NewVariableProxy(temp),
+ is_undefined, initializer, factory()->NewVariableProxy(temp),
RelocInfo::kNoPosition);
RecurseIntoSubpattern(node->target(), value);
}
-void Parser::PatternRewriter::VisitSpread(Spread* node) {
- UNREACHABLE();
-}
-
-
-void Parser::PatternRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
- UNREACHABLE();
-}
-
-
// =============== UNREACHABLE =============================
void Parser::PatternRewriter::Visit(AstNode* node) { UNREACHABLE(); }
@@ -393,9 +397,10 @@ NOT_A_PATTERN(Conditional)
NOT_A_PATTERN(ContinueStatement)
NOT_A_PATTERN(CountOperation)
NOT_A_PATTERN(DebuggerStatement)
+NOT_A_PATTERN(DoExpression)
NOT_A_PATTERN(DoWhileStatement)
NOT_A_PATTERN(EmptyStatement)
-NOT_A_PATTERN(SloppyBlockFunctionStatement)
+NOT_A_PATTERN(EmptyParentheses)
NOT_A_PATTERN(ExportDeclaration)
NOT_A_PATTERN(ExpressionStatement)
NOT_A_PATTERN(ForInStatement)
@@ -410,6 +415,8 @@ NOT_A_PATTERN(NativeFunctionLiteral)
NOT_A_PATTERN(Property)
NOT_A_PATTERN(RegExpLiteral)
NOT_A_PATTERN(ReturnStatement)
+NOT_A_PATTERN(SloppyBlockFunctionStatement)
+NOT_A_PATTERN(Spread)
NOT_A_PATTERN(SuperPropertyReference)
NOT_A_PATTERN(SuperCallReference)
NOT_A_PATTERN(SwitchStatement)
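
Note: the NOT_A_PATTERN list above stamps out one trapping visitor per AST node kind that can never occur inside a destructuring pattern, which is why adding node types (DoExpression, Spread) is a one-line change. A standalone sketch of the idiom (node list and error handling illustrative):

#include <cstdio>
#include <cstdlib>

#define NOT_A_PATTERN_LIST(V) V(ReturnStatement) V(Spread) V(SwitchStatement)

struct PatternRewriter {
#define NOT_A_PATTERN(Node)                                             \
  void Visit##Node() {                                                  \
    std::fprintf(stderr, "unreachable: " #Node " is not a pattern\n");  \
    std::abort();                                                       \
  }
  NOT_A_PATTERN_LIST(NOT_A_PATTERN)
#undef NOT_A_PATTERN
};

int main() {
  PatternRewriter rewriter;  // compiles; calling any Visit* would trap
  (void)rewriter;
  return 0;
}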
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index b1e2825751..4f6a35d66e 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -720,7 +720,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
UNREACHABLE();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_ASSEMBLER_PPC_INL_H_
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 6bbb53c4ba..ac03ce6949 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -128,16 +128,6 @@ Register ToRegister(int num) {
}
-const char* DoubleRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
- "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
- "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
- return names[index];
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -286,14 +276,14 @@ bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
Register Assembler::GetRA(Instr instr) {
Register reg;
- reg.code_ = Instruction::RAValue(instr);
+ reg.reg_code = Instruction::RAValue(instr);
return reg;
}
Register Assembler::GetRB(Instr instr) {
Register reg;
- reg.code_ = Instruction::RBValue(instr);
+ reg.reg_code = Instruction::RBValue(instr);
return reg;
}
@@ -747,6 +737,11 @@ void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
}
+void Assembler::popcntw(Register ra, Register rs) {
+ emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
+}
+
+
void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
@@ -1481,6 +1476,11 @@ void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
}
+void Assembler::popcntd(Register ra, Register rs) {
+ emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
+}
+
+
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
@@ -2163,6 +2163,12 @@ void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
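
Note: the new popcntw/popcntd/fcfids emitters above all build the 32-bit instruction word by multiplying register codes into fixed bit fields; rs.code() * B21 is simply rs.code() << 21. A standalone sketch of the packing, using a placeholder opcode rather than a real POPCNT encoding:

#include <cstdint>
#include <cstdio>

const uint32_t B21 = 1u << 21;  // start of the RS field
const uint32_t B16 = 1u << 16;  // start of the RA field

uint32_t EncodeXForm(uint32_t opcode, uint32_t rs, uint32_t ra) {
  // Multiplying by a power of two compiles to the same shift as <<.
  return opcode | rs * B21 | ra * B16;
}

int main() {
  const uint32_t kFakeOpcode = 0x7C000000u;  // placeholder, not POPCNTW
  std::printf("0x%08x\n", EncodeXForm(kFakeOpcode, 4, 3));  // 0x7c830000
  return 0;
}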
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index a1c08ad0ea..36843c17ab 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -44,7 +44,6 @@
#include <vector>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/ppc/constants-ppc.h"
#define ABI_USES_FUNCTION_DESCRIPTORS \
@@ -61,9 +60,9 @@
(V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER kRegister_r2_Code
+#define ABI_TOC_REGISTER Register::kCode_r2
#else
-#define ABI_TOC_REGISTER kRegister_r13_Code
+#define ABI_TOC_REGISTER Register::kCode_r13
#endif
#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
@@ -71,6 +70,40 @@
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
+
+#if V8_EMBEDDED_CONSTANT_POOL
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r30)
+#else
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
+#endif
+
+#define DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+// clang-format on
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -92,310 +125,112 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
-// Core register
struct Register {
- static const int kNumRegisters = 32;
- static const int kSizeInBytes = kPointerSize;
-
-#if V8_TARGET_LITTLE_ENDIAN
- static const int kMantissaOffset = 0;
- static const int kExponentOffset = 4;
-#else
- static const int kMantissaOffset = 4;
- static const int kExponentOffset = 0;
-#endif
-
- static const int kAllocatableLowRangeBegin = 3;
- static const int kAllocatableLowRangeEnd = 10;
- static const int kAllocatableHighRangeBegin = 14;
- static const int kAllocatableHighRangeEnd =
- FLAG_enable_embedded_constant_pool ? 27 : 28;
- static const int kAllocatableContext = 30;
-
- static const int kNumAllocatableLow =
- kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
- static const int kNumAllocatableHigh =
- kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
- static const int kMaxNumAllocatableRegisters =
- kNumAllocatableLow + kNumAllocatableHigh + 1; // cp
-
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- static int ToAllocationIndex(Register reg) {
- int index;
- int code = reg.code();
- if (code == kAllocatableContext) {
- // Context is the last index
- index = NumAllocatableRegisters() - 1;
- } else if (code <= kAllocatableLowRangeEnd) {
- // low range
- index = code - kAllocatableLowRangeBegin;
- } else {
- // high range
- index = code - kAllocatableHighRangeBegin + kNumAllocatableLow;
- }
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return index;
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- // Last index is always the 'cp' register.
- if (index == kMaxNumAllocatableRegisters - 1) {
- return from_code(kAllocatableContext);
- }
- return (index < kNumAllocatableLow)
- ? from_code(index + kAllocatableLowRangeBegin)
- : from_code(index - kNumAllocatableLow +
- kAllocatableHighRangeBegin);
- }
+ static const int kNumRegisters = Code::kAfterLast;
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- "r8",
- "r9",
- "r10",
- "r14",
- "r15",
- "r16",
- "r17",
- "r18",
- "r19",
- "r20",
- "r21",
- "r22",
- "r23",
- "r24",
- "r25",
- "r26",
- "r27",
- "r28",
- "cp",
- };
- if (FLAG_enable_embedded_constant_pool &&
- (index == kMaxNumAllocatableRegisters - 2)) {
- return names[index + 1];
- }
- return names[index];
- }
+#define REGISTER_COUNT(R) 1 +
+ static const int kNumAllocatable =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
+#undef REGISTER_COUNT
+#define REGISTER_BIT(R) 1 << kCode_##R |
static const RegList kAllocatable =
- 1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
- 1 << 14 | 1 << 15 | 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 | 1 << 20 |
- 1 << 21 | 1 << 22 | 1 << 23 | 1 << 24 | 1 << 25 | 1 << 26 | 1 << 27 |
- (FLAG_enable_embedded_constant_pool ? 0 : 1 << 28) | 1 << 30;
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT)0;
+#undef REGISTER_BIT
static Register from_code(int code) {
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
Register r = {code};
return r;
}
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
-
void set_code(int code) {
- code_ = code;
+ reg_code = code;
DCHECK(is_valid());
}
+#if V8_TARGET_LITTLE_ENDIAN
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#else
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#endif
+
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0; // general scratch
-const int kRegister_sp_Code = 1; // stack pointer
-const int kRegister_r2_Code = 2; // special on PowerPC
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11; // lithium scratch
-const int kRegister_ip_Code = 12; // ip (general scratch)
-const int kRegister_r13_Code = 13; // special on PowerPC
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-
-const int kRegister_r16_Code = 16;
-const int kRegister_r17_Code = 17;
-const int kRegister_r18_Code = 18;
-const int kRegister_r19_Code = 19;
-const int kRegister_r20_Code = 20;
-const int kRegister_r21_Code = 21;
-const int kRegister_r22_Code = 22;
-const int kRegister_r23_Code = 23;
-const int kRegister_r24_Code = 24;
-const int kRegister_r25_Code = 25;
-const int kRegister_r26_Code = 26;
-const int kRegister_r27_Code = 27;
-const int kRegister_r28_Code = 28; // constant pool pointer
-const int kRegister_r29_Code = 29; // roots array pointer
-const int kRegister_r30_Code = 30; // context pointer
-const int kRegister_fp_Code = 31; // frame pointer
-
-const Register no_reg = {kRegister_no_reg_Code};
-
-const Register r0 = {kRegister_r0_Code};
-const Register sp = {kRegister_sp_Code};
-const Register r2 = {kRegister_r2_Code};
-const Register r3 = {kRegister_r3_Code};
-const Register r4 = {kRegister_r4_Code};
-const Register r5 = {kRegister_r5_Code};
-const Register r6 = {kRegister_r6_Code};
-const Register r7 = {kRegister_r7_Code};
-const Register r8 = {kRegister_r8_Code};
-const Register r9 = {kRegister_r9_Code};
-const Register r10 = {kRegister_r10_Code};
-const Register r11 = {kRegister_r11_Code};
-const Register ip = {kRegister_ip_Code};
-const Register r13 = {kRegister_r13_Code};
-const Register r14 = {kRegister_r14_Code};
-const Register r15 = {kRegister_r15_Code};
-
-const Register r16 = {kRegister_r16_Code};
-const Register r17 = {kRegister_r17_Code};
-const Register r18 = {kRegister_r18_Code};
-const Register r19 = {kRegister_r19_Code};
-const Register r20 = {kRegister_r20_Code};
-const Register r21 = {kRegister_r21_Code};
-const Register r22 = {kRegister_r22_Code};
-const Register r23 = {kRegister_r23_Code};
-const Register r24 = {kRegister_r24_Code};
-const Register r25 = {kRegister_r25_Code};
-const Register r26 = {kRegister_r26_Code};
-const Register r27 = {kRegister_r27_Code};
-const Register r28 = {kRegister_r28_Code};
-const Register r29 = {kRegister_r29_Code};
-const Register r30 = {kRegister_r30_Code};
-const Register fp = {kRegister_fp_Code};
-
-// Give alias names to registers
-const Register cp = {kRegister_r30_Code}; // JavaScript context pointer
-const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer.
-const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
+
+// Aliases
+const Register kLithiumScratch = r11; // lithium scratch.
+const Register kConstantPoolRegister = r28; // Constant pool.
+const Register kRootRegister = r29; // Roots array pointer.
+const Register cp = r30; // JavaScript context pointer.
// Double word FP register.
struct DoubleRegister {
- static const int kNumRegisters = 32;
- static const int kMaxNumRegisters = kNumRegisters;
- static const int kNumVolatileRegisters = 14; // d0-d13
- static const int kSizeInBytes = 8;
-
- static const int kAllocatableLowRangeBegin = 1;
- static const int kAllocatableLowRangeEnd = 12;
- static const int kAllocatableHighRangeBegin = 15;
- static const int kAllocatableHighRangeEnd = 31;
-
- static const int kNumAllocatableLow =
- kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
- static const int kNumAllocatableHigh =
- kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
- static const int kMaxNumAllocatableRegisters =
- kNumAllocatableLow + kNumAllocatableHigh;
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // TODO(turbofan)
- inline static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
- static int ToAllocationIndex(DoubleRegister reg) {
- int code = reg.code();
- int index = (code <= kAllocatableLowRangeEnd)
- ? code - kAllocatableLowRangeBegin
- : code - kAllocatableHighRangeBegin + kNumAllocatableLow;
- DCHECK(index < kMaxNumAllocatableRegisters);
- return index;
- }
-
- static DoubleRegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index < kNumAllocatableLow)
- ? from_code(index + kAllocatableLowRangeBegin)
- : from_code(index - kNumAllocatableLow +
- kAllocatableHighRangeBegin);
- }
-
- static const char* AllocationIndexToString(int index);
-
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
- return r;
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return code_ == reg.code_; }
+ static const int kNumRegisters = Code::kAfterLast;
+ static const int kMaxNumRegisters = kNumRegisters;
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
- void split_code(int* vm, int* m) const {
- DCHECK(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
- int code_;
+ int reg_code;
};
-
-const DoubleRegister no_dreg = {-1};
-const DoubleRegister d0 = {0};
-const DoubleRegister d1 = {1};
-const DoubleRegister d2 = {2};
-const DoubleRegister d3 = {3};
-const DoubleRegister d4 = {4};
-const DoubleRegister d5 = {5};
-const DoubleRegister d6 = {6};
-const DoubleRegister d7 = {7};
-const DoubleRegister d8 = {8};
-const DoubleRegister d9 = {9};
-const DoubleRegister d10 = {10};
-const DoubleRegister d11 = {11};
-const DoubleRegister d12 = {12};
-const DoubleRegister d13 = {13};
-const DoubleRegister d14 = {14};
-const DoubleRegister d15 = {15};
-const DoubleRegister d16 = {16};
-const DoubleRegister d17 = {17};
-const DoubleRegister d18 = {18};
-const DoubleRegister d19 = {19};
-const DoubleRegister d20 = {20};
-const DoubleRegister d21 = {21};
-const DoubleRegister d22 = {22};
-const DoubleRegister d23 = {23};
-const DoubleRegister d24 = {24};
-const DoubleRegister d25 = {25};
-const DoubleRegister d26 = {26};
-const DoubleRegister d27 = {27};
-const DoubleRegister d28 = {28};
-const DoubleRegister d29 = {29};
-const DoubleRegister d30 = {30};
-const DoubleRegister d31 = {31};
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_dreg = {DoubleRegister::kCode_no_reg};
// Aliases for double registers. Defined using #define instead of
// "static const DoubleRegister&" because Clang complains otherwise when a
@@ -409,19 +244,19 @@ Register ToRegister(int num);
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+ bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
@@ -1072,6 +907,7 @@ class Assembler : public AssemblerBase {
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
+ void popcntd(Register dst, Register src);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
@@ -1101,6 +937,7 @@ class Assembler : public AssemblerBase {
void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC);
+ void popcntw(Register dst, Register src);
void subi(Register dst, Register src1, const Operand& src2);
@@ -1213,6 +1050,8 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fcfids(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fctidz(const DoubleRegister frt, const DoubleRegister frb,
@@ -1471,7 +1310,10 @@ class Assembler : public AssemblerBase {
}
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
- void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; }
+ void EndBlockTrampolinePool() {
+ int count = --trampoline_pool_blocked_nesting_;
+ if (count == 0) CheckTrampolinePoolQuick();
+ }
bool is_trampoline_pool_blocked() const {
return trampoline_pool_blocked_nesting_ > 0;
}
@@ -1612,7 +1454,7 @@ class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_ASSEMBLER_PPC_H_
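
Note: the GENERAL_REGISTERS/ALLOCATABLE_GENERAL_REGISTERS X-macros above derive the enum, the allocatable count, and the RegList bitmask from a single list, replacing the hand-maintained kRegister_*_Code constants that could drift apart. A minimal standalone sketch with a trimmed, illustrative register list:

#include <cstdio>

#define GENERAL_REGISTERS(V) V(r0) V(sp) V(r3) V(r4)
#define ALLOCATABLE_REGISTERS(V) V(r3) V(r4)

enum Code {
#define REGISTER_CODE(R) kCode_##R,
  GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
  kAfterLast
};

// Expands to "1 + 1 + 0": one term per allocatable register.
#define REGISTER_COUNT(R) 1 +
const int kNumAllocatable = ALLOCATABLE_REGISTERS(REGISTER_COUNT) 0;
#undef REGISTER_COUNT

// Expands to "1 << kCode_r3 | 1 << kCode_r4 | 0": one bit per register.
#define REGISTER_BIT(R) 1 << kCode_##R |
const unsigned kAllocatableMask = ALLOCATABLE_REGISTERS(REGISTER_BIT) 0;
#undef REGISTER_BIT

int main() {
  std::printf("count=%d mask=0x%x\n", kNumAllocatable, kAllocatableMask);
  return 0;  // prints count=2 mask=0xc
}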
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index e08c865e4e..9b3a3fb9ad 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -21,11 +21,12 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r3 : number of arguments excluding receiver
- // -- r4 : called function (only guaranteed when
- // extra_args requires it)
+ // (only guaranteed when the called function
+ // is not marked as DontAdaptArguments)
+ // -- r4 : called function
// -- sp[0] : last argument
// -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(r4);
@@ -46,9 +47,27 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToExternalReference expects r0 to contain the number of arguments
- // including the receiver and the extra arguments.
+ // JumpToExternalReference expects r3 to contain the number of arguments
+ // including the receiver and the extra arguments. But r3 is only valid
+ // if the called function is marked as DontAdaptArguments, otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+ __ SmiUntag(r5);
+#endif
+ __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(ne, r3, r5, r3);
+ } else {
+ Label skip;
+ __ beq(&skip);
+ __ mr(r3, r5);
+ __ bind(&skip);
+ }
__ addi(r3, r3, Operand(num_extra_args + 1));
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -60,7 +79,8 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ LoadP(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ LoadP(result,
+ FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ LoadP(result,
MemOperand(result, Context::SlotOffset(
@@ -74,7 +94,8 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
__ LoadP(result,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ LoadP(result,
+ FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
__ LoadP(
result,
@@ -201,41 +222,44 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
+ // -- r6 : original constructor
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r3 and get rid of the rest (including the
+ // 1. Load the first argument into r5 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
__ cmpi(r3, Operand::Zero());
__ beq(&no_arguments);
__ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ LoadPUX(r3, MemOperand(sp, r3));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r5, MemOperand(sp, r5));
__ Drop(2);
__ b(&done);
__ bind(&no_arguments);
- __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ LoadRoot(r5, Heap::kempty_stringRootIndex);
__ Drop(1);
__ bind(&done);
}
- // 2. Make sure r3 is a string.
+ // 2. Make sure r5 is a string.
{
Label convert, done_convert;
- __ JumpIfSmi(r3, &convert);
- __ CompareObjectType(r3, r5, r5, FIRST_NONSTRING_TYPE);
+ __ JumpIfSmi(r5, &convert);
+ __ CompareObjectType(r5, r7, r7, FIRST_NONSTRING_TYPE);
__ blt(&done_convert);
__ bind(&convert);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ push(r4);
+ __ Push(r4, r6);
+ __ mr(r3, r5);
__ CallStub(&stub);
- __ pop(r4);
+ __ mr(r5, r3);
+ __ Pop(r4, r6);
}
__ bind(&done_convert);
}
@@ -243,13 +267,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 3. Allocate a JSValue wrapper for the string.
{
// ----------- S t a t e -------------
- // -- r3 : the first argument
+ // -- r5 : the first argument
// -- r4 : constructor function
+ // -- r6 : original constructor
// -- lr : return address
// -----------------------------------
- Label allocate, done_allocate;
- __ mr(r5, r3);
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(r4, r6);
+ __ bne(&rt_call);
+
__ Allocate(JSValue::kSize, r3, r6, r7, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -273,6 +302,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(r4, r5);
}
__ b(&done_allocate);
+
+ // Fallback to the runtime to create new object.
+ __ bind(&rt_call);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r5, r4, r6);  // save r4, r5; pass constructor, original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(r4, r5);
+ }
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
}
}
@@ -356,18 +396,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmpi(r5, Operand::Zero());
__ bne(&rt_call);
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r4, r6);
+ // Verify that the original constructor is a JSFunction.
+ __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
__ bne(&rt_call);
// Load the initial map and verify that it is in fact a map.
- // r4: constructor function
+ // r6: original constructor
__ LoadP(r5,
- FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r5, &rt_call);
__ CompareObjectType(r5, r8, r7, MAP_TYPE);
__ bne(&rt_call);
+    // Fall back to runtime if the expected base constructor and the
+    // actual base constructor differ.
+ __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r4, r8);
+ __ bne(&rt_call);
+
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc). In which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
@@ -390,9 +436,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
__ bne(&allocate);
- __ push(r4);
-
- __ Push(r5, r4); // r4 = constructor
+ __ Push(r4, r5, r5); // r5 = initial map
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(r4, r5);
@@ -488,7 +532,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: constructor function
// r6: original constructor
__ bind(&rt_call);
- __ Push(r4, r6);
+ __ Push(r4, r6); // constructor function, original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ mr(r7, r3);
@@ -885,21 +929,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -907,7 +937,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kStackLimitRootIndex);
__ cmp(sp, r0);
__ bge(&ok);
+ __ push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -956,6 +988,63 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+ Register count, Register scratch) {
+ Label loop;
+ __ addi(index, index, Operand(kPointerSize)); // Bias up for LoadPU
+ __ mtctr(count);
+ __ bind(&loop);
+ __ LoadPU(scratch, MemOperand(index, -kPointerSize));
+ __ push(scratch);
+ __ bdnz(&loop);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r5 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r4 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Calculate number of arguments (add one for receiver).
+ __ addi(r6, r3, Operand(1));
+
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r5, r6, r7);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (not including receiver)
+ // -- r6 : original constructor
+ // -- r4 : constructor to call
+ // -- r5 : address of the first argument
+ // -----------------------------------
+
+ // Push a slot for the receiver to be constructed.
+ __ push(r3);
+
+ // Push the arguments (skip if none).
+ Label skip;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&skip);
+ Generate_InterpreterPushArgs(masm, r5, r3, r7);
+ __ bind(&skip);
+
+ // Call the constructor with r3, r4, and r6 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1499,71 +1588,80 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(r4);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorBits, r0);
+ __ bne(&class_constructor, cr0);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
- __ lbz(r6, FieldMemOperand(r5, SharedFunctionInfo::kNativeByteOffset));
- __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ Label done_convert;
+ __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
__ bne(&done_convert, cr0);
{
- __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r6, MemOperand(sp, r6));
-
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
// -- r5 : the shared function info.
- // -- r6 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(r6, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
- __ bge(&done_convert);
- __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(r6);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r6, MemOperand(sp, r6));
+ __ JumpIfSmi(r6, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r6);
+ }
+ __ b(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r3, r4);
+ __ mr(r3, r6);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r6, r3);
+ __ Pop(r3, r4);
+ __ SmiUntag(r3);
+ }
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ b(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r3);
- __ Push(r3, r4);
- __ mr(r3, r6);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mr(r6, r3);
- __ Pop(r3, r4);
- __ SmiUntag(r3);
- }
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
__ StorePX(r6, MemOperand(sp, r7));
}
@@ -1585,11 +1683,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount actual(r3);
ParameterCount expected(r5);
__ InvokeCode(r6, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
@@ -1599,8 +1704,8 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq);
__ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
__ bne(&non_function);
@@ -1622,7 +1727,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1719,32 +1826,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
- // -- r5 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- r4 : the target to call (can be any Object).
-
- // Calculate number of arguments (add one for receiver).
- __ addi(r6, r3, Operand(1));
-
- // Push the arguments.
- Label loop;
- __ addi(r5, r5, Operand(kPointerSize)); // Bias up for LoadPU
- __ mtctr(r6);
- __ bind(&loop);
- __ LoadPU(r6, MemOperand(r5, -kPointerSize));
- __ push(r6);
- __ bdnz(&loop);
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -1804,13 +1885,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label no_strong_error;
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r8, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r8,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrongModeFunction,
-#else
- SharedFunctionInfo::kStrongModeFunction + kSmiTagSize,
-#endif
- r0);
+ __ TestBit(r8, SharedFunctionInfo::kStrongModeBit, r0);
__ beq(&no_strong_error, cr0);
// What we really care about is the required number of arguments.
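
Note: Generate_Adaptor above (and several stubs below) use the same ISELECT fallback pattern: on CPUs with the isel instruction the result is chosen branch-free, otherwise a short conditional branch skips a register move. Both forms reduce to a conditional select; a hedged plain C++ model, with an illustrative sentinel rather than V8's actual constant:

#include <cassert>

int SelectArgc(int raw_argc, int formal_count, int dont_adapt_sentinel) {
  // Mirrors: cmpi r5, sentinel; isel ne, r3, r5, r3 -- prefer the formal
  // parameter count unless the function opted out of argument adaptation.
  return (formal_count != dont_adapt_sentinel) ? formal_count : raw_argc;
}

int main() {
  const int kDontAdapt = 0xFFFF;  // illustrative sentinel value only
  assert(SelectArgc(2, 3, kDontAdapt) == 3);
  assert(SelectArgc(2, kDontAdapt, kDontAdapt) == 2);
  return 0;
}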
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 290159a3e7..92501a4a23 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -1034,15 +1034,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
-
+ //
+ // If argv_in_register():
+ // r5: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ mr(r15, r4);
- // Compute the argv pointer.
- __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
- __ add(r4, r4, sp);
- __ subi(r4, r4, Operand(kPointerSize));
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mr(r4, r5);
+ } else {
+ // Compute the argv pointer.
+ __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+ __ add(r4, r4, sp);
+ __ subi(r4, r4, Operand(kPointerSize));
+ }
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1141,8 +1148,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r3:r4: result
// sp: stack pointer
// fp: frame pointer
- // r14: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles(), r14, true);
+ Register argc;
+ if (argv_in_register()) {
+    // We don't want to pop arguments, so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // r14: still holds argc (callee-saved).
+ argc = r14;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true);
__ blr();
// Handling of exception.
@@ -1416,13 +1430,7 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ lwz(scratch, FieldMemOperand(shared_info,
SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kBoundFunction,
-#else
- SharedFunctionInfo::kBoundFunction + kSmiTagSize,
-#endif
- r0);
+ __ TestBit(scratch, SharedFunctionInfo::kBoundBit, r0);
__ bne(&slow_case, cr0);
// Get the "prototype" (or initial map) of the {function}.
@@ -1697,7 +1705,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+ __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
__ cmpi(r9, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadP(r11, MemOperand(r7, kNormalOffset));
@@ -1914,7 +1922,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments boilerplate from the current native context.
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+ __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
__ LoadP(
r7,
MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
@@ -2521,110 +2529,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions and natives.
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r7,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(cont, cr0);
-
- // Do not transform the receiver for native.
- __ TestBit(r7,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
- __ bne(cont, cr0);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- {
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(r4);
- __ mr(r3, r6);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(r4);
- }
- __ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0);
- __ b(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
- bool needs_checks, bool call_as_method) {
- // r4 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
- __ bne(&slow);
- }
-
- // Fast-case: Invoke the function now.
- // r4: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
-
- if (needs_checks) {
- __ JumpIfSmi(r6, &wrap);
- __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
- __ blt(&wrap);
- } else {
- __ b(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// r3 : number of arguments
// r4 : the function to call
@@ -2715,9 +2619,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2754,34 +2656,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
__ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
-
- __ JumpIfSmi(r6, &wrap);
- __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
- __ blt(&wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
- __ beq(&slow_start);
+ __ beq(&call);
// Verify that r7 contains an AllocationSite
__ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
@@ -2816,7 +2699,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ LoadP(r7, FieldMemOperand(r5, generic_offset));
__ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
__ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
- __ b(&slow_start);
+ __ b(&call);
__ bind(&uninitialized);
@@ -2854,23 +2737,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r4);
}
- __ b(&have_js_function);
+ __ b(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
- __ bne(&slow);
- __ b(&have_js_function);
+ __ b(&call);
}
@@ -3011,7 +2885,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -3320,6 +3194,28 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in r3.
+ Label not_smi;
+ __ JumpIfNotSmi(r3, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmpi(r3, Operand::Zero());
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(lt, r3, r0, r3);
+ } else {
+ Label positive;
+ __ bgt(&positive);
+ __ li(r3, Operand::Zero());
+ __ bind(&positive);
+ }
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
+
+
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r3.
Label is_number;
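
Note: the new ToLengthStub fast-paths Smi inputs, clamping negatives to
zero (via isel where available, otherwise a branch), and tail-calls
Runtime::kToLength for everything else. For reference, a minimal C++
sketch of the ES2015 ToLength clamping that the slow path completes
(illustrative only, not V8 code):

    #include <cmath>

    // ToLength(v): ToInteger(v) clamped to [0, 2^53 - 1].
    double ToLengthSketch(double value) {
      if (std::isnan(value) || value <= 0) return 0;  // covers the Smi negative -> 0 case
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      double len = std::floor(value);
      return len > kMaxSafeInteger ? kMaxSafeInteger : len;
    }
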
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index bc6c26b217..ef4bdce5d1 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -322,7 +322,7 @@ class NameDictionaryLookupStub : public PlatformCodeStub {
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_CODE_STUBS_PPC_H_
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
index f8da74eaa6..7f19beea7d 100644
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ b/deps/v8/src/ppc/codegen-ppc.h
@@ -35,7 +35,7 @@ class MathExpGenerator : public AllStatic {
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/constants-ppc.cc b/deps/v8/src/ppc/constants-ppc.cc
index 56147b3c48..e6eec643f4 100644
--- a/deps/v8/src/ppc/constants-ppc.cc
+++ b/deps/v8/src/ppc/constants-ppc.cc
@@ -14,45 +14,18 @@ namespace internal {
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
"r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
+ "r11", "ip", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
"r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "fp"};
-// List of alias names which can be used when referring to PPC registers.
-const Registers::RegisterAlias Registers::aliases_[] = {{10, "sl"},
- {11, "r11"},
- {12, "r12"},
- {13, "r13"},
- {14, "r14"},
- {15, "r15"},
- {kNoRegister, NULL}};
-
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-const char* FPRegisters::names_[kNumFPRegisters] = {
+const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
"d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
"d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
-const char* FPRegisters::Name(int reg) {
- DCHECK((0 <= reg) && (reg < kNumFPRegisters));
- return names_[reg];
-}
-
-
-int FPRegisters::Number(const char* name) {
- for (int i = 0; i < kNumFPRegisters; i++) {
+int DoubleRegisters::Number(const char* name) {
+ for (int i = 0; i < kNumDoubleRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
@@ -71,15 +44,6 @@ int Registers::Number(const char* name) {
}
}
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kNoRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
// No register with the requested name found.
return kNoRegister;
}
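
Note: besides renaming r12 to "ip" in names_, this hunk removes the
alias table, so Registers::Number is now a plain linear scan and only
canonical names resolve; a simulator-debugger lookup of "r12", for
example, now yields kNoRegister. The remaining lookup shape in
isolation (hypothetical table, not the V8 one):

    #include <cstring>

    static const char* kNames[] = {"r0", "sp", "r2", "ip"};
    const int kNoReg = -1;

    int NumberOf(const char* name) {
      for (int i = 0; i < 4; i++) {
        if (std::strcmp(kNames[i], name) == 0) return i;  // linear scan only
      }
      return kNoReg;  // no alias fallback anymore
    }
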
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index b304bad7ce..87a82719be 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -18,8 +18,7 @@ namespace internal {
const int kNumRegisters = 32;
// FP support.
-const int kNumFPDoubleRegisters = 32;
-const int kNumFPRegisters = kNumFPDoubleRegisters;
+const int kNumDoubleRegisters = 32;
const int kNoRegister = -1;
@@ -229,6 +228,7 @@ enum OpcodeExt2 {
LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
XORX = 316 << 1, // Exclusive OR
MFSPR = 339 << 1, // Move from Special-Purpose-Register
+ POPCNTW = 378 << 1, // Population Count Words
STHX = 407 << 1, // store half-word w/ x-form
ORC = 412 << 1, // Or with Complement
STHUX = 439 << 1, // store half-word w/ update x-form
@@ -238,6 +238,7 @@ enum OpcodeExt2 {
MTSPR = 467 << 1, // Move to Special-Purpose-Register
DIVD = 489 << 1, // Divide Double Word
DIVW = 491 << 1, // Divide Word
+ POPCNTD = 506 << 1, // Population Count Doubleword
// Below represent bits 10-1 (any value >= 512)
LFSX = 535 << 1, // load float-single w/ x-form
@@ -564,35 +565,23 @@ class Instruction {
// Helper functions for converting between register numbers and names.
class Registers {
public:
- // Return the name of the register.
- static const char* Name(int reg);
-
// Lookup the register number for the name provided.
static int Number(const char* name);
- struct RegisterAlias {
- int reg;
- const char* name;
- };
-
private:
static const char* names_[kNumRegisters];
- static const RegisterAlias aliases_[];
};
// Helper functions for converting between FP register numbers and names.
-class FPRegisters {
+class DoubleRegisters {
public:
- // Return the name of the register.
- static const char* Name(int reg);
-
// Lookup the register number for the name provided.
static int Number(const char* name);
private:
- static const char* names_[kNumFPRegisters];
+ static const char* names_[kNumDoubleRegisters];
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_CONSTANTS_PPC_H_
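
Note: POPCNTW (378) and POPCNTD (506) follow the OpcodeExt2 convention
of storing the 10-bit extended opcode pre-shifted by one, which lets
decode sites compare the enum directly against instr->Bits(10, 1) << 1.
A simplified sketch of that field extraction (raw 32-bit instruction
word; the primary-opcode check is omitted):

    #include <cstdint>

    // X-form: the extended opcode occupies bits 10..1 of the instruction.
    uint32_t ExtOpcode2(uint32_t instr) {
      return ((instr >> 1) & 0x3FF) << 1;  // Bits(10, 1) << 1
    }

    bool IsPopcntd(uint32_t instr) {
      const uint32_t kPopcntd = 506 << 1;
      return ExtOpcode2(instr) == kPopcntd;
    }
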
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 3e4511f78f..831ccf6cdc 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -99,7 +100,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -123,7 +124,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -150,15 +151,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit();
- const int kDoubleRegsSize =
- kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
- // Save all FPU registers before messing with them.
+ // Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
- DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ stfd(fpu_reg, MemOperand(sp, offset));
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ stfd(dreg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -215,11 +218,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
@@ -291,9 +295,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ blt(&outer_push_loop);
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
- const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ lfd(dreg, MemOperand(r4, src_offset));
}
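
Note: the deoptimizer now iterates the allocatable double set through
RegisterConfiguration instead of the removed static DoubleRegister
helpers. The subtle point is that save/restore offsets are keyed by the
register code rather than the loop index, so a non-contiguous
allocatable set still places each register in its own slot. The pattern
in isolation (hypothetical inputs):

    // Address slots by register code, not loop index, so gaps in the
    // allocatable set (e.g. codes {0, 1, 3}) do not shift later slots.
    void ComputeSaveOffsets(const int* codes, int n, int slot_size,
                            int* offsets) {
      for (int i = 0; i < n; ++i) {
        offsets[i] = codes[i] * slot_size;  // keyed by code
      }
    }
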
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 5d7de8a0b4..83fbc7e29c 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -78,6 +78,7 @@ class Decoder {
void DecodeExt1(Instruction* instr);
void DecodeExt2(Instruction* instr);
+ void DecodeExt3(Instruction* instr);
void DecodeExt4(Instruction* instr);
void DecodeExt5(Instruction* instr);
@@ -116,7 +117,9 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
-void Decoder::PrintDRegister(int reg) { Print(FPRegisters::Name(reg)); }
+void Decoder::PrintDRegister(int reg) {
+ Print(DoubleRegister::from_code(reg).ToString());
+}
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
@@ -607,6 +610,16 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "stfdux 'rs, 'ra, 'rb");
return;
}
+ case POPCNTW: {
+ Format(instr, "popcntw 'ra, 'rs");
+ return;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case POPCNTD: {
+ Format(instr, "popcntd 'ra, 'rs");
+ return;
+ }
+#endif
}
switch (instr->Bits(10, 2) << 2) {
@@ -870,6 +883,19 @@ void Decoder::DecodeExt2(Instruction* instr) {
}
+void Decoder::DecodeExt3(Instruction* instr) {
+ switch (instr->Bits(10, 1) << 1) {
+ case FCFID: {
+ Format(instr, "fcfids'. 'Dt, 'Db");
+ break;
+ }
+ default: {
+ Unknown(instr); // not used by V8
+ }
+ }
+}
+
+
void Decoder::DecodeExt4(Instruction* instr) {
switch (instr->Bits(5, 1) << 1) {
case FDIV: {
@@ -1287,7 +1313,10 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
Format(instr, "stfdu 'Dt, 'int16('ra)");
break;
}
- case EXT3:
+ case EXT3: {
+ DecodeExt3(instr);
+ break;
+ }
case EXT4: {
DecodeExt4(instr);
break;
@@ -1349,7 +1378,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
+ return v8::internal::Register::from_code(reg).ToString();
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
index d5b6d3caa9..b1de9f50ff 100644
--- a/deps/v8/src/ppc/frames-ppc.h
+++ b/deps/v8/src/ppc/frames-ppc.h
@@ -70,6 +70,8 @@ const RegList kCallerSavedDoubles = 1 << 0 | // d0
1 << 12 | // d12
1 << 13; // d13
+const int kNumCallerSavedDoubles = 14;
+
const RegList kCalleeSavedDoubles = 1 << 14 | // d14
1 << 15 | // d15
1 << 16 | // d16
@@ -185,7 +187,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_FRAMES_PPC_H_
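
Note: kNumCallerSavedDoubles (14) is a hand-maintained count of the bits
in kCallerSavedDoubles (d0..d13), now consumed by the exit-frame code in
macro-assembler-ppc.cc. A compile-time cross-check along these lines
would keep the pair in sync (sketch, not part of this patch):

    #include <cstdint>

    constexpr int PopCount(uint32_t x) {
      return x == 0 ? 0 : static_cast<int>(x & 1) + PopCount(x >> 1);
    }

    constexpr uint32_t kCallerSavedDoublesMask = (1u << 14) - 1;  // d0..d13
    static_assert(PopCount(kCallerSavedDoublesMask) == 14,
                  "kNumCallerSavedDoubles must match the RegList mask");
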
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index c123e7c602..b54845d4b3 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -78,14 +78,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5};
@@ -108,6 +100,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return r3; }
@@ -228,6 +224,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -391,15 +394,38 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r3, // argument count (including receiver)
+ r3, // argument count (not including receiver)
r5, // address of first argument
r4 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // argument count (not including receiver)
+ r6, // original constructor
+ r4, // constructor to call
+ r5 // address of the first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // argument count (argc)
+ r5, // address of first argument (argv)
+ r4 // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index e973471572..e543ba853b 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -12,9 +12,11 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
-#include "src/ppc/macro-assembler-ppc.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
namespace v8 {
namespace internal {
@@ -24,7 +26,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
has_frame_(false) {
if (isolate() != NULL) {
code_object_ =
- Handle<Object>(isolate()->heap()->undefined_value(), isolate());
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -47,7 +49,8 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip, cr);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
+ DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY ||
+ rmode == RelocInfo::CONSTRUCT_CALL);
mov(ip, Operand(target, rmode));
mtctr(ip);
@@ -564,7 +567,7 @@ void MacroAssembler::PopFixedFrame(Register marker_reg) {
const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
- Register::kMaxNumAllocatableRegisters;
+ Register::kNumAllocatable;
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
@@ -623,7 +626,9 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -654,11 +659,26 @@ void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
const Register src,
const Register int_scratch) {
MovIntToDouble(dst, src, int_scratch);
- fcfid(dst, dst);
- frsp(dst, dst);
+ fcfids(dst, dst);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::ConvertInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfid(double_dst, double_dst);
}
+void MacroAssembler::ConvertInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfids(double_dst, double_dst);
+}
+#endif
+
+
void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
const Register dst_hi,
@@ -863,7 +883,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
MultiPushDoubles(kCallerSavedDoubles);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
- // kNumVolatileRegisters * kDoubleSize,
+ // kNumCallerSavedDoubles * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
@@ -922,7 +942,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
+ const int kNumRegs = kNumCallerSavedDoubles;
const int offset =
(ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
addi(r6, fp, Operand(-offset));
@@ -1206,7 +1226,8 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
LoadP(scratch, FieldMemOperand(scratch, offset));
- LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ LoadP(scratch,
+ FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -2273,7 +2294,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
// Load the builtins object into target register.
LoadP(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
+ LoadP(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
LoadP(target, ContextOperand(target, native_context_index), r0);
}
@@ -2408,7 +2429,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
LoadP(dst, GlobalObjectOperand());
- LoadP(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+ LoadP(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -2418,7 +2439,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Load the global or builtins object from the current context.
LoadP(scratch,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ LoadP(scratch,
+ FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
LoadP(scratch,
@@ -2440,7 +2462,7 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
LoadP(function,
- FieldMemOperand(function, GlobalObject::kNativeContextOffset));
+ FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
}
@@ -4250,8 +4272,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
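
Note: fcfid converts a signed 64-bit integer to double, while fcfids
performs the same conversion with a single rounding directly to single
precision; that is why ConvertIntToFloat can drop the separate frsp (the
old fcfid + frsp sequence rounded twice). In C++ terms, the two new
PPC64 helpers compute (sketch):

    #include <cstdint>

    double Int64ToDouble(int64_t v) {  // MovInt64ToDouble + fcfid
      return static_cast<double>(v);
    }

    float Int64ToFloat(int64_t v) {    // MovInt64ToDouble + fcfids
      return static_cast<float>(v);    // one rounding, int64 -> float
    }
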
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index f87c563e72..d4660d9207 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -14,17 +14,18 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_r3_Code};
-const Register kReturnRegister1 = {kRegister_r4_Code};
-const Register kJSFunctionRegister = {kRegister_r4_Code};
-const Register kContextRegister = {kRegister_r30_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_r3_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_r14_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_r15_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_r16_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_r17_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_r4_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_r3_Code};
+const Register kReturnRegister0 = {Register::kCode_r3};
+const Register kReturnRegister1 = {Register::kCode_r4};
+const Register kJSFunctionRegister = {Register::kCode_r4};
+const Register kContextRegister = {Register::kCode_r30};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r14};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
// ----------------------------------------------------------------------------
// Static helper functions
@@ -385,6 +386,11 @@ class MacroAssembler : public Assembler {
void ConvertIntToFloat(const DoubleRegister dst, const Register src,
const Register int_scratch);
+#if V8_TARGET_ARCH_PPC64
+ void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+ void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+#endif
+
// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToInt64(const DoubleRegister double_input,
@@ -1569,7 +1575,7 @@ inline MemOperand GlobalObjectOperand() {
#else
#define ACCESS_MASM(masm) masm->
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
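
Note: the calling-convention aliases now aggregate-initialize Register
from Register::kCode_* enumerators rather than the old kRegister_*_Code
constants, and kJavaScriptCallArgCountRegister joins the list. The
initialization pattern reduced to its shape (simplified stand-in struct,
illustrative names):

    struct Reg {
      enum Code { kCode_r3 = 3, kCode_r4 = 4 };
      int reg_code;
      int code() const { return reg_code; }
    };

    const Reg kReturnReg0 = {Reg::kCode_r3};  // aggregate initialization
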
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 518f8fae75..fa088a2c30 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -164,7 +164,7 @@ bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
- int regnum = FPRegisters::Number(desc);
+ int regnum = DoubleRegisters::Number(desc);
if (regnum != kNoRegister) {
*value = sim_->get_double_from_d_register(regnum);
return true;
@@ -313,7 +313,8 @@ void PPCDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF(" %3s: %08" V8PRIxPTR, Registers::Name(i), value);
+ PrintF(" %3s: %08" V8PRIxPTR,
+ Register::from_code(i).ToString(), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -332,7 +333,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- Registers::Name(i), value, value);
+ Register::from_code(i).ToString(), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -351,7 +352,8 @@ void PPCDebugger::Debug() {
for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n", FPRegisters::Name(i), dvalue,
+ PrintF("%3s: %f 0x%08x %08x\n",
+ DoubleRegister::from_code(i).ToString(), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
}
@@ -1847,6 +1849,36 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
}
break;
}
+ case POPCNTW: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t count = 0;
+ int n = 0;
+ uintptr_t bit = 0x80000000;
+ for (; n < 32; n++) {
+ if (bit & rs_val) count++;
+ bit >>= 1;
+ }
+ set_register(ra, count);
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case POPCNTD: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t count = 0;
+ int n = 0;
+ uintptr_t bit = 0x8000000000000000UL;
+ for (; n < 64; n++) {
+ if (bit & rs_val) count++;
+ bit >>= 1;
+ }
+ set_register(ra, count);
+ break;
+ }
+#endif
case SYNC: {
// TODO - simulate sync
break;
@@ -2663,6 +2695,24 @@ void Simulator::ExecuteExt2(Instruction* instr) {
}
+void Simulator::ExecuteExt3(Instruction* instr) {
+ int opcode = instr->Bits(10, 1) << 1;
+ switch (opcode) {
+ case FCFID: {
+ // fcfids
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double t_val = get_double_from_d_register(frb);
+ int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
+ double frt_val = static_cast<float>(*frb_val_p);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ }
+ UNIMPLEMENTED(); // Not used by V8.
+}
+
+
void Simulator::ExecuteExt4(Instruction* instr) {
switch (instr->Bits(5, 1) << 1) {
case FDIV: {
@@ -3578,8 +3628,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
- case EXT3:
- UNIMPLEMENTED();
+ case EXT3: {
+ ExecuteExt3(instr);
+ break;
+ }
case EXT4: {
ExecuteExt4(instr);
break;
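
Note: the simulator models popcntw/popcntd with explicit bit loops; the
popcntw case counts only the low 32 bits, whereas the architectural
instruction counts each 32-bit word separately, which is presumably
sufficient for the code V8 generates. A compact host-side equivalent to
check the loop against (sketch):

    #include <cstdint>

    // Same result as the simulator's POPCNTW loop (low 32 bits).
    int PopcntWord(uint64_t rs_val) {
      int count = 0;
      for (uint32_t bit = 0x80000000u; bit != 0; bit >>= 1) {
        if (rs_val & bit) count++;
      }
      return count;
    }
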
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 042b2ada2c..bdf50ba474 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -54,8 +54,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() {}
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
@@ -311,6 +311,7 @@ class Simulator {
bool ExecuteExt2_9bit_part2(Instruction* instr);
void ExecuteExt2_5bit(Instruction* instr);
void ExecuteExt2(Instruction* instr);
+ void ExecuteExt3(Instruction* instr);
void ExecuteExt4(Instruction* instr);
#if V8_TARGET_ARCH_PPC64
void ExecuteExt5(Instruction* instr);
@@ -422,8 +423,8 @@ class SimulatorStack : public v8::internal::AllStatic {
Simulator::current(Isolate::Current())->PopAddress();
}
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_PPC_SIMULATOR_PPC_H_
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/preparse-data-format.h
index 560693f67e..c68a684562 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -35,6 +35,7 @@ struct PreparseDataConstants {
};
-} } // namespace v8::internal.
+} // namespace internal
+} // namespace v8.
#endif // V8_PREPARSE_DATA_FORMAT_H_
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index f7ed1ed91a..711ff3b895 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -206,6 +206,7 @@ class CompleteParserRecorder : public ParserRecorder {
};
-} } // namespace v8::internal.
+} // namespace internal
+} // namespace v8.
#endif // V8_PREPARSE_DATA_H_
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 1bdcb85db7..4b86d78597 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -111,8 +111,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
FunctionState top_state(&function_state_, &scope_, top_scope, kNormalFunction,
&top_factory);
scope_->SetLanguageMode(language_mode);
- Scope* function_scope = NewScope(
- scope_, IsArrowFunction(kind) ? ARROW_SCOPE : FUNCTION_SCOPE, kind);
+ Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE, kind);
if (!has_simple_parameters) function_scope->SetHasNonSimpleParameters();
PreParserFactory function_factory(NULL);
FunctionState function_state(&function_state_, &scope_, function_scope, kind,
@@ -533,7 +532,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
bool require_initializer = false;
- bool is_strict_const = false;
+ bool lexical = false;
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
@@ -557,12 +556,13 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
if (is_strict(language_mode()) ||
(allow_harmony_sloppy() && !allow_legacy_const())) {
DCHECK(var_context != kStatement);
- is_strict_const = true;
- require_initializer = var_context != kForStatement;
+ require_initializer = true;
+ lexical = true;
}
} else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
+ lexical = true;
} else {
*ok = false;
return Statement::Default();
@@ -577,12 +577,17 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
do {
// Parse binding pattern.
if (nvars > 0) Consume(Token::COMMA);
+ int decl_pos = peek_position();
+ PreParserExpression pattern = PreParserExpression::Default();
{
ExpressionClassifier pattern_classifier;
Token::Value next = peek();
- PreParserExpression pattern =
- ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
+ if (lexical) {
+ ValidateLetPattern(&pattern_classifier, CHECK_OK);
+ }
if (!allow_harmony_destructuring() && !pattern.IsIdentifier()) {
ReportUnexpectedToken(next);
@@ -591,12 +596,15 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
}
}
+ bool is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
+
+ bool is_for_iteration_variable =
+ var_context == kForStatement &&
+ (peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
+
Scanner::Location variable_loc = scanner()->location();
nvars++;
- if (peek() == Token::ASSIGN || require_initializer ||
- // require initializers for multiple consts.
- (is_strict_const && peek() == Token::COMMA)) {
- Expect(Token::ASSIGN, CHECK_OK);
+ if (Check(Token::ASSIGN)) {
ExpressionClassifier classifier;
ParseAssignmentExpression(var_context != kForStatement, &classifier,
CHECK_OK);
@@ -606,6 +614,14 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
if (first_initializer_loc && !first_initializer_loc->IsValid()) {
*first_initializer_loc = variable_loc;
}
+ } else if ((require_initializer || is_pattern) &&
+ !is_for_iteration_variable) {
+ PreParserTraits::ReportMessageAt(
+ Scanner::Location(decl_pos, scanner()->location().end_pos),
+ MessageTemplate::kDeclarationMissingInitializer,
+ is_pattern ? "destructuring" : "const");
+ *ok = false;
+ return Statement::Default();
}
} while (peek() == Token::COMMA);
@@ -909,8 +925,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
&first_initializer_loc, &bindings_loc,
CHECK_OK);
bool accept_IN = decl_count >= 1;
- bool accept_OF = true;
- if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
+ if (accept_IN && CheckInOrOf(&mode, ok)) {
if (!*ok) return Statement::Default();
if (decl_count != 1) {
const char* loop_type =
@@ -945,7 +960,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
int lhs_end_pos = scanner()->location().end_pos;
is_let_identifier_expression =
lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
- if (CheckInOrOf(lhs.IsIdentifier(), &mode, ok)) {
+ if (CheckInOrOf(&mode, ok)) {
if (!*ok) return Statement::Default();
lhs = CheckAndRewriteReferenceExpression(
lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
@@ -1024,9 +1039,12 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ ExpressionClassifier pattern_classifier;
+ ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ ValidateBindingPattern(&pattern_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
{
+ // TODO(adamk): Make this CATCH_SCOPE
Scope* with_scope = NewScope(scope_, WITH_SCOPE);
BlockState block_state(&scope_, with_scope);
ParseBlock(CHECK_OK);
@@ -1234,7 +1252,25 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
return Expression::Default();
}
+
+PreParserExpression PreParser::ParseDoExpression(bool* ok) {
+ // AssignmentExpression ::
+ // do '{' StatementList '}'
+ Expect(Token::DO, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ {
+ BlockState block_state(&scope_, block_scope);
+ while (peek() != Token::RBRACE) {
+ ParseStatementListItem(CHECK_OK);
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+ return PreParserExpression::Default();
+ }
+}
+
#undef CHECK_OK
-} } // v8::internal
+} // namespace internal
+} // namespace v8
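
Note: ParseVariableDeclarations replaces the is_strict_const special
cases with a single rule: lexical const bindings and destructuring
patterns require an initializer unless the binding is the iteration
variable of a for-in/for-of head; violations now report
kDeclarationMissingInitializer from the binding's start position.
Inputs that exercise the rule, as inferred from the hunk (sketch):

    // Preparser verdicts under the new initializer rule:
    const char* kCases[] = {
        "const x = 1;",            // ok
        "const x;",                // error: const missing initializer
        "let {a} = obj;",          // ok
        "let [b];",                // error: destructuring missing initializer
        "for (const x of xs) {}",  // ok: for-iteration variable
    };
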
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 85844a0875..c4d7ed45b3 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -88,6 +88,7 @@ class ParserBase : public Traits {
typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
typedef typename Traits::Type::Literal LiteralT;
typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
+ typedef typename Traits::Type::StatementList StatementListT;
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
@@ -107,18 +108,15 @@ class ParserBase : public Traits {
stack_overflow_(false),
allow_lazy_(false),
allow_natives_(false),
- allow_harmony_arrow_functions_(false),
allow_harmony_sloppy_(false),
allow_harmony_sloppy_function_(false),
allow_harmony_sloppy_let_(false),
allow_harmony_rest_parameters_(false),
allow_harmony_default_parameters_(false),
- allow_harmony_spread_calls_(false),
allow_harmony_destructuring_(false),
- allow_harmony_spread_arrays_(false),
- allow_harmony_new_target_(false),
allow_strong_mode_(false),
- allow_legacy_const_(true) {}
+ allow_legacy_const_(true),
+ allow_harmony_do_expressions_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -126,20 +124,19 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(lazy);
ALLOW_ACCESSORS(natives);
- ALLOW_ACCESSORS(harmony_arrow_functions);
ALLOW_ACCESSORS(harmony_sloppy);
ALLOW_ACCESSORS(harmony_sloppy_function);
ALLOW_ACCESSORS(harmony_sloppy_let);
ALLOW_ACCESSORS(harmony_rest_parameters);
ALLOW_ACCESSORS(harmony_default_parameters);
- ALLOW_ACCESSORS(harmony_spread_calls);
ALLOW_ACCESSORS(harmony_destructuring);
- ALLOW_ACCESSORS(harmony_spread_arrays);
- ALLOW_ACCESSORS(harmony_new_target);
ALLOW_ACCESSORS(strong_mode);
ALLOW_ACCESSORS(legacy_const);
+ ALLOW_ACCESSORS(harmony_do_expressions);
#undef ALLOW_ACCESSORS
+ uintptr_t stack_limit() const { return stack_limit_; }
+
protected:
enum AllowRestrictedIdentifiers {
kAllowRestrictedIdentifiers,
@@ -309,16 +306,14 @@ class ParserBase : public Traits {
};
Scope* NewScope(Scope* parent, ScopeType scope_type) {
- // Must always pass the function kind for FUNCTION_SCOPE and ARROW_SCOPE.
+ // Must always pass the function kind for FUNCTION_SCOPE.
DCHECK(scope_type != FUNCTION_SCOPE);
- DCHECK(scope_type != ARROW_SCOPE);
return NewScope(parent, scope_type, kNormalFunction);
}
Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
DCHECK(ast_value_factory());
DCHECK(scope_type != MODULE_SCOPE || FLAG_harmony_modules);
- DCHECK(!IsArrowFunction(kind) || scope_type == ARROW_SCOPE);
Scope* result = new (zone())
Scope(zone(), parent, scope_type, ast_value_factory(), kind);
result->Initialize();
@@ -426,8 +421,7 @@ class ParserBase : public Traits {
}
}
- bool CheckInOrOf(
- bool accept_OF, ForEachStatement::VisitMode* visit_mode, bool* ok) {
+ bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode, bool* ok) {
if (Check(Token::IN)) {
if (is_strong(language_mode())) {
ReportMessageAt(scanner()->location(), MessageTemplate::kStrongForIn);
@@ -436,7 +430,7 @@ class ParserBase : public Traits {
*visit_mode = ForEachStatement::ENUMERATE;
}
return true;
- } else if (accept_OF && CheckContextualKeyword(CStrVector("of"))) {
+ } else if (CheckContextualKeyword(CStrVector("of"))) {
*visit_mode = ForEachStatement::ITERATE;
return true;
}
@@ -620,6 +614,13 @@ class ParserBase : public Traits {
}
}
+ void ValidateLetPattern(const ExpressionClassifier* classifier, bool* ok) {
+ if (!classifier->is_valid_let_pattern()) {
+ ReportClassifierError(classifier->let_pattern_error());
+ *ok = false;
+ }
+ }
+
void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
@@ -711,9 +712,10 @@ class ParserBase : public Traits {
ExpressionT ParseMemberExpression(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseMemberExpressionContinuation(
ExpressionT expression, ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseArrowFunctionLiteral(
- const FormalParametersT& parameters,
- const ExpressionClassifier& classifier, bool* ok);
+ ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
+ const FormalParametersT& parameters,
+ const ExpressionClassifier& classifier,
+ bool* ok);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
ExpressionClassifier* classifier, bool* ok);
void AddTemplateExpression(ExpressionT);
@@ -827,18 +829,15 @@ class ParserBase : public Traits {
bool allow_lazy_;
bool allow_natives_;
- bool allow_harmony_arrow_functions_;
bool allow_harmony_sloppy_;
bool allow_harmony_sloppy_function_;
bool allow_harmony_sloppy_let_;
bool allow_harmony_rest_parameters_;
bool allow_harmony_default_parameters_;
- bool allow_harmony_spread_calls_;
bool allow_harmony_destructuring_;
- bool allow_harmony_spread_arrays_;
- bool allow_harmony_new_target_;
bool allow_strong_mode_;
bool allow_legacy_const_;
+ bool allow_harmony_do_expressions_;
};
@@ -948,6 +947,14 @@ class PreParserExpression {
right->IsSpreadExpression()));
}
+ static PreParserExpression ObjectLiteral() {
+ return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
+ }
+
+ static PreParserExpression ArrayLiteral() {
+ return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
+ }
+
static PreParserExpression StringLiteral() {
return PreParserExpression(TypeField::encode(kStringLiteralExpression));
}
@@ -1005,6 +1012,14 @@ class PreParserExpression {
return PreParserIdentifier(IdentifierTypeField::decode(code_));
}
+ bool IsObjectLiteral() const {
+ return TypeField::decode(code_) == kObjectLiteralExpression;
+ }
+
+ bool IsArrayLiteral() const {
+ return TypeField::decode(code_) == kArrayLiteralExpression;
+ }
+
bool IsStringLiteral() const {
return TypeField::decode(code_) == kStringLiteralExpression;
}
@@ -1093,7 +1108,9 @@ class PreParserExpression {
kIdentifierExpression,
kStringLiteralExpression,
kBinaryOperationExpression,
- kSpreadExpression
+ kSpreadExpression,
+ kObjectLiteralExpression,
+ kArrayLiteralExpression
};
enum ExpressionType {
@@ -1231,12 +1248,12 @@ class PreParserFactory {
int literal_index,
bool is_strong,
int pos) {
- return PreParserExpression::Default();
+ return PreParserExpression::ArrayLiteral();
}
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int first_spread_index, int literal_index,
bool is_strong, int pos) {
- return PreParserExpression::Default();
+ return PreParserExpression::ArrayLiteral();
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
@@ -1257,7 +1274,7 @@ class PreParserFactory {
bool has_function,
bool is_strong,
int pos) {
- return PreParserExpression::Default();
+ return PreParserExpression::ObjectLiteral();
}
PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
@@ -1695,6 +1712,7 @@ class PreParserTraits {
// Temporary glue; these functions will move to ParserBase.
PreParserExpression ParseV8Intrinsic(bool* ok);
+ V8_INLINE PreParserExpression ParseDoExpression(bool* ok);
PreParserExpression ParseFunctionLiteral(
PreParserIdentifier name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
@@ -1835,6 +1853,7 @@ class PreParser : public ParserBase<PreParserTraits> {
Expression ParseConditionalExpression(bool accept_IN, bool* ok);
Expression ParseObjectLiteral(bool* ok);
Expression ParseV8Intrinsic(bool* ok);
+ Expression ParseDoExpression(bool* ok);
V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
int* expected_property_count, bool* ok);
@@ -1900,6 +1919,11 @@ void PreParserTraits::ParseArrowFunctionFormalParameterList(
}
+PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
+ return pre_parser_->ParseDoExpression(ok);
+}
+
+
PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
const PreParserFormalParameters& parameters, FunctionKind kind,
@@ -2095,6 +2119,10 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
(next == Token::YIELD && !is_generator()))) {
classifier->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
+ if (next == Token::LET) {
+ classifier->RecordLetPatternError(scanner()->location(),
+ MessageTemplate::kLetInLexicalBinding);
+ }
return this->GetSymbol(scanner());
} else {
this->ReportUnexpectedToken(next);
@@ -2216,12 +2244,10 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
// ClassLiteral
// '(' Expression ')'
// TemplateLiteral
+ // do Block
- int beg_pos = scanner()->peek_location().beg_pos;
- int end_pos = scanner()->peek_location().end_pos;
- ExpressionT result = this->EmptyExpression();
- Token::Value token = peek();
- switch (token) {
+ int beg_pos = peek_position();
+ switch (peek()) {
case Token::THIS: {
BindingPatternUnexpectedToken(classifier);
Consume(Token::THIS);
@@ -2231,29 +2257,22 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
if (IsClassConstructor(function_state_->kind())) {
ReportMessage(MessageTemplate::kStrongConstructorThis);
*ok = false;
- break;
+ return this->EmptyExpression();
}
}
- result = this->ThisExpression(scope_, factory(), beg_pos);
- break;
+ return this->ThisExpression(scope_, factory(), beg_pos);
}
case Token::NULL_LITERAL:
case Token::TRUE_LITERAL:
case Token::FALSE_LITERAL:
BindingPatternUnexpectedToken(classifier);
- Next();
- result =
- this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
- break;
+ return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::SMI:
case Token::NUMBER:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenNumber);
- Next();
- result =
- this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
- break;
+ return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::IDENTIFIER:
case Token::LET:
@@ -2262,46 +2281,40 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseAndClassifyIdentifier(classifier, CHECK_OK);
- result = this->ExpressionFromIdentifier(name, beg_pos, end_pos, scope_,
- factory());
- break;
+ return this->ExpressionFromIdentifier(
+ name, beg_pos, scanner()->location().end_pos, scope_, factory());
}
case Token::STRING: {
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenString);
Consume(Token::STRING);
- result = this->ExpressionFromString(beg_pos, scanner(), factory());
- break;
+ return this->ExpressionFromString(beg_pos, scanner(), factory());
}
case Token::ASSIGN_DIV:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- result = this->ParseRegExpLiteral(true, classifier, CHECK_OK);
- break;
+ return this->ParseRegExpLiteral(true, classifier, ok);
case Token::DIV:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- result = this->ParseRegExpLiteral(false, classifier, CHECK_OK);
- break;
+ return this->ParseRegExpLiteral(false, classifier, ok);
case Token::LBRACK:
if (!allow_harmony_destructuring()) {
BindingPatternUnexpectedToken(classifier);
}
- result = this->ParseArrayLiteral(classifier, CHECK_OK);
- break;
+ return this->ParseArrayLiteral(classifier, ok);
case Token::LBRACE:
if (!allow_harmony_destructuring()) {
BindingPatternUnexpectedToken(classifier);
}
- result = this->ParseObjectLiteral(classifier, CHECK_OK);
- break;
+ return this->ParseObjectLiteral(classifier, ok);
- case Token::LPAREN:
+ case Token::LPAREN: {
// Arrow function formal parameters are either a single identifier or a
// list of BindingPattern productions enclosed in parentheses.
// Parentheses are not valid on the LHS of a BindingPattern, so we use the
@@ -2321,28 +2334,27 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
classifier->RecordBindingPatternError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::RPAREN));
- result = factory()->NewEmptyParentheses(beg_pos);
+ return factory()->NewEmptyParentheses(beg_pos);
} else if (allow_harmony_rest_parameters() && Check(Token::ELLIPSIS)) {
// (...x)=>x. The continuation that looks for the => is in
// ParseAssignmentExpression.
- int ellipsis_pos = scanner()->location().beg_pos;
+ int ellipsis_pos = position();
classifier->RecordExpressionError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::ELLIPSIS));
classifier->RecordNonSimpleParameter();
Scanner::Location expr_loc = scanner()->peek_location();
Token::Value tok = peek();
- result = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ ExpressionT expr =
+ this->ParseAssignmentExpression(true, classifier, CHECK_OK);
// Patterns are not allowed as rest parameters. There is no way we can
// succeed so go ahead and use the convenient ReportUnexpectedToken
// interface.
- if (!Traits::IsIdentifier(result)) {
+ if (!Traits::IsIdentifier(expr)) {
ReportUnexpectedTokenAt(expr_loc, tok);
*ok = false;
return this->EmptyExpression();
}
- result = factory()->NewSpread(result, ellipsis_pos);
-
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -2350,14 +2362,15 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
Expect(Token::RPAREN, CHECK_OK);
- } else {
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = this->ParseExpression(true, classifier, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ return factory()->NewSpread(expr, ellipsis_pos);
}
- break;
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ return expr;
+ }
case Token::CLASS: {
BindingPatternUnexpectedToken(classifier);
@@ -2365,7 +2378,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
ReportMessage(MessageTemplate::kSloppyLexical);
*ok = false;
- break;
+ return this->EmptyExpression();
}
int class_token_position = position();
IdentifierT name = this->EmptyIdentifier();
@@ -2376,10 +2389,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
CHECK_OK);
class_name_location = scanner()->location();
}
- result = this->ParseClassLiteral(name, class_name_location,
- is_strict_reserved_name,
- class_token_position, CHECK_OK);
- break;
+ return this->ParseClassLiteral(name, class_name_location,
+ is_strict_reserved_name,
+ class_token_position, ok);
}
case Token::TEMPLATE_SPAN:
@@ -2387,26 +2399,30 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
classifier->RecordBindingPatternError(
scanner()->peek_location(),
MessageTemplate::kUnexpectedTemplateString);
- result = this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
- classifier, CHECK_OK);
- break;
+ return this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
+ classifier, ok);
case Token::MOD:
if (allow_natives() || extension_ != NULL) {
- result = this->ParseV8Intrinsic(CHECK_OK);
- break;
+ BindingPatternUnexpectedToken(classifier);
+ return this->ParseV8Intrinsic(ok);
}
- // If we're not allowing special syntax we fall-through to the
- // default case.
+ break;
- default: {
- Next();
- ReportUnexpectedToken(token);
- *ok = false;
- }
+ case Token::DO:
+ if (allow_harmony_do_expressions()) {
+ BindingPatternUnexpectedToken(classifier);
+ return Traits::ParseDoExpression(ok);
+ }
+ break;
+
+ default:
+ break;
}
- return result;
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return this->EmptyExpression();
}
@@ -2493,9 +2509,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
}
elem = this->GetLiteralTheHole(peek_position(), factory());
} else if (peek() == Token::ELLIPSIS) {
- if (!allow_harmony_spread_arrays()) {
- ExpressionUnexpectedToken(classifier);
- }
int start_pos = peek_position();
Consume(Token::ELLIPSIS);
ExpressionT argument =
@@ -2642,6 +2655,10 @@ ParserBase<Traits>::ParsePropertyDefinition(
scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
classifier->RecordDuplicateFormalParameterError(scanner()->location());
}
+ if (name_token == Token::LET) {
+ classifier->RecordLetPatternError(
+ scanner()->location(), MessageTemplate::kLetInLexicalBinding);
+ }
ExpressionT lhs = this->ExpressionFromIdentifier(
name, next_beg_pos, next_end_pos, scope_, factory());
@@ -2832,10 +2849,8 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
bool was_unspread = false;
int unspread_sequences_count = 0;
while (!done) {
- bool is_spread =
- allow_harmony_spread_calls() && (peek() == Token::ELLIPSIS);
int start_pos = peek_position();
- if (is_spread) Consume(Token::ELLIPSIS);
+ bool is_spread = Check(Token::ELLIPSIS);
ExpressionT argument = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
@@ -2912,13 +2927,17 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
}
ExpressionT expression = this->ParseConditionalExpression(
accept_IN, &arrow_formals_classifier, CHECK_OK);
- if (allow_harmony_arrow_functions() && peek() == Token::ARROW) {
+ if (peek() == Token::ARROW) {
BindingPatternUnexpectedToken(classifier);
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
parenthesized_formals, CHECK_OK);
Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
Scope* scope =
- this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
+ this->NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+ // Because the arrow's parameters were parsed in the outer scope, any
+ // usage flags that might have been triggered there need to be copied
+ // to the arrow scope.
+ scope_->PropagateUsageFlagsToScope(scope);
FormalParametersT parameters(scope);
if (!arrow_formals_classifier.is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
@@ -2937,7 +2956,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
duplicate_loc);
}
expression = this->ParseArrowFunctionLiteral(
- parameters, arrow_formals_classifier, CHECK_OK);
+ accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
if (fni_ != nullptr) fni_->Infer();
@@ -3376,7 +3395,7 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new, classifier, CHECK_OK);
- } else if (allow_harmony_new_target() && peek() == Token::PERIOD) {
+ } else if (peek() == Token::PERIOD) {
return ParseNewTargetExpression(CHECK_OK);
} else {
result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
@@ -3873,7 +3892,7 @@ bool ParserBase<Traits>::IsNextLetKeyword() {
template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseArrowFunctionLiteral(
- const FormalParametersT& formal_parameters,
+ bool accept_IN, const FormalParametersT& formal_parameters,
const ExpressionClassifier& formals_classifier, bool* ok) {
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
@@ -3931,7 +3950,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
parenthesized_function_ = false;
ExpressionClassifier classifier;
ExpressionT expression =
- ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
ValidateExpression(&classifier, CHECK_OK);
body = this->NewStatementList(1, zone());
this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
@@ -4180,6 +4199,7 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
return;
}
}
-} } // v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PREPARSER_H
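
Note: together with PreParser::ParseDoExpression in preparser.cc,
ParserBase now accepts a do-expression production, do '{' StatementList
'}' in expression position parsed inside a fresh block scope, gated on
the new allow_harmony_do_expressions() flag. Illustrative inputs
(sketch; assumes the flag is enabled):

    // Accepted only with do-expressions enabled:
    const char* kDoCases[] = {
        "let x = do { 1 + 1 };",          // do-expression as initializer
        "f(do { let t = g(); t * 2 });",  // do-expression as call argument
    };
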
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 59db57ac7e..f50d5904d0 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -13,14 +13,14 @@
namespace v8 {
namespace internal {
-CallPrinter::CallPrinter(Isolate* isolate, Zone* zone) {
+CallPrinter::CallPrinter(Isolate* isolate) {
output_ = NULL;
size_ = 0;
pos_ = 0;
position_ = 0;
found_ = false;
done_ = false;
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
@@ -228,6 +228,9 @@ void CallPrinter::VisitClassLiteral(ClassLiteral* node) {
void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {}
+void CallPrinter::VisitDoExpression(DoExpression* node) { Find(node->block()); }
+
+
void CallPrinter::VisitConditional(Conditional* node) {
Find(node->condition());
Find(node->then_expression());
@@ -429,22 +432,22 @@ void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
#ifdef DEBUG
-// A helper for ast nodes that use FeedbackVectorICSlots.
-static int FormatICSlotNode(Vector<char>* buf, Expression* node,
- const char* node_name, FeedbackVectorICSlot slot) {
+// A helper for ast nodes that use FeedbackVectorSlots.
+static int FormatSlotNode(Vector<char>* buf, Expression* node,
+ const char* node_name, FeedbackVectorSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
- pos = SNPrintF(*buf + pos, " ICSlot(%d)", slot.ToInt());
+ pos = SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
}
return pos;
}
-PrettyPrinter::PrettyPrinter(Isolate* isolate, Zone* zone) {
+PrettyPrinter::PrettyPrinter(Isolate* isolate) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
@@ -701,6 +704,13 @@ void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
}
+void PrettyPrinter::VisitDoExpression(DoExpression* node) {
+ Print("(do {");
+ PrintStatements(node->block()->statements());
+ Print("})");
+}
+
+
void PrettyPrinter::VisitConditional(Conditional* node) {
Visit(node->condition());
Print(" ? ");
@@ -904,8 +914,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(Isolate* isolate, Zone* zone, AstNode* node) {
- PrettyPrinter printer(isolate, zone);
+void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+ PrettyPrinter printer(isolate);
PrintF("%s\n", printer.Print(node));
}
@@ -1061,6 +1071,13 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->inc_indent();
}
+ IndentedScope(AstPrinter* printer, const char* txt, int pos)
+ : ast_printer_(printer) {
+ ast_printer_->PrintIndented(txt);
+ ast_printer_->Print(" at %d\n", pos);
+ ast_printer_->inc_indent();
+ }
+
virtual ~IndentedScope() {
ast_printer_->dec_indent();
}
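
The new IndentedScope overload above records a source position next to each label. A self-contained sketch (simplified, not V8's printer) of the RAII discipline it relies on: construction prints and indents, destruction unindents, so the printed nesting mirrors C++ scope nesting.

    #include <cstdio>

    class IndentSketch {
     public:
      IndentSketch(const char* txt, int pos) {
        std::printf("%*s%s at %d\n", depth_ * 2, "", txt, pos);
        ++depth_;  // constructor indents...
      }
      ~IndentSketch() { --depth_; }  // ...destructor restores the level
     private:
      static int depth_;
    };
    int IndentSketch::depth_ = 0;

    int main() {
      IndentSketch func("FUNC", 0);
      {
        IndentSketch stmt("EXPRESSION STATEMENT", 12);
        IndentSketch call("CALL", 12);
      }  // stmt and call unwind here; a later sibling prints at depth 1 again
    }
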
@@ -1073,8 +1090,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter(Isolate* isolate, Zone* zone)
- : PrettyPrinter(isolate, zone), indent_(0) {}
+AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {}
AstPrinter::~AstPrinter() {
@@ -1124,14 +1140,14 @@ void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) {
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s);
+ IndentedScope indent(this, s, node->position());
Visit(node);
}
const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
Init();
- { IndentedScope indent(this, "FUNC");
+ { IndentedScope indent(this, "FUNC", program->position());
PrintLiteralIndented("NAME", program->name(), true);
PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
PrintParameters(program->scope());
@@ -1180,7 +1196,7 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
void AstPrinter::VisitBlock(Block* node) {
const char* block_txt =
node->ignore_completion_value() ? "BLOCK NOCOMPLETIONS" : "BLOCK";
- IndentedScope indent(this, block_txt);
+ IndentedScope indent(this, block_txt, node->position());
PrintStatements(node->statements());
}
@@ -1204,26 +1220,26 @@ void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- IndentedScope indent(this, "IMPORT");
+ IndentedScope indent(this, "IMPORT", node->position());
PrintLiteralIndented("NAME", node->proxy()->name(), true);
PrintLiteralIndented("FROM", node->module_specifier()->string(), true);
}
void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
- IndentedScope indent(this, "EXPORT ");
+ IndentedScope indent(this, "EXPORT", node->position());
PrintLiteral(node->proxy()->name(), true);
}
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- IndentedScope indent(this, "EXPRESSION STATEMENT");
+ IndentedScope indent(this, "EXPRESSION STATEMENT", node->position());
Visit(node->expression());
}
void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- IndentedScope indent(this, "EMPTY");
+ IndentedScope indent(this, "EMPTY", node->position());
}
@@ -1234,7 +1250,7 @@ void AstPrinter::VisitSloppyBlockFunctionStatement(
void AstPrinter::VisitIfStatement(IfStatement* node) {
- IndentedScope indent(this, "IF");
+ IndentedScope indent(this, "IF", node->position());
PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_statement());
if (node->HasElseStatement()) {
@@ -1244,32 +1260,32 @@ void AstPrinter::VisitIfStatement(IfStatement* node) {
void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- IndentedScope indent(this, "CONTINUE");
+ IndentedScope indent(this, "CONTINUE", node->position());
PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- IndentedScope indent(this, "BREAK");
+ IndentedScope indent(this, "BREAK", node->position());
PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- IndentedScope indent(this, "RETURN");
+ IndentedScope indent(this, "RETURN", node->position());
Visit(node->expression());
}
void AstPrinter::VisitWithStatement(WithStatement* node) {
- IndentedScope indent(this, "WITH");
+ IndentedScope indent(this, "WITH", node->position());
PrintIndentedVisit("OBJECT", node->expression());
PrintIndentedVisit("BODY", node->statement());
}
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent(this, "SWITCH");
+ IndentedScope indent(this, "SWITCH", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
@@ -1280,10 +1296,10 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitCaseClause(CaseClause* clause) {
if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
+ IndentedScope indent(this, "DEFAULT", clause->position());
PrintStatements(clause->statements());
} else {
- IndentedScope indent(this, "CASE");
+ IndentedScope indent(this, "CASE", clause->position());
Visit(clause->label());
PrintStatements(clause->statements());
}
@@ -1291,7 +1307,7 @@ void AstPrinter::VisitCaseClause(CaseClause* clause) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- IndentedScope indent(this, "DO");
+ IndentedScope indent(this, "DO", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@@ -1299,7 +1315,7 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
- IndentedScope indent(this, "WHILE");
+ IndentedScope indent(this, "WHILE", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -1307,7 +1323,7 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
- IndentedScope indent(this, "FOR");
+ IndentedScope indent(this, "FOR", node->position());
PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -1317,7 +1333,7 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
- IndentedScope indent(this, "FOR IN");
+ IndentedScope indent(this, "FOR IN", node->position());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -1325,7 +1341,7 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
- IndentedScope indent(this, "FOR OF");
+ IndentedScope indent(this, "FOR OF", node->position());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("OF", node->iterable());
PrintIndentedVisit("BODY", node->body());
@@ -1333,7 +1349,7 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- IndentedScope indent(this, "TRY CATCH");
+ IndentedScope indent(this, "TRY CATCH", node->position());
PrintIndentedVisit("TRY", node->try_block());
PrintLiteralWithModeIndented("CATCHVAR",
node->variable(),
@@ -1343,19 +1359,19 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- IndentedScope indent(this, "TRY finalLY");
+ IndentedScope indent(this, "TRY FINALLY", node->position());
PrintIndentedVisit("TRY", node->try_block());
PrintIndentedVisit("FINALLY", node->finally_block());
}
void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- IndentedScope indent(this, "DEBUGGER");
+ IndentedScope indent(this, "DEBUGGER", node->position());
}
void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
+ IndentedScope indent(this, "FUNC LITERAL", node->position());
PrintLiteralIndented("NAME", node->name(), false);
PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
PrintParameters(node->scope());
@@ -1367,7 +1383,7 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
- IndentedScope indent(this, "CLASS LITERAL");
+ IndentedScope indent(this, "CLASS LITERAL", node->position());
if (node->raw_name() != nullptr) {
PrintLiteralIndented("NAME", node->name(), false);
}
@@ -1414,13 +1430,19 @@ void AstPrinter::PrintProperties(
void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
- IndentedScope indent(this, "NATIVE FUNC LITERAL");
+ IndentedScope indent(this, "NATIVE FUNC LITERAL", node->position());
PrintLiteralIndented("NAME", node->name(), false);
}
+void AstPrinter::VisitDoExpression(DoExpression* node) {
+ IndentedScope indent(this, "DO EXPRESSION", node->position());
+ PrintStatements(node->block()->statements());
+}
+
+
void AstPrinter::VisitConditional(Conditional* node) {
- IndentedScope indent(this, "CONDITIONAL");
+ IndentedScope indent(this, "CONDITIONAL", node->position());
PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_expression());
PrintIndentedVisit("ELSE", node->else_expression());
@@ -1434,7 +1456,7 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- IndentedScope indent(this, "REGEXP LITERAL");
+ IndentedScope indent(this, "REGEXP LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
@@ -1444,7 +1466,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- IndentedScope indent(this, "OBJ LITERAL");
+ IndentedScope indent(this, "OBJ LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
@@ -1453,13 +1475,13 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent(this, "ARRAY LITERAL");
+ IndentedScope indent(this, "ARRAY LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
if (node->values()->length() > 0) {
- IndentedScope indent(this, "VALUES");
+ IndentedScope indent(this, "VALUES", node->position());
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
@@ -1471,7 +1493,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
int pos =
- FormatICSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
+ FormatSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
switch (var->location()) {
case VariableLocation::UNALLOCATED:
@@ -1497,28 +1519,28 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->target());
Visit(node->value());
}
void AstPrinter::VisitYield(Yield* node) {
- IndentedScope indent(this, "YIELD");
+ IndentedScope indent(this, "YIELD", node->position());
Visit(node->expression());
}
void AstPrinter::VisitThrow(Throw* node) {
- IndentedScope indent(this, "THROW");
+ IndentedScope indent(this, "THROW", node->position());
Visit(node->exception());
}
void AstPrinter::VisitProperty(Property* node) {
EmbeddedVector<char, 128> buf;
- FormatICSlotNode(&buf, node, "PROPERTY", node->PropertyFeedbackSlot());
- IndentedScope indent(this, buf.start());
+ FormatSlotNode(&buf, node, "PROPERTY", node->PropertyFeedbackSlot());
+ IndentedScope indent(this, buf.start(), node->position());
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
@@ -1532,7 +1554,7 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
- FormatICSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
+ FormatSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
IndentedScope indent(this, buf.start());
Visit(node->expression());
@@ -1541,7 +1563,7 @@ void AstPrinter::VisitCall(Call* node) {
void AstPrinter::VisitCallNew(CallNew* node) {
- IndentedScope indent(this, "CALL NEW");
+ IndentedScope indent(this, "CALL NEW", node->position());
Visit(node->expression());
PrintArguments(node->arguments());
}
@@ -1550,13 +1572,13 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
- IndentedScope indent(this, buf.start());
+ IndentedScope indent(this, buf.start(), node->position());
PrintArguments(node->arguments());
}
void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->expression());
}
@@ -1565,48 +1587,48 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
- IndentedScope indent(this, buf.start());
+ IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitSpread(Spread* node) {
- IndentedScope indent(this, "...");
+ IndentedScope indent(this, "...", node->position());
Visit(node->expression());
}
void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
- IndentedScope indent(this, "()");
+ IndentedScope indent(this, "()", node->position());
}
void AstPrinter::VisitThisFunction(ThisFunction* node) {
- IndentedScope indent(this, "THIS-FUNCTION");
+ IndentedScope indent(this, "THIS-FUNCTION", node->position());
}
void AstPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {
- IndentedScope indent(this, "SUPER-PROPERTY-REFERENCE");
+ IndentedScope indent(this, "SUPER-PROPERTY-REFERENCE", node->position());
}
void AstPrinter::VisitSuperCallReference(SuperCallReference* node) {
- IndentedScope indent(this, "SUPER-CALL-REFERENCE");
+ IndentedScope indent(this, "SUPER-CALL-REFERENCE", node->position());
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index 1971cfe839..0793d33e74 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -13,7 +13,7 @@ namespace internal {
class CallPrinter : public AstVisitor {
public:
- CallPrinter(Isolate* isolate, Zone* zone);
+ explicit CallPrinter(Isolate* isolate);
virtual ~CallPrinter();
// The following routine prints the node with position |position| into a
@@ -52,7 +52,7 @@ class CallPrinter : public AstVisitor {
class PrettyPrinter: public AstVisitor {
public:
- PrettyPrinter(Isolate* isolate, Zone* zone);
+ explicit PrettyPrinter(Isolate* isolate);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -64,7 +64,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(Isolate* isolate, Zone* zone, AstNode* node);
+ static void PrintOut(Isolate* isolate, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node) override;
@@ -98,7 +98,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- AstPrinter(Isolate* isolate, Zone* zone);
+ explicit AstPrinter(Isolate* isolate);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
@@ -133,6 +133,7 @@ class AstPrinter: public PrettyPrinter {
#endif // DEBUG
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PRETTYPRINTER_H_
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index ebda3dba83..03802a5c66 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -152,6 +152,7 @@ class AllocationTracker {
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ALLOCATION_TRACKER_H_
diff --git a/deps/v8/src/profiler/circular-queue-inl.h b/deps/v8/src/profiler/circular-queue-inl.h
index 66b4af5b4a..428945a2ee 100644
--- a/deps/v8/src/profiler/circular-queue-inl.h
+++ b/deps/v8/src/profiler/circular-queue-inl.h
@@ -64,6 +64,7 @@ typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
return next;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CIRCULAR_QUEUE_INL_H_
diff --git a/deps/v8/src/profiler/circular-queue.h b/deps/v8/src/profiler/circular-queue.h
index 3508b371c4..272843bb2d 100644
--- a/deps/v8/src/profiler/circular-queue.h
+++ b/deps/v8/src/profiler/circular-queue.h
@@ -63,6 +63,7 @@ class SamplingCircularQueue {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CIRCULAR_QUEUE_H_
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index df727ae7cb..ea82c5589f 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -74,6 +74,7 @@ void ProfilerEventsProcessor::FinishTickSample() {
ticks_buffer_.FinishEnqueue();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CPU_PROFILER_INL_H_
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index c4216ed478..5e61697339 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -4,10 +4,9 @@
#include "src/profiler/cpu-profiler.h"
-#include "src/compiler.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/hashmap.h"
#include "src/log-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
@@ -254,7 +253,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
NULL, code->instruction_start());
if (info) {
- rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
rec->entry->FillFunctionInfo(shared);
@@ -291,7 +289,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name), line,
column, line_table, code->instruction_start());
if (info) {
- rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
rec->entry->FillFunctionInfo(shared);
@@ -441,6 +438,7 @@ void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
void CpuProfiler::StartProfiling(String* title, bool record_samples) {
StartProfiling(profiles_->GetName(title), record_samples);
+ isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
}
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 2d6732725a..2326bb7652 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -270,7 +270,8 @@ class CpuProfiler : public CodeEventListener {
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CPU_PROFILER_H_
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 3f776e05a8..4403e5d6c9 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -5,6 +5,7 @@
#include "src/profiler/heap-profiler.h"
#include "src/api.h"
+#include "src/debug/debug.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -75,6 +76,10 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
ids_->RemoveDeadEntries();
is_tracking_object_moves_ = true;
+
+ heap()->isolate()->debug()->feature_tracker()->Track(
+ DebugFeatureTracker::kHeapSnapshot);
+
return result;
}
@@ -86,6 +91,8 @@ void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
if (track_allocations) {
allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
heap()->DisableInlineAllocation();
+ heap()->isolate()->debug()->feature_tracker()->Track(
+ DebugFeatureTracker::kAllocationTracking);
}
}
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index b304f388ff..d6fcbbdaca 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -79,6 +79,7 @@ class HeapProfiler {
base::Mutex profiler_mutex_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_HEAP_PROFILER_H_
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index 12e37f5e60..fb1e891c94 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -43,6 +43,7 @@ HeapGraphEdge** HeapEntry::children_arr() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_INL_H_
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index c7bb3c950a..2268db223f 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -836,7 +836,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry::kString,
names_->GetName(String::cast(object)));
} else if (object->IsSymbol()) {
- return AddEntry(object, HeapEntry::kSymbol, "symbol");
+ if (Symbol::cast(object)->is_private())
+ return AddEntry(object, HeapEntry::kHidden, "private symbol");
+ else
+ return AddEntry(object, HeapEntry::kSymbol, "symbol");
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
@@ -992,12 +995,12 @@ class IndexedReferencesExtractor : public ObjectVisitor {
parent_(parent),
next_index_(0) {
}
- void VisitCodeEntry(Address entry_address) {
+ void VisitCodeEntry(Address entry_address) override {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
generator_->SetInternalReference(parent_obj_, parent_, "code", code);
generator_->TagCodeObject(code);
}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
++next_index_;
if (CheckVisitedAndUnmark(p)) continue;
@@ -1155,23 +1158,23 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetWeakReference(js_fun, entry,
"next_function_link", js_fun->next_function_link(),
JSFunction::kNextFunctionLinkOffset);
- STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset
- == JSFunction::kNonWeakFieldsEndOffset);
+ // Ensure no new weak references appeared in JSFunction.
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset ==
+ JSFunction::kNonWeakFieldsEndOffset);
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+ JSFunction::kNextFunctionLinkOffset);
STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset + kPointerSize
== JSFunction::kSize);
- } else if (obj->IsGlobalObject()) {
- GlobalObject* global_obj = GlobalObject::cast(obj);
- SetInternalReference(global_obj, entry,
- "builtins", global_obj->builtins(),
- GlobalObject::kBuiltinsOffset);
- SetInternalReference(global_obj, entry,
- "native_context", global_obj->native_context(),
- GlobalObject::kNativeContextOffset);
- SetInternalReference(global_obj, entry,
- "global_proxy", global_obj->global_proxy(),
- GlobalObject::kGlobalProxyOffset);
- STATIC_ASSERT(GlobalObject::kHeaderSize - JSObject::kHeaderSize ==
- 3 * kPointerSize);
+ } else if (obj->IsJSGlobalObject()) {
+ JSGlobalObject* global_obj = JSGlobalObject::cast(obj);
+ SetInternalReference(global_obj, entry, "native_context",
+ global_obj->native_context(),
+ JSGlobalObject::kNativeContextOffset);
+ SetInternalReference(global_obj, entry, "global_proxy",
+ global_obj->global_proxy(),
+ JSGlobalObject::kGlobalProxyOffset);
+ STATIC_ASSERT(JSGlobalObject::kSize - JSObject::kHeaderSize ==
+ 2 * kPointerSize);
} else if (obj->IsJSArrayBufferView()) {
JSArrayBufferView* view = JSArrayBufferView::cast(obj);
SetInternalReference(view, entry, "buffer", view->buffer(),
@@ -1260,7 +1263,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
+ EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, JSGlobalObject, global);
if (context->IsNativeContext()) {
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
@@ -1593,18 +1596,14 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
JSFunction* func = JSFunction::cast(js_obj);
if (func->shared()->bound()) {
- FixedArray* bindings = func->function_bindings();
- SetNativeBindReference(js_obj, entry, "bound_this",
- bindings->get(JSFunction::kBoundThisIndex));
+ BindingsArray* bindings = func->function_bindings();
+ SetNativeBindReference(js_obj, entry, "bound_this", bindings->bound_this());
SetNativeBindReference(js_obj, entry, "bound_function",
- bindings->get(JSFunction::kBoundFunctionIndex));
- for (int i = JSFunction::kBoundArgumentsStartIndex;
- i < bindings->length(); i++) {
- const char* reference_name = names_->GetFormatted(
- "bound_argument_%d",
- i - JSFunction::kBoundArgumentsStartIndex);
+ bindings->bound_function());
+ for (int i = 0; i < bindings->bindings_count(); i++) {
+ const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
SetNativeBindReference(js_obj, entry, reference_name,
- bindings->get(i));
+ bindings->binding(i));
}
}
}
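
The hunk above swaps raw FixedArray index arithmetic (kBoundThisIndex, kBoundArgumentsStartIndex + i) for a typed BindingsArray with named accessors. A hedged sketch of the same wrapper pattern over a plain vector (the layout here is illustrative, not V8's):

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    class BindingsSketch {
     public:
      explicit BindingsSketch(std::vector<std::string> slots)
          : slots_(std::move(slots)) {}
      // Fixed header slots get named accessors instead of magic indices.
      const std::string& bound_this() const { return slots_[0]; }
      const std::string& bound_function() const { return slots_[1]; }
      // Arguments are addressed relative to the header, hiding the offset.
      int bindings_count() const { return static_cast<int>(slots_.size()) - 2; }
      const std::string& binding(int i) const { return slots_[2 + i]; }

     private:
      std::vector<std::string> slots_;
    };

    int main() {
      BindingsSketch b({"this", "f", "arg0", "arg1"});
      assert(b.bindings_count() == 2);
      assert(b.binding(1) == "arg1");  // no kBoundArgumentsStartIndex math
    }
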
@@ -1644,7 +1643,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
break;
}
}
- } else if (js_obj->IsGlobalObject()) {
+ } else if (js_obj->IsJSGlobalObject()) {
// We assume that global objects can only have slow properties.
GlobalDictionary* dictionary = js_obj->global_dictionary();
int length = dictionary->Capacity();
@@ -1773,7 +1772,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
heap_(heap) {
}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
if (collecting_all_references_) {
for (Object** p = start; p < end; p++) all_references_.Add(*p);
} else {
@@ -1806,7 +1805,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
}
}
- void Synchronize(VisitorSynchronization::SyncTag tag) {
+ void Synchronize(VisitorSynchronization::SyncTag tag) override {
if (collecting_all_references_ &&
previous_reference_count_ != all_references_.length()) {
previous_reference_count_ = all_references_.length();
@@ -2133,7 +2132,7 @@ void V8HeapExplorer::SetGcSubrootReference(
// Add a shortcut to JS global object reference at snapshot root.
if (child_obj->IsNativeContext()) {
Context* context = Context::cast(child_obj);
- GlobalObject* global = context->global_object();
+ JSGlobalObject* global = context->global_object();
if (global->IsJSGlobalObject()) {
bool is_debug_object = false;
is_debug_object = heap_->isolate()->debug()->IsDebugGlobal(global);
@@ -2164,6 +2163,7 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
#undef SYMBOL_NAME
#define SYMBOL_NAME(name, description) NAME_ENTRY(name)
PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
#undef NAME_ENTRY
CHECK(!strong_gc_subroot_names_.is_empty());
@@ -2191,7 +2191,7 @@ void V8HeapExplorer::MarkAsWeakContainer(Object* object) {
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if ((*p)->IsNativeContext()) {
Context* context = Context::cast(*p);
@@ -2244,11 +2244,9 @@ class GlobalHandlesExtractor : public ObjectVisitor {
public:
explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
: explorer_(explorer) {}
- virtual ~GlobalHandlesExtractor() {}
- virtual void VisitPointers(Object** start, Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
+ ~GlobalHandlesExtractor() override {}
+ void VisitPointers(Object** start, Object** end) override { UNREACHABLE(); }
+ void VisitEmbedderReference(Object** p, uint16_t class_id) override {
explorer_->VisitSubtreeWrapper(p, class_id);
}
private:
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 1baebeee9e..3d6693b0d9 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -8,7 +8,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
-#include "src/strings-storage.h"
+#include "src/profiler/strings-storage.h"
namespace v8 {
namespace internal {
@@ -616,6 +616,7 @@ class HeapSnapshotJSONSerializer {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index c2e98cc4c8..04d7e39c16 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -23,7 +23,6 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
column_number_(column_number),
script_id_(v8::UnboundScript::kNoScriptId),
position_(0),
- no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
deopt_position_(SourcePosition::Unknown()),
@@ -43,6 +42,7 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
inline unsigned ProfileNode::function_id() const {
return tree_->GetFunctionId(this);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_PROFILE_GENERATOR_INL_H_
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index f3592bba65..21fa5ca4a4 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -4,7 +4,6 @@
#include "src/profiler/profile-generator.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
@@ -50,7 +49,6 @@ const char* const CodeEntry::kNoDeoptReason = "";
CodeEntry::~CodeEntry() {
- delete no_frame_ranges_;
delete line_info_;
}
@@ -611,17 +609,8 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// ebp contains return address of the current function and skips caller's
// frame. Check for this case and just skip such samples.
if (pc_entry) {
- List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
int pc_offset =
static_cast<int>(sample.pc - pc_entry->instruction_start());
- if (ranges) {
- for (int i = 0; i < ranges->length(); i++) {
- OffsetRange& range = ranges->at(i);
- if (range.from <= pc_offset && pc_offset < range.to) {
- return;
- }
- }
- }
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index ce58d70ae9..079413a8cd 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -10,13 +10,11 @@
#include "src/allocation.h"
#include "src/compiler.h"
#include "src/hashmap.h"
-#include "src/strings-storage.h"
+#include "src/profiler/strings-storage.h"
namespace v8 {
namespace internal {
-struct OffsetRange;
-
// Provides a mapping from the offsets within generated code to
// the source line.
class JITLineInfoTable : public Malloced {
@@ -83,10 +81,6 @@ class CodeEntry {
void FillFunctionInfo(SharedFunctionInfo* shared);
- List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
- void set_no_frame_ranges(List<OffsetRange>* ranges) {
- no_frame_ranges_ = ranges;
- }
void set_inlined_function_infos(
const std::vector<InlinedFunctionInfo>& infos) {
inlined_function_infos_ = infos;
@@ -125,7 +119,6 @@ class CodeEntry {
int column_number_;
int script_id_;
int position_;
- List<OffsetRange>* no_frame_ranges_;
const char* bailout_reason_;
const char* deopt_reason_;
SourcePosition deopt_position_;
@@ -383,6 +376,7 @@ class ProfileGenerator {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_PROFILE_GENERATOR_H_
diff --git a/deps/v8/src/profiler/sampler.h b/deps/v8/src/profiler/sampler.h
index ed932c0354..354e935e31 100644
--- a/deps/v8/src/profiler/sampler.h
+++ b/deps/v8/src/profiler/sampler.h
@@ -130,6 +130,7 @@ class Sampler {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_SAMPLER_H_
diff --git a/deps/v8/src/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 52cc00f852..9f095b8866 100644
--- a/deps/v8/src/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/strings-storage.h"
+#include "src/profiler/strings-storage.h"
#include "src/base/smart-pointers.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 8fd9da7d3c..7164caef63 100644
--- a/deps/v8/src/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRINGS_STORAGE_H_
-#define V8_STRINGS_STORAGE_H_
+#ifndef V8_PROFILER_STRINGS_STORAGE_H_
+#define V8_PROFILER_STRINGS_STORAGE_H_
#include "src/allocation.h"
#include "src/hashmap.h"
@@ -11,8 +11,6 @@
namespace v8 {
namespace internal {
-struct OffsetRange;
-
// Provides a storage of strings allocated in C++ heap, to hold them
// forever, even if they disappear from JS heap or external storage.
class StringsStorage {
@@ -41,7 +39,7 @@ class StringsStorage {
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_STRINGS_STORAGE_H_
+#endif // V8_PROFILER_STRINGS_STORAGE_H_
diff --git a/deps/v8/src/profiler/unbound-queue-inl.h b/deps/v8/src/profiler/unbound-queue-inl.h
index fef7bec8d3..8c45d09861 100644
--- a/deps/v8/src/profiler/unbound-queue-inl.h
+++ b/deps/v8/src/profiler/unbound-queue-inl.h
@@ -77,6 +77,7 @@ Record* UnboundQueue<Record>::Peek() const {
return &next->value;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_UNBOUND_QUEUE_INL_H_
diff --git a/deps/v8/src/profiler/unbound-queue.h b/deps/v8/src/profiler/unbound-queue.h
index a63c327d10..c53b35a8ed 100644
--- a/deps/v8/src/profiler/unbound-queue.h
+++ b/deps/v8/src/profiler/unbound-queue.h
@@ -42,6 +42,7 @@ class UnboundQueue BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_UNBOUND_QUEUE_
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
new file mode 100644
index 0000000000..8c8dfa4f4a
--- /dev/null
+++ b/deps/v8/src/property-descriptor.cc
@@ -0,0 +1,268 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/property-descriptor.h"
+
+#include "src/bootstrapper.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Helper function for ToPropertyDescriptor. The comments describe the steps
+// for "enumerable"; the other properties are handled the same way.
+// Returns false if an exception was thrown.
+bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
+ Handle<Object>* value) {
+ LookupIterator it(obj, name);
+ // 4. Let hasEnumerable be HasProperty(Obj, "enumerable").
+ Maybe<PropertyAttributes> maybe_attr = JSReceiver::GetPropertyAttributes(&it);
+ // 5. ReturnIfAbrupt(hasEnumerable).
+ if (!maybe_attr.IsJust()) return false;
+ // 6. If hasEnumerable is true, then
+ if (maybe_attr.FromJust() != ABSENT) {
+ // 6a. Let enum be ToBoolean(Get(Obj, "enumerable")).
+ // 6b. ReturnIfAbrupt(enum).
+ if (!JSObject::GetProperty(&it).ToHandle(value)) return false;
+ }
+ return true;
+}
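
GetPropertyIfPresent reports three outcomes: exception (returns false), property absent (returns true, handle left null), and property present (returns true, handle filled). A standalone sketch of that contract, with std::optional standing in for handles and the exception path omitted:

    #include <cassert>
    #include <map>
    #include <optional>
    #include <string>

    // The bool reports success; the out-parameter reports presence.
    bool GetIfPresent(const std::map<std::string, int>& obj,
                      const std::string& name, std::optional<int>* value) {
      auto it = obj.find(name);                  // HasProperty(Obj, name)
      if (it != obj.end()) *value = it->second;  // Get(Obj, name)
      return true;  // nothing can throw in this sketch
    }

    int main() {
      std::map<std::string, int> obj{{"enumerable", 1}};
      std::optional<int> v;
      assert(GetIfPresent(obj, "enumerable", &v) && v.has_value());
      v.reset();
      assert(GetIfPresent(obj, "writable", &v) && !v.has_value());  // absent
    }
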
+
+
+// Helper function for ToPropertyDescriptor. Handles the case of "simple"
+// objects: nothing on the prototype chain, just own fast data properties.
+// Must not have observable side effects, because the slow path will restart
+// the entire conversion!
+bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
+ PropertyDescriptor* desc) {
+ if (!obj->IsJSObject()) return false;
+ Map* map = Handle<JSObject>::cast(obj)->map();
+ if (map->instance_type() != JS_OBJECT_TYPE) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->prototype() != *isolate->initial_object_prototype()) return false;
+ // During bootstrapping, the object_function_prototype_map hasn't been
+ // set up yet.
+ if (isolate->bootstrapper()->IsActive()) return false;
+ if (JSObject::cast(map->prototype())->map() !=
+ isolate->native_context()->object_function_prototype_map()) {
+ return false;
+ }
+ // TODO(jkummerow): support dictionary properties?
+ if (map->is_dictionary_map()) return false;
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Name* key = descs->GetKey(i);
+ Handle<Object> value;
+ switch (details.type()) {
+ case DATA:
+ value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
+ details.representation(),
+ FieldIndex::ForDescriptor(map, i));
+ break;
+ case DATA_CONSTANT:
+ value = handle(descs->GetConstant(i), isolate);
+ break;
+ case ACCESSOR:
+ case ACCESSOR_CONSTANT:
+ // Bail out to slow path.
+ return false;
+ }
+ Heap* heap = isolate->heap();
+ if (key == heap->enumerable_string()) {
+ desc->set_enumerable(value->BooleanValue());
+ } else if (key == heap->configurable_string()) {
+ desc->set_configurable(value->BooleanValue());
+ } else if (key == heap->value_string()) {
+ desc->set_value(value);
+ } else if (key == heap->writable_string()) {
+ desc->set_writable(value->BooleanValue());
+ } else if (key == heap->get_string()) {
+ // Bail out to slow path to throw an exception if necessary.
+ if (!value->IsCallable()) return false;
+ desc->set_get(value);
+ } else if (key == heap->set_string()) {
+ // Bail out to slow path to throw an exception if necessary.
+ if (!value->IsCallable()) return false;
+ desc->set_set(value);
+ }
+ }
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ // Bail out to slow path to throw an exception.
+ return false;
+ }
+ return true;
+}
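
The fast path's contract is the comment above it: the probe may be abandoned at any point, so it must never run user-observable code such as getters; on false the caller simply redoes the whole conversion on the slow path. A minimal sketch of that probe-then-restart shape (the Input type is hypothetical):

    #include <functional>
    #include <optional>

    struct Input {
      std::optional<bool> plain_field;    // fast-path-friendly data
      std::function<bool()> user_getter;  // observable: must not run eagerly
    };

    // Reads plain data only; bails (false) on anything observable, so
    // abandoning it midway leaves no user-visible trace.
    bool FastPath(const Input& in, bool* out) {
      if (in.user_getter) return false;   // would be observable: bail
      if (!in.plain_field) return false;
      *out = *in.plain_field;
      return true;
    }

    bool SlowPath(const Input& in) {
      return in.user_getter ? in.user_getter() : in.plain_field.value_or(false);
    }

    bool Convert(const Input& in) {
      bool v;
      if (FastPath(in, &v)) return v;
      return SlowPath(in);  // restart the entire conversion from scratch
    }

    int main() {
      Input plain{true, nullptr};
      Input guarded{std::nullopt, [] { return true; }};
      return Convert(plain) && Convert(guarded) ? 0 : 1;
    }
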
+
+
+static void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<String> name, Handle<Object> value) {
+ LookupIterator it(object, name);
+ Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
+ CHECK(result.IsJust() && result.FromJust());
+}
+
+
+// ES6 6.2.4.4 "FromPropertyDescriptor"
+Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
+ DCHECK(!(PropertyDescriptor::IsAccessorDescriptor(this) &&
+ PropertyDescriptor::IsDataDescriptor(this)));
+ Factory* factory = isolate->factory();
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ if (has_value()) {
+ CreateDataProperty(isolate, result, factory->value_string(), value());
+ }
+ if (has_writable()) {
+ CreateDataProperty(isolate, result, factory->writable_string(),
+ factory->ToBoolean(writable()));
+ }
+ if (has_get()) {
+ CreateDataProperty(isolate, result, factory->get_string(), get());
+ }
+ if (has_set()) {
+ CreateDataProperty(isolate, result, factory->set_string(), set());
+ }
+ if (has_enumerable()) {
+ CreateDataProperty(isolate, result, factory->enumerable_string(),
+ factory->ToBoolean(enumerable()));
+ }
+ if (has_configurable()) {
+ CreateDataProperty(isolate, result, factory->configurable_string(),
+ factory->ToBoolean(configurable()));
+ }
+ return result;
+}
+
+
+// ES6 6.2.4.5
+// Returns false in case of exception.
+// static
+bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
+ Handle<Object> obj,
+ PropertyDescriptor* desc) {
+ // 1. ReturnIfAbrupt(Obj).
+ // 2. If Type(Obj) is not Object, throw a TypeError exception.
+ if (!obj->IsSpecObject()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kPropertyDescObject, obj));
+ return false;
+ }
+ // 3. Let desc be a new Property Descriptor that initially has no fields.
+ DCHECK(desc->is_empty());
+
+ if (ToPropertyDescriptorFastPath(isolate, obj, desc)) {
+ return true;
+ }
+
+ // TODO(jkummerow): Implement JSProxy support.
+ // Specifically, instead of taking the attributes != ABSENT shortcut, we
+ // have to implement proper HasProperty for proxies.
+ if (!obj->IsJSProxy()) {
+ { // enumerable?
+ Handle<Object> enumerable;
+ // 4 through 6b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->enumerable_string(),
+ &enumerable)) {
+ return false;
+ }
+ // 6c. Set the [[Enumerable]] field of desc to enum.
+ if (!enumerable.is_null()) {
+ desc->set_enumerable(enumerable->BooleanValue());
+ }
+ }
+ { // configurable?
+ Handle<Object> configurable;
+ // 7 through 9b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->configurable_string(),
+ &configurable)) {
+ return false;
+ }
+ // 9c. Set the [[Configurable]] field of desc to conf.
+ if (!configurable.is_null()) {
+ desc->set_configurable(configurable->BooleanValue());
+ }
+ }
+ { // value?
+ Handle<Object> value;
+ // 10 through 12b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->value_string(),
+ &value))
+ return false;
+ // 12c. Set the [[Value]] field of desc to value.
+ if (!value.is_null()) desc->set_value(value);
+ }
+ { // writable?
+ Handle<Object> writable;
+ // 13 through 15b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->writable_string(),
+ &writable)) {
+ return false;
+ }
+ // 15c. Set the [[Writable]] field of desc to writable.
+ if (!writable.is_null()) desc->set_writable(writable->BooleanValue());
+ }
+ { // getter?
+ Handle<Object> getter;
+ // 16 through 18b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->get_string(), &getter))
+ return false;
+ if (!getter.is_null()) {
+ // 18c. If IsCallable(getter) is false and getter is not undefined,
+ // throw a TypeError exception.
+ if (!getter->IsCallable() && !getter->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectGetterCallable, getter));
+ return false;
+ }
+ // 18d. Set the [[Get]] field of desc to getter.
+ desc->set_get(getter);
+ }
+ { // setter?
+ Handle<Object> setter;
+ // 19 through 21b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->set_string(),
+ &setter))
+ return false;
+ if (!setter.is_null()) {
+ // 21c. If IsCallable(setter) is false and setter is not undefined,
+ // throw a TypeError exception.
+ if (!setter->IsCallable() && !setter->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectSetterCallable, setter));
+ return false;
+ }
+ // 21d. Set the [[Set]] field of desc to setter.
+ desc->set_set(setter);
+ }
+ }
+ // 22. If either desc.[[Get]] or desc.[[Set]] is present, then
+ // 22a. If either desc.[[Value]] or desc.[[Writable]] is present,
+ // throw a TypeError exception.
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kValueAndAccessor, obj));
+ return false;
+ }
+ }
+ } else {
+ DCHECK(obj->IsJSProxy());
+ // Having an UNIMPLEMENTED() here would upset ClusterFuzz, because
+ // --harmony-proxies makes it possible to reach this branch.
+ isolate->Throw(
+ *isolate->factory()->NewTypeError(MessageTemplate::kUnsupported));
+ return false;
+ }
+ // 23. Return desc.
+ return true;
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/property-descriptor.h b/deps/v8/src/property-descriptor.h
new file mode 100644
index 0000000000..9425ab10ef
--- /dev/null
+++ b/deps/v8/src/property-descriptor.h
@@ -0,0 +1,117 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROPERTY_DESCRIPTOR_H_
+#define V8_PROPERTY_DESCRIPTOR_H_
+
+
+#include "src/handles.h"
+#include "src/property-details.h"
+
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Object;
+
+class PropertyDescriptor {
+ public:
+ PropertyDescriptor()
+ : enumerable_(false),
+ has_enumerable_(false),
+ configurable_(false),
+ has_configurable_(false),
+ writable_(false),
+ has_writable_(false) {}
+
+ // ES6 6.2.4.1
+ static bool IsAccessorDescriptor(PropertyDescriptor* desc) {
+ return desc->has_get() || desc->has_set();
+ }
+
+ // ES6 6.2.4.2
+ static bool IsDataDescriptor(PropertyDescriptor* desc) {
+ return desc->has_value() || desc->has_writable();
+ }
+
+ // ES6 6.2.4.3
+ static bool IsGenericDescriptor(PropertyDescriptor* desc) {
+ return !IsAccessorDescriptor(desc) && !IsDataDescriptor(desc);
+ }
+
+ bool is_empty() const {
+ return !has_enumerable() && !has_configurable() && !has_writable() &&
+ !has_value() && !has_get() && !has_set();
+ }
+
+ bool enumerable() const { return enumerable_; }
+ void set_enumerable(bool enumerable) {
+ enumerable_ = enumerable;
+ has_enumerable_ = true;
+ }
+ bool has_enumerable() const { return has_enumerable_; }
+
+ bool configurable() const { return configurable_; }
+ void set_configurable(bool configurable) {
+ configurable_ = configurable;
+ has_configurable_ = true;
+ }
+ bool has_configurable() const { return has_configurable_; }
+
+ Handle<Object> value() const { return value_; }
+ void set_value(Handle<Object> value) { value_ = value; }
+ bool has_value() const { return !value_.is_null(); }
+
+ bool writable() const { return writable_; }
+ void set_writable(bool writable) {
+ writable_ = writable;
+ has_writable_ = true;
+ }
+ bool has_writable() const { return has_writable_; }
+
+ Handle<Object> get() const { return get_; }
+ void set_get(Handle<Object> get) { get_ = get; }
+ bool has_get() const { return !get_.is_null(); }
+
+ Handle<Object> set() const { return set_; }
+ void set_set(Handle<Object> set) { set_ = set; }
+ bool has_set() const { return !set_.is_null(); }
+
+ Handle<Object> name() const { return name_; }
+ void set_name(Handle<Object> name) { name_ = name; }
+
+ PropertyAttributes ToAttributes() {
+ return static_cast<PropertyAttributes>(
+ (has_enumerable() && !enumerable() ? DONT_ENUM : NONE) |
+ (has_configurable() && !configurable() ? DONT_DELETE : NONE) |
+ (has_writable() && !writable() ? READ_ONLY : NONE));
+ }
+
+ Handle<Object> ToObject(Isolate* isolate);
+
+ static bool ToPropertyDescriptor(Isolate* isolate, Handle<Object> obj,
+ PropertyDescriptor* desc);
+
+ private:
+ bool enumerable_ : 1;
+ bool has_enumerable_ : 1;
+ bool configurable_ : 1;
+ bool has_configurable_ : 1;
+ bool writable_ : 1;
+ bool has_writable_ : 1;
+ Handle<Object> value_;
+ Handle<Object> get_;
+ Handle<Object> set_;
+ Handle<Object> name_;
+
+ // Some compilers (Xcode 5.1, ARM GCC 4.9) insist on having a copy
+ // constructor for std::vector<PropertyDescriptor>, so we can't
+ // DISALLOW_COPY_AND_ASSIGN(PropertyDescriptor); here.
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROPERTY_DESCRIPTOR_H_
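
Given the class above, ToAttributes() turns each field that is present and false into a restriction bit. A standalone restatement of that mapping, using the attribute values from src/property-details.h:

    #include <cassert>

    enum Attrs { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

    int ToAttributesSketch(bool has_enumerable, bool enumerable,
                           bool has_configurable, bool configurable,
                           bool has_writable, bool writable) {
      return (has_enumerable && !enumerable ? DONT_ENUM : NONE) |
             (has_configurable && !configurable ? DONT_DELETE : NONE) |
             (has_writable && !writable ? READ_ONLY : NONE);
    }

    int main() {
      // An empty descriptor imposes no restrictions...
      assert(ToAttributesSketch(false, false, false, false, false, false) ==
             NONE);
      // ...while {configurable: false, writable: false} locks the slot down.
      assert(ToAttributesSketch(false, false, true, false, true, false) ==
             (READ_ONLY | DONT_DELETE));
    }
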
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 33d3b8d7ef..7e5c78b8d9 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -11,23 +11,28 @@
// Ecma-262 3rd 8.6.1
enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
+ NONE = v8::None,
+ READ_ONLY = v8::ReadOnly,
+ DONT_ENUM = v8::DontEnum,
+ DONT_DELETE = v8::DontDelete,
- SEALED = DONT_DELETE,
- FROZEN = SEALED | READ_ONLY,
+ SEALED = DONT_DELETE,
+ FROZEN = SEALED | READ_ONLY,
- STRING = 8, // Used to filter symbols and string names
- SYMBOLIC = 16,
- PRIVATE_SYMBOL = 32,
+ STRING = 8, // Used to filter symbols and string names
+ SYMBOLIC = 16,
+ PRIVATE_SYMBOL = 32,
- DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL,
- ABSENT = 64 // Used in runtime to indicate a property is absent.
+ DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL,
+ ABSENT = 64, // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
// a non-existent property.
+
+ // When creating a property, EVAL_DECLARED is used to indicate that the
+ // property came from a sloppy-mode direct eval, and certain checks need to
+ // be done.
+ // Cannot be stored in or returned from a descriptor's attributes bitfield.
+ EVAL_DECLARED = 128
};
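
Alongside the storable bits, the enum carries filter-only values such as DONT_SHOW. The exact filter plumbing is V8-internal; this sketch only illustrates the bit test a mask like that implies:

    #include <cassert>

    enum {
      NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4,
      SYMBOLIC = 16, PRIVATE_SYMBOL = 32,
      DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL
    };

    bool IsShown(int property_bits, int filter = DONT_SHOW) {
      // A property is skipped if any filtered bit is set on it.
      return (property_bits & filter) == 0;
    }

    int main() {
      assert(IsShown(NONE));                     // plain enumerable property
      assert(!IsShown(DONT_ENUM));               // hidden from enumeration
      assert(!IsShown(PRIVATE_SYMBOL));          // private symbols filtered
      assert(IsShown(READ_ONLY | DONT_DELETE));  // restrictions do not hide
    }
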
@@ -371,6 +376,7 @@ class PropertyDetails BASE_EMBEDDED {
std::ostream& operator<<(std::ostream& os,
const PropertyAttributes& attributes);
std::ostream& operator<<(std::ostream& os, const PropertyDetails& details);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROPERTY_DETAILS_H_
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 09ec5f207f..b58c9c6acb 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -107,6 +107,7 @@ class AccessorConstantDescriptor final : public Descriptor {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROPERTY_H_
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 123a95711e..c6cff65635 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -214,6 +214,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index a48291a421..0dc519580d 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -289,6 +289,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/regexp/bytecodes-irregexp.h b/deps/v8/src/regexp/bytecodes-irregexp.h
index 27691422f3..d6110a3cb5 100644
--- a/deps/v8/src/regexp/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/bytecodes-irregexp.h
@@ -76,6 +76,8 @@ BYTECODE_ITERATOR(DECLARE_BYTECODES)
static const int BC_##name##_LENGTH = length;
BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
#undef DECLARE_BYTECODE_LENGTH
-} }
+
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 8ec0a9e543..06b9699d01 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -196,6 +196,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
};
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/regexp/interpreter-irregexp.h b/deps/v8/src/regexp/interpreter-irregexp.h
index d97d3b0f17..244af99091 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.h
+++ b/deps/v8/src/regexp/interpreter-irregexp.h
@@ -23,6 +23,7 @@ class IrregexpInterpreter {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 118f3dba9c..3eb7c3c170 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -78,6 +78,7 @@ int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_JSREGEXP_INL_H_
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 438d1b1368..225ad73c4e 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -6410,7 +6410,9 @@ bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
- Object* key_pattern, ResultsCacheType type) {
+ Object* key_pattern,
+ FixedArray** last_match_cache,
+ ResultsCacheType type) {
FixedArray* cache;
if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
if (type == STRING_SPLIT_SUBSTRINGS) {
@@ -6426,23 +6428,25 @@ Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
uint32_t hash = key_string->Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
- }
- index =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
+ if (cache->get(index + kStringOffset) != key_string ||
+ cache->get(index + kPatternOffset) != key_pattern) {
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index + kStringOffset) != key_string ||
+ cache->get(index + kPatternOffset) != key_pattern) {
+ return Smi::FromInt(0);
+ }
}
- return Smi::FromInt(0);
+
+ *last_match_cache = FixedArray::cast(cache->get(index + kLastMatchOffset));
+ return cache->get(index + kArrayOffset);
}
void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern,
Handle<FixedArray> value_array,
+ Handle<FixedArray> last_match_cache,
ResultsCacheType type) {
Factory* factory = isolate->factory();
Handle<FixedArray> cache;
@@ -6464,6 +6468,7 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
} else {
uint32_t index2 =
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
@@ -6471,13 +6476,16 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
cache->set(index2 + kStringOffset, *key_string);
cache->set(index2 + kPatternOffset, *key_pattern);
cache->set(index2 + kArrayOffset, *value_array);
+ cache->set(index2 + kLastMatchOffset, *last_match_cache);
} else {
cache->set(index2 + kStringOffset, Smi::FromInt(0));
cache->set(index2 + kPatternOffset, Smi::FromInt(0));
cache->set(index2 + kArrayOffset, Smi::FromInt(0));
+ cache->set(index2 + kLastMatchOffset, Smi::FromInt(0));
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
}
}
// If the array is a reasonably short list of substrings, convert it into a
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 760d37862b..537bdff8e2 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -1666,12 +1666,12 @@ class RegExpResultsCache : public AllStatic {
// Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
// On success, the returned result is guaranteed to be a COW-array.
static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
- ResultsCacheType type);
+ FixedArray** last_match_out, ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
static void Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern, Handle<FixedArray> value_array,
- ResultsCacheType type);
+ Handle<FixedArray> last_match_cache, ResultsCacheType type);
static void Clear(FixedArray* cache);
static const int kRegExpResultsCacheSize = 0x100;
@@ -1680,6 +1680,7 @@ class RegExpResultsCache : public AllStatic {
static const int kStringOffset = 0;
static const int kPatternOffset = 1;
static const int kArrayOffset = 2;
+ static const int kLastMatchOffset = 3;
};
} // namespace internal
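
Note: the Lookup/Enter changes above thread a fourth slot, kLastMatchOffset, through each cache entry so the last-match state can be restored together with the cached results array. A minimal standalone model of the two-probe lookup follows; plain structs stand in for V8's flat FixedArray layout, the names mirror the diff, and everything else is illustrative:

    #include <cstdint>
    #include <string>

    struct Entry {                  // one kArrayEntriesPerCacheEntry group
      std::string key_string;       // kStringOffset
      std::string key_pattern;      // kPatternOffset
      const void* array;            // kArrayOffset
      const void* last_match;       // kLastMatchOffset (new in this commit)
    };

    constexpr uint32_t kCacheSize = 0x100;  // kRegExpResultsCacheSize
    constexpr uint32_t kEntrySlots = 4;     // kArrayEntriesPerCacheEntry

    const void* Lookup(Entry* cache, uint32_t hash, const std::string& s,
                       const std::string& p, const void** last_match_out) {
      // Primary probe: hash, rounded down to an entry boundary.
      uint32_t index = (hash & (kCacheSize - 1)) & ~(kEntrySlots - 1);
      Entry* e = &cache[index / kEntrySlots];
      if (e->key_string != s || e->key_pattern != p) {
        // Secondary probe: the next entry, wrapping around the table.
        index = (index + kEntrySlots) & (kCacheSize - 1);
        e = &cache[index / kEntrySlots];
        if (e->key_string != s || e->key_pattern != p) return nullptr;
      }
      *last_match_out = e->last_match;  // hand back the cached match state
      return e->array;
    }

The restructured control flow in the hunk (negated comparisons, single success exit) exists precisely so that both probe paths reach the one place where last_match_cache is written.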
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 36fd4b1564..da59546a79 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -220,6 +220,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 84c85affbe..265bf773eb 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -262,6 +262,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 2dd339eb8d..04a0e5e416 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -208,7 +208,7 @@ const RegList kRegExpCalleeSaved =
1 << 25 | 1 << 26 | 1 << 27 | 1 << 28 | 1 << 29 | 1 << 30 | 1 << 31;
#endif // V8_INTERPRETED_REGEXP
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index b86d28dfb9..6f176cd12c 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -56,6 +56,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
index 556d78d23d..bbfe5203d9 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -125,6 +125,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index d4092ceaad..f9364195fa 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -81,6 +81,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
RegExpMacroAssembler* assembler_;
};
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index c3d94a6acf..ea97d5b29b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -245,6 +245,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index 9a6394e198..aea46cf673 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -124,6 +124,7 @@ class RegExpStack {
DISALLOW_COPY_AND_ASSIGN(RegExpStack);
};
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_STACK_H_
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index d690dc1974..dbee9e86b5 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -276,6 +276,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
index f636ca08ce..0deea50357 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
@@ -196,6 +196,7 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
};
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
new file mode 100644
index 0000000000..f7a8eaba79
--- /dev/null
+++ b/deps/v8/src/register-configuration.cc
@@ -0,0 +1,168 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/register-configuration.h"
+#include "src/globals.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+#define REGISTER_COUNT(R) 1 +
+static const int kMaxAllocatableGeneralRegisterCount =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
+static const int kMaxAllocatableDoubleRegisterCount =
+ ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT)0;
+
+static const char* const kGeneralRegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+ GENERAL_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
+static const char* const kDoubleRegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+ DOUBLE_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
+STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
+ Register::kNumRegisters);
+STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
+ DoubleRegister::kMaxNumRegisters);
+
+class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
+ public:
+ explicit ArchDefaultRegisterConfiguration(CompilerSelector compiler)
+ : RegisterConfiguration(
+ Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
+#if V8_TARGET_ARCH_IA32
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_X87
+ kMaxAllocatableGeneralRegisterCount,
+ compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
+ compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_X64
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_ARM
+ FLAG_enable_embedded_constant_pool
+ ? (kMaxAllocatableGeneralRegisterCount - 1)
+ : kMaxAllocatableGeneralRegisterCount,
+ CpuFeatures::IsSupported(VFP32DREGS)
+ ? kMaxAllocatableDoubleRegisterCount
+ : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT)0),
+ ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT)0,
+#elif V8_TARGET_ARCH_ARM64
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_MIPS
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_MIPS64
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_PPC
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#else
+ GetAllocatableGeneralRegisterCount(),
+ GetAllocatableDoubleRegisterCount(),
+ GetAllocatableAliasedDoubleRegisterCount(),
+#endif
+ GetAllocatableGeneralCodes(), GetAllocatableDoubleCodes(),
+ kGeneralRegisterNames, kDoubleRegisterNames) {
+ }
+
+ const char* general_register_name_table_[Register::kNumRegisters];
+ const char* double_register_name_table_[DoubleRegister::kMaxNumRegisters];
+
+ private:
+ static const int* GetAllocatableGeneralCodes() {
+#define REGISTER_CODE(R) Register::kCode_##R,
+ static const int general_codes[] = {
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+ return general_codes;
+ }
+
+ static const int* GetAllocatableDoubleCodes() {
+#define REGISTER_CODE(R) DoubleRegister::kCode_##R,
+ static const int double_codes[] = {
+ ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+ return double_codes;
+ }
+};
+
+
+template <RegisterConfiguration::CompilerSelector compiler>
+struct RegisterConfigurationInitializer {
+ static void Construct(ArchDefaultRegisterConfiguration* config) {
+ new (config) ArchDefaultRegisterConfiguration(compiler);
+ }
+};
+
+static base::LazyInstance<
+ ArchDefaultRegisterConfiguration,
+ RegisterConfigurationInitializer<RegisterConfiguration::CRANKSHAFT>>::type
+ kDefaultRegisterConfigurationForCrankshaft = LAZY_INSTANCE_INITIALIZER;
+
+
+static base::LazyInstance<
+ ArchDefaultRegisterConfiguration,
+ RegisterConfigurationInitializer<RegisterConfiguration::TURBOFAN>>::type
+ kDefaultRegisterConfigurationForTurboFan = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+
+const RegisterConfiguration* RegisterConfiguration::ArchDefault(
+ CompilerSelector compiler) {
+ return compiler == TURBOFAN
+ ? &kDefaultRegisterConfigurationForTurboFan.Get()
+ : &kDefaultRegisterConfigurationForCrankshaft.Get();
+}
+
+
+RegisterConfiguration::RegisterConfiguration(
+ int num_general_registers, int num_double_registers,
+ int num_allocatable_general_registers, int num_allocatable_double_registers,
+ int num_allocatable_aliased_double_registers,
+ const int* allocatable_general_codes, const int* allocatable_double_codes,
+ const char* const* general_register_names,
+ const char* const* double_register_names)
+ : num_general_registers_(num_general_registers),
+ num_double_registers_(num_double_registers),
+ num_allocatable_general_registers_(num_allocatable_general_registers),
+ num_allocatable_double_registers_(num_allocatable_double_registers),
+ num_allocatable_aliased_double_registers_(
+ num_allocatable_aliased_double_registers),
+ allocatable_general_codes_mask_(0),
+ allocatable_double_codes_mask_(0),
+ allocatable_general_codes_(allocatable_general_codes),
+ allocatable_double_codes_(allocatable_double_codes),
+ general_register_names_(general_register_names),
+ double_register_names_(double_register_names) {
+ for (int i = 0; i < num_allocatable_general_registers_; ++i) {
+ allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
+ }
+ for (int i = 0; i < num_allocatable_double_registers_; ++i) {
+ allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
+ }
+}
+
+#undef REGISTER_COUNT
+
+} // namespace internal
+} // namespace v8
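
Note: the REGISTER_COUNT define above is the classic X-macro counting trick: applying "1 +" to every list element and terminating the expansion with a literal 0 makes the preprocessor emit a compile-time element count. A tiny self-contained illustration, with an invented register list:

    #define DEMO_REGISTERS(V) V(r0) V(r1) V(r2)

    #define REGISTER_COUNT(R) 1 +
    static const int kDemoRegisterCount = DEMO_REGISTERS(REGISTER_COUNT) 0;
    #undef REGISTER_COUNT
    // Expands to: 1 + 1 + 1 + 0, i.e. 3.

    #define REGISTER_NAME(R) #R,
    static const char* const kDemoNames[] = {DEMO_REGISTERS(REGISTER_NAME)};
    #undef REGISTER_NAME
    // Expands to: {"r0", "r1", "r2",} -- the same list drives both tables.

Because ALLOCATABLE_GENERAL_REGISTERS and friends drive the counts, the codes, and the name tables, the three can never drift out of sync.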
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
new file mode 100644
index 0000000000..8ad1d78304
--- /dev/null
+++ b/deps/v8/src/register-configuration.h
@@ -0,0 +1,95 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
+#define V8_COMPILER_REGISTER_CONFIGURATION_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// An architecture independent representation of the sets of registers available
+// for instruction creation.
+class RegisterConfiguration {
+ public:
+ // Define the optimized compiler selector for register configuration
+ // selection.
+ //
+ // TODO(X87): This distinction in RegisterConfigurations is temporary
+ // until x87 TF supports all of the registers that Crankshaft does.
+ enum CompilerSelector { CRANKSHAFT, TURBOFAN };
+
+ // Architecture independent maxes.
+ static const int kMaxGeneralRegisters = 32;
+ static const int kMaxDoubleRegisters = 32;
+
+ static const RegisterConfiguration* ArchDefault(CompilerSelector compiler);
+
+ RegisterConfiguration(int num_general_registers, int num_double_registers,
+ int num_allocatable_general_registers,
+ int num_allocatable_double_registers,
+ int num_allocatable_aliased_double_registers,
+ const int* allocatable_general_codes,
+ const int* allocatable_double_codes,
+ char const* const* general_names,
+ char const* const* double_names);
+
+ int num_general_registers() const { return num_general_registers_; }
+ int num_double_registers() const { return num_double_registers_; }
+ int num_allocatable_general_registers() const {
+ return num_allocatable_general_registers_;
+ }
+ int num_allocatable_double_registers() const {
+ return num_allocatable_double_registers_;
+ }
+ // TODO(turbofan): This is a temporary work-around required because our
+ // register allocator does not yet support the aliasing of single/double
+ // registers on ARM.
+ int num_allocatable_aliased_double_registers() const {
+ return num_allocatable_aliased_double_registers_;
+ }
+ int32_t allocatable_general_codes_mask() const {
+ return allocatable_general_codes_mask_;
+ }
+ int32_t allocatable_double_codes_mask() const {
+ return allocatable_double_codes_mask_;
+ }
+ int GetAllocatableGeneralCode(int index) const {
+ return allocatable_general_codes_[index];
+ }
+ int GetAllocatableDoubleCode(int index) const {
+ return allocatable_double_codes_[index];
+ }
+ const char* GetGeneralRegisterName(int code) const {
+ return general_register_names_[code];
+ }
+ const char* GetDoubleRegisterName(int code) const {
+ return double_register_names_[code];
+ }
+ const int* allocatable_general_codes() const {
+ return allocatable_general_codes_;
+ }
+ const int* allocatable_double_codes() const {
+ return allocatable_double_codes_;
+ }
+
+ private:
+ const int num_general_registers_;
+ const int num_double_registers_;
+ int num_allocatable_general_registers_;
+ int num_allocatable_double_registers_;
+ int num_allocatable_aliased_double_registers_;
+ int32_t allocatable_general_codes_mask_;
+ int32_t allocatable_double_codes_mask_;
+ const int* allocatable_general_codes_;
+ const int* allocatable_double_codes_;
+ char const* const* general_register_names_;
+ char const* const* double_register_names_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
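
Note: a hedged sketch of how a client such as the register allocator might consume this interface. The loop is illustrative and not part of this commit, but it uses only accessors declared above plus V8's internal DCHECK_NE/PrintF helpers:

    const RegisterConfiguration* config =
        RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
    for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
      int code = config->GetAllocatableGeneralCode(i);
      // Every allocatable code also appears in the precomputed bit mask.
      DCHECK_NE(0, config->allocatable_general_codes_mask() & (1 << code));
      PrintF("allocatable: %s\n", config->GetGeneralRegisterName(code));
    }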
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index d88e1199f8..1f19739331 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -13,23 +13,49 @@ namespace internal {
class Processor: public AstVisitor {
public:
- Processor(Isolate* isolate, Variable* result,
+ Processor(Isolate* isolate, Scope* scope, Variable* result,
AstValueFactory* ast_value_factory)
: result_(result),
result_assigned_(false),
+ replacement_(nullptr),
is_set_(false),
- in_try_(false),
+ zone_(ast_value_factory->zone()),
+ scope_(scope),
factory_(ast_value_factory) {
- InitializeAstVisitor(isolate, ast_value_factory->zone());
+ InitializeAstVisitor(isolate);
}
- virtual ~Processor() { }
+ Processor(Parser* parser, Scope* scope, Variable* result,
+ AstValueFactory* ast_value_factory)
+ : result_(result),
+ result_assigned_(false),
+ replacement_(nullptr),
+ is_set_(false),
+ scope_(scope),
+ factory_(ast_value_factory) {
+ InitializeAstVisitor(parser->stack_limit());
+ }
+
+ ~Processor() override {}
void Process(ZoneList<Statement*>* statements);
bool result_assigned() const { return result_assigned_; }
+ Zone* zone() { return zone_; }
+ Scope* scope() { return scope_; }
AstNodeFactory* factory() { return &factory_; }
+ // Returns ".result = value"
+ Expression* SetResult(Expression* value) {
+ result_assigned_ = true;
+ VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
+ return factory()->NewAssignment(Token::ASSIGN, result_proxy, value,
+ RelocInfo::kNoPosition);
+ }
+
+ // Inserts '.result = undefined' in front of the given statement.
+ Statement* AssignUndefinedBefore(Statement* s);
+
private:
Variable* result_;
@@ -39,24 +65,22 @@ class Processor: public AstVisitor {
// there was ever an assignment to result_.
bool result_assigned_;
+ // When visiting a node, we "return" a replacement for that node in
+ // [replacement_]. In many cases this will just be the original node.
+ Statement* replacement_;
+
// To avoid storing to .result all the time, we eliminate some of
// the stores by keeping track of whether or not we're sure .result
// will be overwritten anyway. This is a bit more tricky than what I
- // was hoping for
+ // was hoping for.
bool is_set_;
- bool in_try_;
+ Zone* zone_;
+ Scope* scope_;
AstNodeFactory factory_;
- Expression* SetResult(Expression* value) {
- result_assigned_ = true;
- VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
- return factory()->NewAssignment(
- Token::ASSIGN, result_proxy, value, RelocInfo::kNoPosition);
- }
-
// Node visitors.
-#define DEF_VISIT(type) virtual void Visit##type(type* node) override;
+#define DEF_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
@@ -66,9 +90,24 @@ class Processor: public AstVisitor {
};
+Statement* Processor::AssignUndefinedBefore(Statement* s) {
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* undef = factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, undef, RelocInfo::kNoPosition);
+ Block* b = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+ b->statements()->Add(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
+ b->statements()->Add(s, zone());
+ return b;
+}
+
+
void Processor::Process(ZoneList<Statement*>* statements) {
for (int i = statements->length() - 1; i >= 0; --i) {
Visit(statements->at(i));
+ statements->Set(i, replacement_);
}
}
@@ -83,34 +122,52 @@ void Processor::VisitBlock(Block* node) {
// returns 'undefined'. To obtain the same behavior with v8, we need
// to prevent rewriting in that case.
if (!node->ignore_completion_value()) Process(node->statements());
+ replacement_ = node;
}
void Processor::VisitExpressionStatement(ExpressionStatement* node) {
// Rewrite : <x>; -> .result = <x>;
- if (!is_set_ && !node->expression()->IsThrow()) {
+ if (!is_set_) {
node->set_expression(SetResult(node->expression()));
- if (!in_try_) is_set_ = true;
+ is_set_ = true;
}
+ replacement_ = node;
}
void Processor::VisitIfStatement(IfStatement* node) {
- // Rewrite both then and else parts (reversed).
- bool save = is_set_;
- Visit(node->else_statement());
- bool set_after_then = is_set_;
- is_set_ = save;
+ // Rewrite both branches.
+ bool set_after = is_set_;
Visit(node->then_statement());
- is_set_ = is_set_ && set_after_then;
+ node->set_then_statement(replacement_);
+ bool set_in_then = is_set_;
+ is_set_ = set_after;
+ Visit(node->else_statement());
+ node->set_else_statement(replacement_);
+ is_set_ = is_set_ && set_in_then;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
}
void Processor::VisitIterationStatement(IterationStatement* node) {
// Rewrite the body.
- bool set_after_loop = is_set_;
+ bool set_after = is_set_;
+ is_set_ = false; // We are in a loop, so we can't rely on [set_after].
Visit(node->body());
- is_set_ = is_set_ && set_after_loop;
+ node->set_body(replacement_);
+ is_set_ = is_set_ && set_after;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
}
@@ -140,79 +197,141 @@ void Processor::VisitForOfStatement(ForOfStatement* node) {
void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
- // Rewrite both try and catch blocks (reversed order).
- bool set_after_catch = is_set_;
- Visit(node->catch_block());
- is_set_ = is_set_ && set_after_catch;
- bool save = in_try_;
- in_try_ = true;
+ // Rewrite both try and catch block.
+ bool set_after = is_set_;
Visit(node->try_block());
- in_try_ = save;
+ node->set_try_block(static_cast<Block*>(replacement_));
+ bool set_in_try = is_set_;
+ is_set_ = set_after;
+ Visit(node->catch_block());
+ node->set_catch_block(static_cast<Block*>(replacement_));
+ is_set_ = is_set_ && set_in_try;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
}
void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- // Rewrite both try and finally block (reversed order).
+ // Rewrite both try and finally block (in reverse order).
+ bool set_after = is_set_;
+ is_set_ = true; // Don't normally need to assign in finally block.
Visit(node->finally_block());
- bool save = in_try_;
- in_try_ = true;
+ node->set_finally_block(replacement_->AsBlock());
+ { // Save .result value at the beginning of the finally block and restore it
+ // at the end again: ".backup = .result; ...; .result = .backup"
+ // This is necessary because the finally block does not normally contribute
+ // to the completion value.
+ Variable* backup = scope()->NewTemporary(
+ factory()->ast_value_factory()->dot_result_string());
+ Expression* backup_proxy = factory()->NewVariableProxy(backup);
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* save = factory()->NewAssignment(
+ Token::ASSIGN, backup_proxy, result_proxy, RelocInfo::kNoPosition);
+ Expression* restore = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, backup_proxy, RelocInfo::kNoPosition);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(save, RelocInfo::kNoPosition),
+ zone());
+ node->finally_block()->statements()->Add(
+ factory()->NewExpressionStatement(restore, RelocInfo::kNoPosition),
+ zone());
+ }
+ is_set_ = set_after;
Visit(node->try_block());
- in_try_ = save;
+ node->set_try_block(replacement_->AsBlock());
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
}
void Processor::VisitSwitchStatement(SwitchStatement* node) {
- // Rewrite statements in all case clauses in reversed order.
+ // Rewrite statements in all case clauses (in reverse order).
ZoneList<CaseClause*>* clauses = node->cases();
- bool set_after_switch = is_set_;
+ bool set_after = is_set_;
for (int i = clauses->length() - 1; i >= 0; --i) {
CaseClause* clause = clauses->at(i);
Process(clause->statements());
}
- is_set_ = is_set_ && set_after_switch;
+ is_set_ = is_set_ && set_after;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
}
void Processor::VisitContinueStatement(ContinueStatement* node) {
is_set_ = false;
+ replacement_ = node;
}
void Processor::VisitBreakStatement(BreakStatement* node) {
is_set_ = false;
+ replacement_ = node;
}
void Processor::VisitWithStatement(WithStatement* node) {
- bool set_after_body = is_set_;
Visit(node->statement());
- is_set_ = is_set_ && set_after_body;
+ node->set_statement(replacement_);
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
}
void Processor::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
Visit(node->statement());
+ node->set_statement(replacement_);
+ replacement_ = node;
}
-// Do nothing:
-void Processor::VisitVariableDeclaration(VariableDeclaration* node) {}
-void Processor::VisitFunctionDeclaration(FunctionDeclaration* node) {}
-void Processor::VisitImportDeclaration(ImportDeclaration* node) {}
-void Processor::VisitExportDeclaration(ExportDeclaration* node) {}
-void Processor::VisitEmptyStatement(EmptyStatement* node) {}
-void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
+void Processor::VisitEmptyStatement(EmptyStatement* node) {
+ replacement_ = node;
+}
+
+
+void Processor::VisitReturnStatement(ReturnStatement* node) {
+ is_set_ = true;
+ replacement_ = node;
+}
+
+
+void Processor::VisitDebuggerStatement(DebuggerStatement* node) {
+ replacement_ = node;
+}
-// Expressions are never visited yet.
+// Expressions are never visited.
#define DEF_VISIT(type) \
void Processor::Visit##type(type* expr) { UNREACHABLE(); }
EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+// Declarations are never visited.
+#define DEF_VISIT(type) \
+ void Processor::Visit##type(type* expr) { UNREACHABLE(); }
+DECLARATION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+
// Assumes code has been parsed. Mutates the AST, so the AST should not
// continue to be used in the case of failure.
bool Rewriter::Rewrite(ParseInfo* info) {
@@ -228,7 +347,8 @@ bool Rewriter::Rewrite(ParseInfo* info) {
scope->NewTemporary(info->ast_value_factory()->dot_result_string());
// The name string must be internalized at this point.
DCHECK(!result->name().is_null());
- Processor processor(info->isolate(), result, info->ast_value_factory());
+ Processor processor(info->isolate(), scope, result,
+ info->ast_value_factory());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -253,5 +373,31 @@ bool Rewriter::Rewrite(ParseInfo* info) {
}
+bool Rewriter::Rewrite(Parser* parser, DoExpression* expr,
+ AstValueFactory* factory) {
+ Block* block = expr->block();
+ Scope* scope = block->scope();
+ ZoneList<Statement*>* body = block->statements();
+ VariableProxy* result = expr->result();
+ Variable* result_var = result->var();
+
+ if (!body->is_empty()) {
+ Processor processor(parser, scope, result_var, factory);
+ processor.Process(body);
+ if (processor.HasStackOverflow()) return false;
+
+ if (!processor.result_assigned()) {
+ AstNodeFactory* node_factory = processor.factory();
+ Expression* undef =
+ node_factory->NewUndefinedLiteral(RelocInfo::kNoPosition);
+ Statement* completion = node_factory->NewExpressionStatement(
+ processor.SetResult(undef), expr->position());
+ body->Add(completion, factory->zone());
+ }
+ }
+ return true;
+}
+
+
} // namespace internal
} // namespace v8
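
Note: the save/restore pair inserted by VisitTryFinallyStatement exists because a finally block must not normally contribute to the completion value. Conceptually (illustrative only, using the internal temporaries .result and .backup from the diff), the rewriter turns a program like "try { 'a' } finally { 'b' }" into:

    // try {
    //   .result = 'a';       // ExpressionStatement rewritten via SetResult
    // } finally {
    //   .backup = .result;   // inserted at the start of the finally block
    //   'b';                 // evaluated, but never stored into .result
    //   .result = .backup;   // restored at the end of the finally block
    // }
    // ... so eval("try { 'a' } finally { 'b' }") yields 'a', not 'b'.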
diff --git a/deps/v8/src/rewriter.h b/deps/v8/src/rewriter.h
index b283a55ce0..fdb36d1d3e 100644
--- a/deps/v8/src/rewriter.h
+++ b/deps/v8/src/rewriter.h
@@ -8,7 +8,10 @@
namespace v8 {
namespace internal {
+class AstValueFactory;
+class DoExpression;
class ParseInfo;
+class Parser;
class Rewriter {
public:
@@ -19,9 +22,15 @@ class Rewriter {
// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
static bool Rewrite(ParseInfo* info);
+
+ // Rewrite a list of statements, using the same rules as a top-level program,
+ // to ensure identical behaviour of the completion result.
+ static bool Rewrite(Parser* parser, DoExpression* expr,
+ AstValueFactory* factory);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REWRITER_H_
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 6b942d44a6..d2edd1b2b7 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -108,7 +108,7 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
int loop_nesting_levels) {
SharedFunctionInfo* shared = function->shared();
- if (!FLAG_use_osr || function->IsBuiltin()) {
+ if (!FLAG_use_osr || function->shared()->IsBuiltin()) {
return;
}
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index eff443d926..0d57929d06 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -39,6 +39,7 @@ class RuntimeProfiler {
bool any_ic_changed_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_RUNTIME_PROFILER_H_
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 6fc1ad4ea1..67eaa4b632 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -9,6 +9,7 @@
#include "src/elements.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
+#include "src/key-accumulator.h"
#include "src/messages.h"
#include "src/prototype.h"
@@ -206,6 +207,7 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
}
KeyAccumulator accumulator(isolate);
+ // No need to separate prototype levels since we only get numbers/element keys.
for (PrototypeIterator iter(isolate, array,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
@@ -216,16 +218,14 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
// collecting keys in that case.
return *isolate->factory()->NewNumberFromUint(length);
}
+ accumulator.NextPrototype();
Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- Handle<FixedArray> current_keys =
- isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE));
- current->GetOwnElementKeys(*current_keys, NONE);
- accumulator.AddKeys(current_keys, FixedArray::ALL_KEYS);
+ JSObject::CollectOwnElementKeys(current, &accumulator, NONE);
}
// Erase any keys >= length.
// TODO(adamk): Remove this step when the contract of %GetArrayKeys
// is changed to let this happen on the JS side.
- Handle<FixedArray> keys = accumulator.GetKeys();
+ Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
for (int i = 0; i < keys->length(); i++) {
if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
}
@@ -253,7 +253,7 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
can_use_type_feedback = false;
} else if (value != 0) {
holey = true;
- if (value >= JSObject::kInitialMaxFastElementArray) {
+ if (value >= JSArray::kInitialMaxFastElementArray) {
can_inline_array_constructor = false;
}
}
@@ -321,8 +321,9 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
if (original_constructor->has_instance_prototype()) {
Handle<Object> prototype =
handle(original_constructor->instance_prototype(), isolate);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(array, prototype, false));
+ MAYBE_RETURN(JSObject::SetPrototype(array, prototype, false,
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
}
}
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 84eab2ce11..636371c134 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -721,5 +721,5 @@ RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
uint32_t usize = NumberToUint32(*size);
return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 51e682f325..ca5fecb0ab 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -123,6 +123,7 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
Handle<Map> map =
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ map->set_is_prototype_map(true);
if (constructor->map()->is_strong()) {
map->set_is_strong();
if (super_class->IsNull()) {
@@ -162,9 +163,8 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
Object);
if (!constructor_parent.is_null()) {
- RETURN_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(constructor, constructor_parent, false),
- Object);
+ MAYBE_RETURN_NULL(JSObject::SetPrototype(constructor, constructor_parent,
+ false, Object::THROW_ON_ERROR));
}
JSObject::AddProperty(prototype, isolate->factory()->constructor_string(),
@@ -224,7 +224,6 @@ RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, prototype, 1);
- JSObject::MigrateSlowToFast(prototype, 0, "RuntimeToFastProperties");
JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
if (constructor->map()->is_strong()) {
@@ -269,7 +268,8 @@ static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
Handle<JSObject> home_object,
Handle<Name> name,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
@@ -293,7 +293,8 @@ static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
Handle<JSObject> home_object,
uint32_t index,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
@@ -369,7 +370,8 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Name> name,
Handle<Object> value, LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -379,12 +381,10 @@ static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::SetSuperProperty(&it, value, language_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED));
- return *result;
+ MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
@@ -393,7 +393,8 @@ static Object* StoreElementToSuper(Isolate* isolate,
Handle<Object> receiver, uint32_t index,
Handle<Object> value,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -403,12 +404,10 @@ static Object* StoreElementToSuper(Isolate* isolate,
if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::SetSuperProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED));
- return *result;
+ MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
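
Note: the hunks above migrate from MaybeHandle-returning setters to Maybe<bool>-returning ones. A hedged sketch of the idiom follows; DoStore is hypothetical, while MAYBE_RETURN, as used in the diff, bails out with the given value when the Maybe is Nothing, i.e. when an exception is already pending:

    Maybe<bool> DoStore(Handle<Object> receiver, Handle<Object> value);

    Object* StoreWrapper(Isolate* isolate, Handle<Object> receiver,
                         Handle<Object> value) {
      // Nothing<bool>() means an exception is scheduled; propagate it.
      MAYBE_RETURN(DoStore(receiver, value), isolate->heap()->exception());
      return *value;  // on success, a store expression evaluates to |value|
    }

Returning *value directly, rather than whatever the setter produced, matches the new StoreToSuper/StoreElementToSuper bodies above.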
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 9f49e4d5d2..27216fb323 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -156,29 +156,24 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
RUNTIME_ASSERT_HANDLIFIED(function->function_bindings()->IsFixedArray(),
JSArray);
- Handle<FixedArray> bindings(function->function_bindings());
+ Handle<BindingsArray> bindings(function->function_bindings());
Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
Handle<String> target =
factory->NewStringFromAsciiChecked("[[TargetFunction]]");
result->set(0, *target);
- result->set(1, bindings->get(JSFunction::kBoundFunctionIndex));
+ result->set(1, bindings->bound_function());
Handle<String> bound_this =
factory->NewStringFromAsciiChecked("[[BoundThis]]");
result->set(2, *bound_this);
- result->set(3, bindings->get(JSFunction::kBoundThisIndex));
+ result->set(3, bindings->bound_this());
- Handle<FixedArray> arguments = factory->NewFixedArray(
- bindings->length() - JSFunction::kBoundArgumentsStartIndex);
- bindings->CopyTo(
- JSFunction::kBoundArgumentsStartIndex, *arguments, 0,
- bindings->length() - JSFunction::kBoundArgumentsStartIndex);
Handle<String> bound_args =
factory->NewStringFromAsciiChecked("[[BoundArgs]]");
result->set(4, *bound_args);
Handle<JSArray> arguments_array =
- factory->NewJSArrayWithElements(arguments);
+ BindingsArray::CreateBoundArguments(bindings);
result->set(5, *arguments_array);
return factory->NewJSArrayWithElements(result);
}
@@ -456,7 +451,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
it.frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
// Omit functions from native and extension scripts.
- if (frames[i].function()->IsSubjectToDebugging()) n++;
+ if (frames[i].function()->shared()->IsSubjectToDebugging()) n++;
}
}
return Smi::FromInt(n);
@@ -534,7 +529,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
- RUNTIME_ASSERT(function->IsSubjectToDebugging());
+ RUNTIME_ASSERT(function->shared()->IsSubjectToDebugging());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
DCHECK(*scope_info != ScopeInfo::Empty(isolate));
@@ -709,22 +704,19 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
// Add the receiver (same as in function frame).
- // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
- // THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- DCHECK(!function->IsBuiltin());
+ DCHECK(!function->shared()->IsBuiltin());
if (!receiver->IsJSObject() && is_sloppy(shared->language_mode())) {
- // If the receiver is not a JSObject and the function is not a
- // builtin or strict-mode we have hit an optimization where a
- // value object is not converted into a wrapped JS objects. To
- // hide this optimization from the debugger, we wrap the receiver
- // by creating correct wrapper object based on the calling frame's
- // native context.
- it.Advance();
+ // If the receiver is not a JSObject and the function is not a builtin or
+ // strict-mode function, we have hit an optimization where a value object
+ // is not converted into a wrapped JS object. To hide this optimization
+ // from the debugger, we wrap the receiver by creating a correct wrapper
+ // object based on the function's native context.
+ // See ECMA-262 6.0, 9.2.1.2, 6 b iii.
if (receiver->IsUndefined()) {
receiver = handle(function->global_proxy());
} else {
- Context* context = Context::cast(it.frame()->context());
+ Context* context = function->context();
Handle<Context> native_context(Context::cast(context->native_context()));
if (!Object::ToObject(isolate, receiver, native_context)
.ToHandle(&receiver)) {
@@ -1070,11 +1062,11 @@ RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
// Sets the disable break state
// args[0]: disable break state
-RUNTIME_FUNCTION(Runtime_SetDisableBreak) {
+RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
- isolate->debug()->set_disable_break(disable_break);
+ CONVERT_BOOLEAN_ARG_CHECKED(active, 0);
+ isolate->debug()->set_break_points_active(active);
return isolate->heap()->undefined_value();
}
@@ -1457,7 +1449,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- return *Object::GetPrototypeSkipHiddenPrototypes(isolate, obj);
+ return *Object::GetPrototype(isolate, obj);
}
@@ -1620,33 +1612,56 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
}
+bool DebugStepInIsActive(Debug* debug) {
+ return debug->is_active() && debug->IsStepping() &&
+ debug->last_step_action() == StepIn;
+}
+
+
// Check whether debugger is about to step into the callback that is passed
-// to a built-in function such as Array.forEach.
+// to a built-in function such as Array.forEach. This check is done before
+// %DebugPrepareStepInIfStepping and is not strictly necessary. However, if it
+// returns false, we can skip %DebugPrepareStepInIfStepping, which is useful
+// in loops.
RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
+ SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
- Debug* debug = isolate->debug();
- if (!debug->is_active() || !debug->IsStepping() ||
- debug->last_step_action() != StepIn) {
+ if (!DebugStepInIsActive(isolate->debug())) {
return isolate->heap()->false_value();
}
- CONVERT_ARG_CHECKED(Object, callback, 0);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
// We do not step into the callback if it's a builtin other than a bound
// one, or not even a function.
- return isolate->heap()->ToBoolean(
- callback->IsJSFunction() &&
- (JSFunction::cast(callback)->IsSubjectToDebugging() ||
- JSFunction::cast(callback)->shared()->bound()));
+ JSFunction* fun;
+ if (object->IsJSFunction()) {
+ fun = JSFunction::cast(object);
+ } else {
+ fun = JSGeneratorObject::cast(object)->function();
+ }
+ return isolate->heap()->ToBoolean(fun->shared()->IsSubjectToDebugging() ||
+ fun->shared()->bound());
+}
+
+
+void FloodDebugSubjectWithOneShot(Debug* debug, Handle<JSFunction> function) {
+ if (function->shared()->IsSubjectToDebugging() ||
+ function->shared()->bound()) {
+ // When leaving the function, step out has been activated, but not performed
+ // if we do not leave the builtin. To be able to step into the function
+ // again, we need to clear the step out at this point.
+ debug->ClearStepOut();
+ debug->FloodWithOneShotGeneric(function);
+ }
}
// Set one shot breakpoints for the callback function that is passed to a
-// built-in function such as Array.forEach to enable stepping into the callback.
+// built-in function such as Array.forEach to enable stepping into the callback,
+// if we are indeed stepping and the callback is subject to debugging.
RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
DCHECK(args.length() == 1);
- RUNTIME_ASSERT(isolate->debug()->is_active());
-
Debug* debug = isolate->debug();
- if (!debug->IsStepping()) return isolate->heap()->undefined_value();
+ if (!DebugStepInIsActive(debug)) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
@@ -1658,21 +1673,23 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
fun = Handle<JSFunction>(
Handle<JSGeneratorObject>::cast(object)->function(), isolate);
}
- // When leaving the function, step out has been activated, but not performed
- // if we do not leave the builtin. To be able to step into the function
- // again, we need to clear the step out at this point.
- debug->ClearStepOut();
- debug->FloodWithOneShotGeneric(fun);
+
+ FloodDebugSubjectWithOneShot(debug, fun);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 3);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, handler, 2);
isolate->PushPromise(promise, function);
+ Debug* debug = isolate->debug();
+ if (handler->IsJSFunction() && DebugStepInIsActive(debug)) {
+ FloodDebugSubjectWithOneShot(debug, Handle<JSFunction>::cast(handler));
+ }
return isolate->heap()->undefined_value();
}
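
Note: DebugCallbackSupportsStepping is meant as a cheap pre-check that self-hosted builtins can hoist out of their loops, with DebugPrepareStepInIfStepping doing the per-call flooding. A hedged sketch of the intended pairing; the JS caller is illustrative, not from this commit:

    // var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
    // for (var i = 0; i < length; i++) {
    //   if (stepping) %DebugPrepareStepInIfStepping(f);  // flood f with
    //   f(array[i], i, array);                           // one-shot breaks
    // }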
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 18a0865f27..16e6149e7c 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -47,12 +47,40 @@ RUNTIME_FUNCTION(Runtime_FunctionNameShouldPrintAsAnonymous) {
}
-RUNTIME_FUNCTION(Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
+RUNTIME_FUNCTION(Runtime_CompleteFunctionConstruction) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- f->shared()->set_name_should_print_as_anonymous(true);
- return isolate->heap()->undefined_value();
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 2);
+ func->shared()->set_name_should_print_as_anonymous(true);
+
+ // If new.target is equal to |constructor| then the function |func| created
+ // is already correctly set up and nothing else needs to be done here.
+ // But if new.target is not equal to |constructor| then we have a
+ // Function builtin subclassing case, and therefore the function |func|
+ // has the wrong initial map. To fix that we create a new function object
+ // with the correct initial map.
+ if (new_target->IsUndefined() || *constructor == *new_target) {
+ return *func;
+ }
+
+ // Create a new JSFunction object with correct initial map.
+ HandleScope handle_scope(isolate);
+ Handle<JSFunction> original_constructor =
+ Handle<JSFunction>::cast(new_target);
+
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> initial_map =
+ JSFunction::EnsureDerivedHasInitialMap(original_constructor, constructor);
+
+ Handle<SharedFunctionInfo> shared_info(func->shared(), isolate);
+ Handle<Context> context(func->context(), isolate);
+ Handle<JSFunction> result =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ initial_map, shared_info, context, NOT_TENURED);
+ DCHECK_EQ(func->IsConstructor(), result->IsConstructor());
+ return *result;
}
@@ -135,7 +163,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_CHECKED(String, name, 1);
- fun->SetInstanceClassName(name);
+ fun->shared()->set_instance_class_name(name);
return isolate->heap()->undefined_value();
}
@@ -357,7 +385,7 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, bindee, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, bindee, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, this_object, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
@@ -378,30 +406,28 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
}
// Initialize array of bindings (function, this, and any existing arguments
// if the function was already bound).
- Handle<FixedArray> new_bindings;
- int i;
+ Handle<BindingsArray> new_bindings;
+ int out_index = 0;
+ Handle<TypeFeedbackVector> vector(
+ bound_function->shared()->feedback_vector());
if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
- Handle<FixedArray> old_bindings(
+ Handle<BindingsArray> old_bindings(
JSFunction::cast(*bindee)->function_bindings());
- RUNTIME_ASSERT(old_bindings->length() > JSFunction::kBoundFunctionIndex);
- new_bindings =
- isolate->factory()->NewFixedArray(old_bindings->length() + argc);
- bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex),
- isolate);
- i = 0;
- for (int n = old_bindings->length(); i < n; i++) {
- new_bindings->set(i, old_bindings->get(i));
+ RUNTIME_ASSERT(old_bindings->bindings_count() >= 0);
+ bindee = handle(old_bindings->bound_function(), isolate);
+ Handle<Object> old_bound_this(old_bindings->bound_this(), isolate);
+ new_bindings = BindingsArray::New(isolate, vector, bindee, old_bound_this,
+ old_bindings->bindings_count() + argc);
+ for (int n = old_bindings->bindings_count(); out_index < n; out_index++) {
+ new_bindings->set_binding(out_index, old_bindings->binding(out_index));
}
} else {
- int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
- new_bindings = isolate->factory()->NewFixedArray(array_size);
- new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
- new_bindings->set(JSFunction::kBoundThisIndex, *this_object);
- i = 2;
+ new_bindings =
+ BindingsArray::New(isolate, vector, bindee, this_object, argc);
}
// Copy arguments, skipping the first which is "this_arg".
- for (int j = 0; j < argc; j++, i++) {
- new_bindings->set(i, *arguments[j + 1]);
+ for (int j = 0; j < argc; j++, out_index++) {
+ new_bindings->set_binding(out_index, *arguments[j + 1]);
}
new_bindings->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
bound_function->set_function_bindings(*new_bindings);
@@ -444,9 +470,9 @@ RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) {
if (callable->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
if (function->shared()->bound()) {
- RUNTIME_ASSERT(function->function_bindings()->IsFixedArray());
- Handle<FixedArray> bindings(function->function_bindings());
- return *isolate->factory()->NewJSArrayWithElements(bindings);
+ RUNTIME_ASSERT(function->function_bindings()->IsBindingsArray());
+ Handle<BindingsArray> bindings(function->function_bindings());
+ return *BindingsArray::CreateRuntimeBindings(bindings);
}
}
return isolate->heap()->undefined_value();
@@ -462,12 +488,10 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
// The argument is a bound function. Extract its bound arguments
// and callable.
- Handle<FixedArray> bound_args =
- Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
- int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
- Handle<Object> bound_function(
- JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
- isolate);
+ Handle<BindingsArray> bound_args =
+ handle(BindingsArray::cast(function->function_bindings()));
+ int bound_argc = bound_args->bindings_count();
+ Handle<Object> bound_function(bound_args->bound_function(), isolate);
DCHECK(!bound_function->IsJSFunction() ||
!Handle<JSFunction>::cast(bound_function)->shared()->bound());
@@ -475,8 +499,7 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
base::SmartArrayPointer<Handle<Object>> param_data =
Runtime::GetCallerArguments(isolate, bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
- param_data[i] = Handle<Object>(
- bound_args->get(JSFunction::kBoundArgumentsStartIndex + i), isolate);
+ param_data[i] = handle(bound_args->binding(i), isolate);
}
Handle<Object> result;
@@ -547,41 +570,22 @@ RUNTIME_FUNCTION(Runtime_GetOriginalConstructor) {
DCHECK(args.length() == 0);
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- return frame->IsConstructor() ? frame->GetOriginalConstructor()
- : isolate->heap()->undefined_value();
+ // Currently we don't inline [[Construct]] calls.
+ return frame->IsConstructor() && !frame->HasInlinedFrames()
+ ? frame->GetOriginalConstructor()
+ : isolate->heap()->undefined_value();
}
-// TODO(bmeurer): Kill %_CallFunction ASAP as it is almost never used
-// correctly because of the weird semantics underneath.
-RUNTIME_FUNCTION(Runtime_CallFunction) {
+// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
+RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
HandleScope scope(isolate);
- DCHECK(args.length() >= 2);
- int argc = args.length() - 2;
- CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
- Object* receiver = args[0];
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- base::SmartArrayPointer<Handle<Object>> argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = base::SmartArrayPointer<Handle<Object>>(argv);
- }
-
- for (int i = 0; i < argc; ++i) {
- argv[i] = Handle<Object>(args[1 + i], isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ if (receiver->IsNull() || receiver->IsUndefined()) {
+ return isolate->global_proxy();
}
-
- Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver, isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, hfun, hreceiver, argc, argv));
- return *result;
+ return *Object::ToObject(isolate, receiver).ToHandleChecked();
}
@@ -608,5 +612,6 @@ RUNTIME_FUNCTION(Runtime_ThrowStrongModeTooFewArguments) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kStrongArity));
}
+
} // namespace internal
} // namespace v8
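
Note: BindingsArray replaces the raw FixedArray slot protocol (kBoundFunctionIndex and friends) with typed accessors. A simplified standalone model of the layout the accessors above imply; std::vector stands in for the real FixedArray backing store:

    #include <vector>

    struct BindingsArrayModel {
      void* bound_function;          // bound_function()
      void* bound_this;              // bound_this()
      std::vector<void*> bindings;   // binding(i): the captured arguments

      int bindings_count() const { return static_cast<int>(bindings.size()); }
      void* binding(int i) const { return bindings[i]; }
    };
    // Re-binding an already-bound function (f.bind(a).bind(b)) unwraps to the
    // innermost target and concatenates the old bindings with the new
    // arguments, exactly as the loop over old_bindings->binding(i) above does.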
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index a96758d9f3..b2bad77c98 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -89,5 +89,5 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexNumWaitersForTesting) {
return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 90d5532af3..478a954b3e 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -307,6 +307,7 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
MessageTemplate::FormatMessage(template_index, arg0, arg1, arg2));
+ isolate->native_context()->IncrementErrorsThrown();
return *result;
}
@@ -318,6 +319,7 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
Handle<String> result; \
CallSite call_site(isolate, call_site_obj); \
+ RUNTIME_ASSERT(call_site.IsValid()) \
return RETURN(call_site.NAME(), isolate); \
}
@@ -370,18 +372,6 @@ RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
}
-RUNTIME_FUNCTION(Runtime_Likely) {
- DCHECK(args.length() == 1);
- return args[0];
-}
-
-
-RUNTIME_FUNCTION(Runtime_Unlikely) {
- DCHECK(args.length() == 1);
- return args[0];
-}
-
-
RUNTIME_FUNCTION(Runtime_HarmonyToString) {
// TODO(caitp): Delete this runtime method when removing --harmony-tostring
return isolate->heap()->ToBoolean(FLAG_harmony_tostring);
@@ -423,7 +413,7 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
? new ParseInfo(&zone, location.function())
: new ParseInfo(&zone, location.script()));
if (Parser::ParseStatic(info.get())) {
- CallPrinter printer(isolate, &zone);
+ CallPrinter printer(isolate);
const char* string = printer.Print(info->literal(), location.start_pos());
return isolate->factory()->NewStringFromAsciiChecked(string);
} else {
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index e0a171267f..ef86869ccc 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -96,7 +96,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterGreaterThanOrEqual) {
RUNTIME_FUNCTION(Runtime_InterpreterStrictEquals) {
- SealHandleScope scope(isolate);
+ SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
CONVERT_ARG_CHECKED(Object, y, 1);
@@ -105,7 +105,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterStrictEquals) {
RUNTIME_FUNCTION(Runtime_InterpreterStrictNotEquals) {
- SealHandleScope scope(isolate);
+ SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
CONVERT_ARG_CHECKED(Object, y, 1);
@@ -114,12 +114,86 @@ RUNTIME_FUNCTION(Runtime_InterpreterStrictNotEquals) {
RUNTIME_FUNCTION(Runtime_InterpreterToBoolean) {
- SealHandleScope scope(isolate);
+ SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
return isolate->heap()->ToBoolean(x->BooleanValue());
}
+RUNTIME_FUNCTION(Runtime_InterpreterLogicalNot) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ return isolate->heap()->ToBoolean(!x->BooleanValue());
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterTypeOf) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ return Object::cast(*Object::TypeOf(isolate, x));
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ CONVERT_SMI_ARG_CHECKED(pretenured_flag, 1);
+ Handle<Context> context(isolate->context(), isolate);
+ return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, static_cast<PretenureFlag>(pretenured_flag));
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, property_names, 1);
+
+ Handle<Object> cache_type = property_names;
+ Handle<Map> cache_type_map = handle(property_names->map(), isolate);
+ Handle<Map> receiver_map = handle(receiver->map(), isolate);
+
+ Handle<FixedArray> cache_array;
+ int cache_length;
+
+ if (cache_type_map.is_identical_to(isolate->factory()->meta_map())) {
+ int enum_length = cache_type_map->EnumLength();
+ DescriptorArray* descriptors = receiver_map->instance_descriptors();
+ if (enum_length > 0 && descriptors->HasEnumCache()) {
+ cache_array = handle(descriptors->GetEnumCache(), isolate);
+ cache_length = cache_array->length();
+ } else {
+ cache_array = isolate->factory()->empty_fixed_array();
+ cache_length = 0;
+ }
+ } else {
+ cache_array = Handle<FixedArray>::cast(cache_type);
+ cache_length = cache_array->length();
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ if (receiver_map->instance_type() <= LAST_JS_PROXY_TYPE) {
+ DCHECK_GE(receiver_map->instance_type(), FIRST_JS_PROXY_TYPE);
+ // Zero indicates proxy
+ cache_type = Handle<Object>(Smi::FromInt(0), isolate);
+ } else {
+ // One entails slow check
+ cache_type = Handle<Object>(Smi::FromInt(1), isolate);
+ }
+ }
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(4);
+ result->set(0, *receiver);
+ result->set(1, *cache_array);
+ result->set(2, *cache_type);
+ result->set(3, Smi::FromInt(cache_length));
+ return *result;
+}
+
+
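
A standalone sketch of the {receiver, cache_array, cache_type, cache_length} quadruple built above and how an interpreter loop might consume it; ForInState and its fields are illustrative, not V8's actual layout.

#include <iostream>
#include <string>
#include <vector>

struct ForInState {
  const void* receiver;           // slot 0: the object being enumerated
  std::vector<std::string> keys;  // slot 1: cache_array (enum cache or keys)
  int cache_type;                 // slot 2: 0 = proxy, 1 = slow check, else map
  int length;                     // slot 3: number of keys to visit
};

int main() {
  ForInState state{nullptr, {"a", "b"}, /*slow check*/ 1, 2};
  for (int i = 0; i < state.length; i++) {
    // With cache_type == 1 the real loop re-checks each key against the
    // receiver before using it; that check is elided here.
    std::cout << state.keys[i] << "\n";
  }
}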
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index 947ef2c29b..dd3405a44c 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -229,7 +229,14 @@ RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
- return *LiveEdit::CompareStrings(s1, s2);
+ Handle<JSArray> result = LiveEdit::CompareStrings(s1, s2);
+ uint32_t array_length;
+ CHECK(result->length()->ToArrayLength(&array_length));
+ if (array_length > 0) {
+ isolate->debug()->feature_tracker()->Track(DebugFeatureTracker::kLiveEdit);
+ }
+
+ return *result;
}
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 504261679e..70c587d745 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -6,6 +6,7 @@
#include "src/arguments.h"
#include "src/assembler.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/third_party/fdlibm/fdlibm.h"
@@ -67,8 +68,8 @@ RUNTIME_FUNCTION(Runtime_RemPiO2) {
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_ARG_CHECKED(JSTypedArray, result, 1);
RUNTIME_ASSERT(result->byte_length() == Smi::FromInt(2 * sizeof(double)));
- void* backing_store = JSArrayBuffer::cast(result->buffer())->backing_store();
- double* y = static_cast<double*>(backing_store);
+ FixedFloat64Array* array = FixedFloat64Array::cast(result->elements());
+ double* y = static_cast<double*>(array->DataPtr());
return Smi::FromInt(fdlibm::rempio2(x, y));
}
@@ -244,5 +245,20 @@ RUNTIME_FUNCTION(Runtime_IsMinusZero) {
HeapNumber* number = HeapNumber::cast(obj);
return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
}
+
+
+RUNTIME_FUNCTION(Runtime_InitializeRNG) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ static const int kSize = 4;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(kSize);
+ uint16_t seeds[kSize];
+ do {
+ isolate->random_number_generator()->NextBytes(seeds,
+ kSize * sizeof(*seeds));
+ } while (!(seeds[0] && seeds[1] && seeds[2] && seeds[3]));
+ for (int i = 0; i < kSize; i++) array->set(i, Smi::FromInt(seeds[i]));
+ return *isolate->factory()->NewJSArrayWithElements(array);
+}
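
A standalone model of the seeding loop above, assuming std::random_device as a stand-in for the isolate's random_number_generator(): draw four 16-bit words and reject any draw containing a zero word, so multiply-based RNG state on the JS side can never collapse to zero.

#include <array>
#include <cstdint>
#include <random>

std::array<uint16_t, 4> InitializeSeeds() {
  std::random_device rd;  // stand-in for the per-isolate generator
  std::array<uint16_t, 4> seeds;
  do {
    for (auto& s : seeds) s = static_cast<uint16_t>(rd());
  } while (!(seeds[0] && seeds[1] && seeds[2] && seeds[3]));
  return seeds;  // all four words are guaranteed nonzero
}

int main() { return InitializeSeeds()[0] == 0; }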
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 177b3ff584..f976df951c 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -171,12 +172,13 @@ RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
}
+// TODO(bmeurer): Kill this runtime entry. Uses in date.js are wrong anyway.
RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- double double_value = DoubleToInteger(number);
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, input, Object::ToNumber(input));
+ double double_value = DoubleToInteger(input->Number());
// Map both -0 and +0 to +0.
if (double_value == 0) double_value = 0;
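
A minimal sketch of the minus-zero normalization used above: IEEE comparison treats -0 and +0 as equal, so the guarded store collapses both to +0.

#include <cassert>
#include <cmath>

double MapMinusZeroToPlusZero(double value) {
  // -0.0 == 0.0 is true under IEEE comparison, so this branch matches both
  // zeros, and the assignment replaces -0 with +0.
  if (value == 0) value = 0;
  return value;
}

int main() { assert(!std::signbit(MapMinusZeroToPlusZero(-0.0))); }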
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 4782a31430..a16e1295b9 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -26,26 +26,12 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Object);
}
- // Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return Object::GetElement(isolate, object, index, language_mode);
- }
+ bool success = false;
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, object, key, &success);
+ if (!success) return MaybeHandle<Object>();
- // Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- // TODO(verwaest): Make sure GetProperty(LookupIterator*) can handle this, and
- // remove the special casing here.
- if (name->AsArrayIndex(&index)) {
- return Object::GetElement(isolate, object, index);
- } else {
- return Object::GetProperty(object, name, language_mode);
- }
+ return Object::GetProperty(&it, language_mode);
}
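
The same LookupIterator::PropertyOrElement pattern recurs in the Delete and Set rewrites below. A self-contained model of the routing it centralizes, with illustrative types (Obj, ToArrayIndex): integer-like keys take the element path and everything else the named path; the real code additionally reports key-conversion failure through a success flag, which this sketch folds into the optional result.

#include <cstdint>
#include <map>
#include <optional>
#include <string>

struct Obj {
  std::map<uint32_t, int> elements;  // array-indexed properties
  std::map<std::string, int> named;  // named properties
};

bool ToArrayIndex(const std::string& key, uint32_t* out) {
  if (key.empty()) return false;
  uint32_t v = 0;
  for (char c : key) {
    if (c < '0' || c > '9') return false;
    v = v * 10 + static_cast<uint32_t>(c - '0');  // overflow ignored here
  }
  *out = v;
  return true;
}

std::optional<int> GetPropertyOrElement(const Obj& o, const std::string& key) {
  uint32_t index;
  if (ToArrayIndex(key, &index)) {  // element path
    auto it = o.elements.find(index);
    if (it != o.elements.end()) return it->second;
    return std::nullopt;
  }
  auto it = o.named.find(key);      // named-property path
  if (it != o.named.end()) return it->second;
  return std::nullopt;
}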
@@ -70,7 +56,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
DisallowHeapAllocation no_allocation;
Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
Handle<Name> key = Handle<Name>::cast(key_obj);
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalObject()) {
// Attempt dictionary lookup.
GlobalDictionary* dictionary = receiver->global_dictionary();
int entry = dictionary->FindEntry(key);
@@ -135,17 +121,12 @@ MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<Object> key,
LanguageMode language_mode) {
- // Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return JSReceiver::DeleteElement(receiver, index, language_mode);
- }
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, key, &success, LookupIterator::HIDDEN);
+ if (!success) return MaybeHandle<Object>();
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
-
- return JSReceiver::DeletePropertyOrElement(receiver, name, language_mode);
+ return JSReceiver::DeleteProperty(&it, language_mode);
}
@@ -162,18 +143,14 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
// Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return Object::SetElement(isolate, object, index, value, language_mode);
- }
+ bool success = false;
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, object, key, &success);
+ if (!success) return MaybeHandle<Object>();
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
-
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
- return Object::SetProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN_NULL(Object::SetProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED));
+ return value;
}
@@ -181,68 +158,31 @@ RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- // We don't expect access checks to be needed on JSProxy objects.
- DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
- PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
- do {
- if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() &&
- !isolate->MayAccess(PrototypeIterator::GetCurrent<JSObject>(iter))) {
- return isolate->heap()->null_value();
- }
- iter.AdvanceIgnoringProxies();
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return *PrototypeIterator::GetCurrent(iter);
- }
- } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
- return *PrototypeIterator::GetCurrent(iter);
+ return *Object::GetPrototype(isolate, obj);
}
RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- DCHECK(!obj->IsAccessCheckNeeded());
- DCHECK(!obj->map()->is_observed());
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetPrototype(obj, prototype, false));
- return *result;
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(obj, prototype, false, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ return *obj;
}
RUNTIME_FUNCTION(Runtime_SetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (obj->IsAccessCheckNeeded() && !isolate->MayAccess(obj)) {
- isolate->ReportFailedAccessCheck(obj);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
- }
- if (obj->map()->is_observed()) {
- Handle<Object> old_value =
- Object::GetPrototypeSkipHiddenPrototypes(isolate, obj);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetPrototype(obj, prototype, true));
-
- Handle<Object> new_value =
- Object::GetPrototypeSkipHiddenPrototypes(isolate, obj);
- if (!new_value->SameValue(*old_value)) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::EnqueueChangeRecord(
- obj, "setPrototype", isolate->factory()->proto_string(),
- old_value));
- }
- return *result;
- }
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetPrototype(obj, prototype, true));
- return *result;
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(obj, prototype, true, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ return *obj;
}
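
A minimal sketch of the Maybe-based error protocol that MAYBE_RETURN encodes, with stand-in names (MAYBE_RETURN_SKETCH, kExceptionSentinel): an empty result means an exception is already pending, so the caller returns the exception sentinel instead of a value.

#include <optional>

struct Obj;
Obj* const kExceptionSentinel = nullptr;  // stand-in for heap()->exception()

#define MAYBE_RETURN_SKETCH(call, value)      \
  do {                                        \
    if (!(call).has_value()) return (value);  \
  } while (false)

// Nothing (an empty optional) signals "an exception is already pending".
std::optional<bool> SetPrototypeImpl(Obj* obj, Obj* proto) { return true; }

Obj* RuntimeSetPrototype(Obj* obj, Obj* proto) {
  MAYBE_RETURN_SKETCH(SetPrototypeImpl(obj, proto), kExceptionSentinel);
  return obj;  // success: the receiver itself is the runtime result
}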
@@ -325,19 +265,18 @@ RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
RUNTIME_FUNCTION(Runtime_PreventExtensions) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::PreventExtensions(obj));
- return *result;
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
+ if (JSReceiver::PreventExtensions(obj, Object::THROW_ON_ERROR).IsNothing())
+ return isolate->heap()->exception();
+ return *obj;
}
RUNTIME_FUNCTION(Runtime_IsExtensible) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsExtensible());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ return isolate->heap()->ToBoolean(JSObject::IsExtensible(obj));
}
@@ -399,7 +338,8 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
// Lookup the named property on the global object.
Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
- Handle<GlobalObject> global_object(script_context->global_object(), isolate);
+ Handle<JSGlobalObject> global_object(script_context->global_object(),
+ isolate);
LookupIterator it(global_object, name, LookupIterator::HIDDEN);
// Switch to fast mode only if there is a data property and it's not on
@@ -433,7 +373,8 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
// Lookup the named property on the global object.
Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
- Handle<GlobalObject> global_object(script_context->global_object(), isolate);
+ Handle<JSGlobalObject> global_object(script_context->global_object(),
+ isolate);
LookupIterator it(global_object, name, LookupIterator::HIDDEN);
// Switch to fast mode only if there is a data property and it's not on
@@ -449,12 +390,10 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
script_context->set(slot, isolate->heap()->empty_property_cell());
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::SetProperty(&it, value, language_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED));
- return *result;
+ MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
} // namespace
@@ -691,6 +630,8 @@ static Object* HasOwnPropertyImplementation(Isolate* isolate,
->is_hidden_prototype()) {
// TODO(verwaest): The recursion is not necessary for keys that are array
// indices. Remove this.
+ // Casting to JSObject is fine because JSProxies are never used as
+ // hidden prototypes.
return HasOwnPropertyImplementation(
isolate, Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
key);
@@ -831,8 +772,9 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ // Casting to JSObject is fine because |object| is guaranteed to be one,
+ // and we'll only look at hidden prototypes which are never JSProxies.
+ Handle<JSObject> jsproto = PrototypeIterator::GetCurrent<JSObject>(iter);
total_property_count += jsproto->NumberOfOwnProperties(filter);
}
@@ -847,8 +789,9 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ // Casting to JSObject is fine because |object| is guaranteed to be one,
+ // and we'll only look at hidden prototypes which are never JSProxies.
+ Handle<JSObject> jsproto = PrototypeIterator::GetCurrent<JSObject>(iter);
int own = jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
// Names from hidden prototypes may already have been added
// for inherited function template instances. Count the duplicates
@@ -873,7 +816,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
CHECK_EQ(total_property_count, next_copy_index);
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
for (int i = 0; i < total_property_count; i++) {
Handle<Name> name(Name::cast(names->get(i)));
if (name.is_identical_to(hidden_string)) continue;
@@ -920,11 +864,25 @@ RUNTIME_FUNCTION(Runtime_GetOwnElementNames) {
if (!args[0]->IsJSObject()) {
return isolate->heap()->undefined_value();
}
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+ // TODO(cbruni): implement proper prototype lookup like in GetOwnPropertyNames
+ if (object->IsJSGlobalProxy()) {
+ // All the elements are stored on the global object and not directly on the
+ // global object proxy.
+ PrototypeIterator iter(isolate, object,
+ PrototypeIterator::START_AT_PROTOTYPE);
+ if (iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ return *isolate->factory()->NewJSArray(0);
+ }
+ // Casting to JSObject is fine because |object| is guaranteed to be one,
+ // and we'll only look at hidden prototypes which are never JSProxies.
+ object = PrototypeIterator::GetCurrent<JSObject>(iter);
+ }
- int n = obj->NumberOfOwnElements(NONE);
+ int n = object->NumberOfOwnElements(NONE);
Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- obj->GetOwnElementKeys(*names, NONE);
+ object->GetOwnElementKeys(*names, NONE);
return *isolate->factory()->NewJSArrayWithElements(names);
}
@@ -989,34 +947,9 @@ RUNTIME_FUNCTION(Runtime_OwnKeys) {
Handle<FixedArray> contents;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, contents, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY));
-
- // Some fast paths through GetKeysInFixedArrayFor reuse a cached
- // property array and since the result is mutable we have to create
- // a fresh clone on each invocation.
- int length = contents->length();
- Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
- int offset = 0;
- // Use an outer loop to avoid creating too many handles in the current
- // handle scope.
- while (offset < length) {
- HandleScope scope(isolate);
- offset += 100;
- int end = Min(offset, length);
- for (int i = offset - 100; i < end; i++) {
- Object* entry = contents->get(i);
- if (entry->IsString()) {
- copy->set(i, entry);
- } else {
- DCHECK(entry->IsNumber());
- Handle<Object> entry_handle(entry, isolate);
- Handle<Object> entry_str =
- isolate->factory()->NumberToString(entry_handle);
- copy->set(i, *entry_str);
- }
- }
- }
- return *isolate->factory()->NewJSArrayWithElements(copy);
+ isolate, contents, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY,
+ SKIP_SYMBOLS, CONVERT_TO_STRING));
+ return *isolate->factory()->NewJSArrayWithElements(contents);
}
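
A standalone model of what the CONVERT_TO_STRING mode folds into the key collector, with illustrative types: indices are stringified as keys are gathered, which makes the old clone-and-convert loop unnecessary.

#include <cstdint>
#include <string>
#include <variant>
#include <vector>

using RawKey = std::variant<uint32_t, std::string>;

std::vector<std::string> OwnKeys(const std::vector<RawKey>& raw) {
  std::vector<std::string> keys;
  keys.reserve(raw.size());
  for (const RawKey& k : raw) {
    // Numeric indices become strings during collection, not in a post-pass.
    if (std::holds_alternative<uint32_t>(k))
      keys.push_back(std::to_string(std::get<uint32_t>(k)));
    else
      keys.push_back(std::get<std::string>(k));
  }
  return keys;
}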
@@ -1024,7 +957,7 @@ RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- if (object->IsJSObject() && !object->IsGlobalObject()) {
+ if (object->IsJSObject() && !object->IsJSGlobalObject()) {
JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
"RuntimeToFastProperties");
}
@@ -1066,45 +999,23 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
// Handle stepping into constructors if step into is active.
if (debug->StepInActive()) debug->HandleStepIn(function, true);
- if (function->has_initial_map()) {
- if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
- // The 'Function' function ignores the receiver object when
- // called using 'new' and creates a new JSFunction object that
- // is returned. The receiver object is only used for error
- // reporting if an error occurs when constructing the new
- // JSFunction. Factory::NewJSObject() should not be used to
- // allocate JSFunctions since it does not properly initialize
- // the shared part of the function. Since the receiver is
- // ignored anyway, we use the global object as the receiver
- // instead of a new JSFunction object. This way, errors are
- // reported the same way whether or not 'Function' is called
- // using 'new'.
- return isolate->global_proxy();
- }
- }
-
// The function should be compiled for the optimization hints to be
// available.
Compiler::Compile(function, CLEAR_EXCEPTION);
- Handle<JSObject> result;
- if (site.is_null()) {
- result = isolate->factory()->NewJSObject(function);
- } else {
- result = isolate->factory()->NewJSObjectWithMemento(function, site);
+ JSFunction::EnsureHasInitialMap(function);
+ if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
+ // The 'Function' function ignores the receiver object when
+ // called using 'new' and creates a new JSFunction object that
+ // is returned.
+ return isolate->heap()->undefined_value();
}
- // Set up the prototoype using original function.
- // TODO(dslomov): instead of setting the __proto__,
- // use and cache the correct map.
- if (*original_function != *function) {
- if (original_function->has_instance_prototype()) {
- Handle<Object> prototype =
- handle(original_function->instance_prototype(), isolate);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(result, prototype, false));
- }
- }
+ Handle<Map> initial_map =
+ JSFunction::EnsureDerivedHasInitialMap(original_function, function);
+
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObjectFromMap(initial_map, NOT_TENURED, site);
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -1127,8 +1038,8 @@ RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->CompleteInobjectSlackTracking();
+ CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
+ initial_map->CompleteInobjectSlackTracking();
return isolate->heap()->undefined_value();
}
@@ -1586,9 +1497,8 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
if (callable->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
if (function->shared()->bound()) {
- Handle<FixedArray> bindings(function->function_bindings(), isolate);
- callable =
- handle(bindings->get(JSFunction::kBoundFunctionIndex), isolate);
+ Handle<BindingsArray> bindings(function->function_bindings(), isolate);
+ callable = handle(bindings->bound_function(), isolate);
}
}
DCHECK(callable->IsCallable());
@@ -1636,5 +1546,22 @@ RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
}
+RUNTIME_FUNCTION(Runtime_ObjectDefineProperty) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, attributes, 2);
+ return JSReceiver::DefineProperty(isolate, o, name, attributes);
+}
+
+
+RUNTIME_FUNCTION(Runtime_ObjectDefineProperties) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, properties, 1);
+ return JSReceiver::DefineProperties(isolate, o, properties);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 48154ea275..b4cf184c40 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -693,8 +693,10 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
RUNTIME_ASSERT(pattern_length > 0);
if (limit == 0xffffffffu) {
+ FixedArray* last_match_cache_unused;
Handle<Object> cached_answer(
RegExpResultsCache::Lookup(isolate->heap(), *subject, *pattern,
+ &last_match_cache_unused,
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
isolate);
if (*cached_answer != Smi::FromInt(0)) {
@@ -757,6 +759,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
if (limit == 0xffffffffu) {
if (result->HasFastObjectElements()) {
RegExpResultsCache::Enter(isolate, subject, pattern, elements,
+ isolate->factory()->empty_fixed_array(),
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
}
}
@@ -785,6 +788,22 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
}
+RUNTIME_FUNCTION(Runtime_RegExpFlags) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ return regexp->flags();
+}
+
+
+RUNTIME_FUNCTION(Runtime_RegExpSource) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ return regexp->source();
+}
+
+
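
A standalone sketch of the representation change behind these two accessors: the flags live in one integer field queried with bit tests, instead of five separate in-object boolean properties. The bit assignments below are illustrative, not V8's actual encoding.

#include <cstdint>

enum RegExpFlag : uint32_t {
  kGlobal = 1 << 0,
  kIgnoreCase = 1 << 1,
  kMultiline = 1 << 2,
  kSticky = 1 << 3,
  kUnicode = 1 << 4,
};

struct RegExpSketch { uint32_t flags; };

inline bool IsGlobal(const RegExpSketch& re) { return (re.flags & kGlobal) != 0; }
inline bool IsSticky(const RegExpSketch& re) { return (re.flags & kSticky) != 0; }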
RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 3);
@@ -921,58 +940,24 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, escaped_source,
EscapeRegExpSource(isolate, source));
- Handle<Object> global = factory->ToBoolean(flags.is_global());
- Handle<Object> ignore_case = factory->ToBoolean(flags.is_ignore_case());
- Handle<Object> multiline = factory->ToBoolean(flags.is_multiline());
- Handle<Object> sticky = factory->ToBoolean(flags.is_sticky());
- Handle<Object> unicode = factory->ToBoolean(flags.is_unicode());
+ regexp->set_source(*escaped_source);
+ regexp->set_flags(Smi::FromInt(flags.value()));
Map* map = regexp->map();
Object* constructor = map->GetConstructor();
- if (!FLAG_harmony_regexps && !FLAG_harmony_unicode_regexps &&
- constructor->IsJSFunction() &&
+ if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *escaped_source);
- // Both true and false are immovable immortal objects so no need for write
- // barrier.
- regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, *global,
- SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, *ignore_case,
- SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, *multiline,
- SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
Smi::FromInt(0), SKIP_WRITE_BARRIER);
} else {
- // Map has changed, so use generic, but slower, method. We also end here if
- // the --harmony-regexp flag is set, because the initial map does not have
- // space for the 'sticky' flag, since it is from the snapshot, but must work
- // both with and without --harmony-regexp. When sticky comes out from under
- // the flag, we will be able to use the fast initial map.
- PropertyAttributes final =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
+ // Map has changed, so use generic, but slower, method.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Handle<Object> zero(Smi::FromInt(0), isolate);
- JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->source_string(),
- escaped_source, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->global_string(),
- global, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->ignore_case_string(), ignore_case, final).Check();
JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->multiline_string(), multiline, final).Check();
- if (FLAG_harmony_regexps) {
- JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->sticky_string(),
- sticky, final).Check();
- }
- if (FLAG_harmony_unicode_regexps) {
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->unicode_string(), unicode, final).Check();
- }
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->last_index_string(), zero, writable).Check();
+ regexp, factory->last_index_string(),
+ Handle<Smi>(Smi::FromInt(0), isolate), writable)
+ .Check();
}
Handle<Object> result;
@@ -1017,23 +1002,23 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
static const int kMinLengthToCache = 0x1000;
if (subject_length > kMinLengthToCache) {
- Handle<Object> cached_answer(
- RegExpResultsCache::Lookup(isolate->heap(), *subject, regexp->data(),
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES),
- isolate);
- if (*cached_answer != Smi::FromInt(0)) {
+ FixedArray* last_match_cache;
+ Object* cached_answer = RegExpResultsCache::Lookup(
+ isolate->heap(), *subject, regexp->data(), &last_match_cache,
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ if (cached_answer->IsFixedArray()) {
+ int capture_registers = (capture_count + 1) * 2;
+ int32_t* last_match = NewArray<int32_t>(capture_registers);
+ for (int i = 0; i < capture_registers; i++) {
+ last_match[i] = Smi::cast(last_match_cache->get(i))->value();
+ }
Handle<FixedArray> cached_fixed_array =
- Handle<FixedArray>(FixedArray::cast(*cached_answer));
+ Handle<FixedArray>(FixedArray::cast(cached_answer));
// The cache FixedArray is a COW-array and can therefore be reused.
JSArray::SetContent(result_array, cached_fixed_array);
- // The actual length of the result array is stored in the last element of
- // the backing store (the backing FixedArray may have a larger capacity).
- Object* cached_fixed_array_last_element =
- cached_fixed_array->get(cached_fixed_array->length() - 1);
- Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
- result_array->set_length(js_array_length);
RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
- NULL);
+ last_match);
+ DeleteArray(last_match);
return *result_array;
}
}
@@ -1121,19 +1106,24 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
- NULL);
+ global_cache.LastSuccessfulMatch());
if (subject_length > kMinLengthToCache) {
- // Store the length of the result array into the last element of the
- // backing FixedArray.
- builder.EnsureCapacity(1);
- Handle<FixedArray> fixed_array = builder.array();
- fixed_array->set(fixed_array->length() - 1,
- Smi::FromInt(builder.length()));
+ // Store the last successful match into the array for caching.
+ // TODO(yangguo): do not expose last match to JS and simplify caching.
+ int capture_registers = (capture_count + 1) * 2;
+ Handle<FixedArray> last_match_cache =
+ isolate->factory()->NewFixedArray(capture_registers);
+ int32_t* last_match = global_cache.LastSuccessfulMatch();
+ for (int i = 0; i < capture_registers; i++) {
+ last_match_cache->set(i, Smi::FromInt(last_match[i]));
+ }
+ Handle<FixedArray> result_fixed_array = builder.array();
+ result_fixed_array->Shrink(builder.length());
// Cache the result and turn the FixedArray into a COW array.
- RegExpResultsCache::Enter(isolate, subject,
- handle(regexp->data(), isolate), fixed_array,
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ RegExpResultsCache::Enter(
+ isolate, subject, handle(regexp->data(), isolate), result_fixed_array,
+ last_match_cache, RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
}
return *builder.ToJSArray(result_array);
} else {
@@ -1149,8 +1139,8 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index c3928a7703..ecbe5cd17d 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -30,7 +30,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
// May throw a RedeclarationError.
-static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
+static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
Handle<String> name, Handle<Object> value,
PropertyAttributes attr, bool is_var,
bool is_const, bool is_function) {
@@ -87,7 +87,7 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<GlobalObject> global(isolate->global_object());
+ Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context());
CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
@@ -155,7 +155,7 @@ RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<JSGlobalObject> global(isolate->context()->global_object());
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, Object::SetProperty(global, name, value, language_mode));
@@ -172,7 +172,7 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<GlobalObject> global = isolate->global_object();
+ Handle<JSGlobalObject> global = isolate->global_object();
// Lookup the property as own on the global object.
LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
@@ -223,10 +223,22 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
int index;
PropertyAttributes attributes;
- ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
+
+ if ((attr & EVAL_DECLARED) != 0) {
+ // Check for a conflict with a lexically scoped variable
+ context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes,
+ &binding_flags);
+ if (attributes != ABSENT &&
+ (binding_flags == MUTABLE_CHECK_INITIALIZED ||
+ binding_flags == IMMUTABLE_CHECK_INITIALIZED)) {
+ return ThrowRedeclarationError(isolate, name);
+ }
+ attr = static_cast<PropertyAttributes>(attr & ~EVAL_DECLARED);
+ }
+
+ Handle<Object> holder = context->Lookup(name, DONT_FOLLOW_CHAINS, &index,
+ &attributes, &binding_flags);
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return isolate->heap()->exception();
@@ -307,21 +319,14 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
-
- return DeclareLookupSlot(isolate, name, initial_value, NONE);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DeclareReadOnlyLookupSlot) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, property_attributes, 2);
- return DeclareLookupSlot(isolate, name, initial_value, READ_ONLY);
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(property_attributes->value());
+ return DeclareLookupSlot(isolate, name, initial_value, attributes);
}
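
A minimal sketch of the new third argument's round trip, using an illustrative enum subset rather than V8's full PropertyAttributes: the attributes travel as a small integer and are cast back on the C++ side, which lets one entry replace the separate read-only variant.

#include <cassert>

enum PropertyAttributesSketch {  // illustrative subset only
  NONE_ATTR = 0,
  READ_ONLY_ATTR = 1 << 0,
  DONT_ENUM_ATTR = 1 << 1,
  DONT_DELETE_ATTR = 1 << 2,
};

PropertyAttributesSketch DecodeAttributes(int smi_value) {
  return static_cast<PropertyAttributesSketch>(smi_value);
}

int main() { assert(DecodeAttributes(1) == READ_ONLY_ATTR); }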
@@ -410,7 +415,7 @@ template <typename T>
Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
T parameters, int argument_count) {
CHECK(!IsSubclassConstructor(callee->shared()->kind()));
- DCHECK(callee->has_simple_parameters());
+ DCHECK(callee->shared()->has_simple_parameters());
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -621,7 +626,7 @@ RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
}
static Object* FindNameClash(Handle<ScopeInfo> scope_info,
- Handle<GlobalObject> global_object,
+ Handle<JSGlobalObject> global_object,
Handle<ScriptContextTable> script_context) {
Isolate* isolate = scope_info->GetIsolate();
for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
@@ -643,7 +648,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
return ThrowRedeclarationError(isolate, name);
}
- GlobalObject::InvalidatePropertyCell(global_object, name);
+ JSGlobalObject::InvalidatePropertyCell(global_object, name);
}
}
return isolate->heap()->undefined_value();
@@ -656,7 +661,7 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
- Handle<GlobalObject> global_object(function->context()->global_object());
+ Handle<JSGlobalObject> global_object(function->context()->global_object());
Handle<Context> native_context(global_object->native_context());
Handle<ScriptContextTable> script_context_table(
native_context->script_context_table());
@@ -668,9 +673,8 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
// Script contexts have a canonical empty function as their closure, not the
// anonymous closure containing the global code. See
// FullCodeGenerator::PushFunctionArgumentForContextAllocation.
- Handle<JSFunction> closure(global_object->IsJSBuiltinsObject()
- ? *function
- : native_context->closure());
+ Handle<JSFunction> closure(
+ function->shared()->IsBuiltin() ? *function : native_context->closure());
Handle<Context> result =
isolate->factory()->NewScriptContext(closure, scope_info);
@@ -859,7 +863,8 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
}
}
- JSObject::PreventExtensions(module).Assert();
+ if (JSObject::PreventExtensions(module, Object::THROW_ON_ERROR).IsNothing())
+ DCHECK(false);
}
DCHECK(!isolate->has_pending_exception());
@@ -905,7 +910,7 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) {
- DCHECK(!holder->IsGlobalObject());
+ DCHECK(!holder->IsJSGlobalObject());
// If the holder isn't a context extension object, we just return it
// as the receiver. This allows arguments objects to be used as
@@ -983,7 +988,7 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
- object->IsGlobalObject()
+ object->IsJSGlobalObject()
? Object::cast(isolate->heap()->undefined_value())
: object->IsJSProxy() ? static_cast<Object*>(*object)
: ComputeReceiverForNonGlobal(
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 3ce5a58e2b..dd4983e75f 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -405,18 +405,6 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
}
-RUNTIME_FUNCTION(Runtime_CharFromCode) {
- HandleScope handlescope(isolate);
- DCHECK(args.length() == 1);
- if (args[0]->IsNumber()) {
- CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
- code &= 0xffff;
- return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
- }
- return isolate->heap()->empty_string();
-}
-
-
RUNTIME_FUNCTION(Runtime_StringCompare) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1185,8 +1173,14 @@ RUNTIME_FUNCTION(Runtime_FlattenString) {
RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_CharFromCode(args, isolate);
+ HandleScope handlescope(isolate);
+ DCHECK_EQ(1, args.length());
+ if (args[0]->IsNumber()) {
+ CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
+ code &= 0xffff;
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+ }
+ return isolate->heap()->empty_string();
}
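
A standalone sketch of the masking the merged function performs: any numeric input is truncated to a single 16-bit code unit before the one-character lookup, so code points above 0xFFFF do not yield surrogate pairs.

#include <cassert>
#include <cstdint>
#include <string>

std::u16string CharFromCode(uint32_t code) {
  code &= 0xffff;  // keep only the low 16 bits: one UTF-16 code unit
  return std::u16string(1, static_cast<char16_t>(code));
}

int main() { assert(CharFromCode(0x1F600).size() == 1); }  // truncated, not a pair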
@@ -1198,7 +1192,7 @@ RUNTIME_FUNCTION(Runtime_StringCharAt) {
if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
if (code->IsNaN()) return isolate->heap()->empty_string();
- return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate);
+ return __RT_impl_Runtime_StringCharFromCode(Arguments(1, &code), isolate);
}
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 778c241709..234b45606d 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -28,7 +28,9 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
- return *isolate->factory()->NewPrivateSymbol(name);
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
+ if (name->IsString()) symbol->set_name(*name);
+ return *symbol;
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 8a3fce0a92..f39e37072d 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -33,7 +33,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
holder, isolate, allocated_length, true,
is_shared ? SharedFlag::kShared : SharedFlag::kNotShared)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
}
return *holder;
}
@@ -164,8 +164,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
// All checks are done, now we can modify objects.
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
+ DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
+ holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
@@ -238,8 +238,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
}
size_t byte_length = length * element_size;
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
+ DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
+ holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
@@ -441,8 +441,8 @@ RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
+ DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
+ holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 4b072b1eb6..ded2c090c8 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -162,7 +162,7 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
}
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_RUNTIME_RUNTIME_UTILS_H_
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 15451c5c6e..90f4e4ce33 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -4,6 +4,7 @@
#include "src/runtime/runtime.h"
+#include "src/assembler.h"
#include "src/contexts.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
@@ -94,6 +95,31 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
}
+const Runtime::Function* Runtime::RuntimeFunctionTable(Isolate* isolate) {
+ if (isolate->external_reference_redirector()) {
+ // When running with the simulator we need to provide a table which has
+ // redirected runtime entry addresses.
+ if (!isolate->runtime_state()->redirected_intrinsic_functions()) {
+ size_t function_count = arraysize(kIntrinsicFunctions);
+ Function* redirected_functions = new Function[function_count];
+ memcpy(redirected_functions, kIntrinsicFunctions,
+ sizeof(kIntrinsicFunctions));
+ for (size_t i = 0; i < function_count; i++) {
+ ExternalReference redirected_entry(static_cast<Runtime::FunctionId>(i),
+ isolate);
+ redirected_functions[i].entry = redirected_entry.address();
+ }
+ isolate->runtime_state()->set_redirected_intrinsic_functions(
+ redirected_functions);
+ }
+
+ return isolate->runtime_state()->redirected_intrinsic_functions();
+ } else {
+ return kIntrinsicFunctions;
+ }
+}
+
+
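
A standalone sketch of the memoization above, with illustrative types: build the redirected copy once, cache it, and hand out the static table whenever no simulator redirection is needed.

#include <cstddef>
#include <cstring>

struct FunctionEntry { const void* entry; };

static FunctionEntry kStaticTable[4] = {};

const FunctionEntry* FunctionTable(bool simulated, FunctionEntry*& cache) {
  if (!simulated) return kStaticTable;  // common case: the static table
  if (cache == nullptr) {
    const size_t n = sizeof(kStaticTable) / sizeof(kStaticTable[0]);
    cache = new FunctionEntry[n];
    std::memcpy(cache, kStaticTable, sizeof(kStaticTable));
    // ... patch each cache[i].entry with its redirected address here ...
  }
  return cache;  // built once, reused on every later call
}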
std::ostream& operator<<(std::ostream& os, Runtime::FunctionId id) {
return os << Runtime::FunctionForId(id)->name;
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 6e55d74794..23f9bdc1f7 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -176,7 +176,7 @@ namespace internal {
F(DebugPrintScopes, 0, 1) \
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
- F(SetDisableBreak, 1, 1) \
+ F(SetBreakPointsActive, 1, 1) \
F(GetBreakLocations, 2, 1) \
F(SetFunctionBreakPoint, 3, 1) \
F(SetScriptBreakPoint, 4, 1) \
@@ -201,7 +201,7 @@ namespace internal {
F(GetScript, 1, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
- F(DebugPushPromise, 2, 1) \
+ F(DebugPushPromise, 3, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPromiseEvent, 1, 1) \
F(DebugAsyncTaskEvent, 1, 1) \
@@ -225,14 +225,18 @@ namespace internal {
F(InterpreterGreaterThan, 2, 1) \
F(InterpreterLessThanOrEqual, 2, 1) \
F(InterpreterGreaterThanOrEqual, 2, 1) \
- F(InterpreterToBoolean, 1, 1)
+ F(InterpreterToBoolean, 1, 1) \
+ F(InterpreterLogicalNot, 1, 1) \
+ F(InterpreterTypeOf, 1, 1) \
+ F(InterpreterNewClosure, 2, 1) \
+ F(InterpreterForInPrepare, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+ F(CompleteFunctionConstruction, 3, 1) \
F(FunctionIsArrow, 1, 1) \
F(FunctionIsConciseMethod, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
@@ -256,7 +260,7 @@ namespace internal {
F(Call, -1 /* >= 2 */, 1) \
F(Apply, 5, 1) \
F(GetOriginalConstructor, 0, 1) \
- F(CallFunction, -1 /* receiver + n args + function */, 1) \
+ F(ConvertReceiver, 1, 1) \
F(IsConstructCall, 0, 1) \
F(IsFunction, 1, 1)
@@ -345,8 +349,6 @@ namespace internal {
F(CallSiteIsConstructorRT, 1, 1) \
F(IS_VAR, 1, 1) \
F(IncrementStatsCounter, 1, 1) \
- F(Likely, 1, 1) \
- F(Unlikely, 1, 1) \
F(HarmonyToString, 0, 1) \
F(GetTypeFeedbackVector, 1, 1) \
F(GetCallerJSFunction, 0, 1) \
@@ -399,7 +401,8 @@ namespace internal {
F(RoundNumber, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathFround, 1, 1) \
- F(IsMinusZero, 1, 1)
+ F(IsMinusZero, 1, 1) \
+ F(InitializeRNG, 0, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
@@ -495,7 +498,9 @@ namespace internal {
F(InstanceOf, 2, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
- F(IsAccessCheckNeeded, 1, 1)
+ F(IsAccessCheckNeeded, 1, 1) \
+ F(ObjectDefineProperties, 2, 1) \
+ F(ObjectDefineProperty, 3, 1)
#define FOR_EACH_INTRINSIC_OBSERVE(F) \
@@ -551,6 +556,8 @@ namespace internal {
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringSplit, 3, 1) \
F(RegExpExec, 4, 1) \
+ F(RegExpFlags, 1, 1) \
+ F(RegExpSource, 1, 1) \
F(RegExpConstructResult, 3, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(MaterializeRegExpLiteral, 4, 1) \
@@ -564,8 +571,7 @@ namespace internal {
F(DeclareGlobals, 2, 1) \
F(InitializeVarGlobal, 3, 1) \
F(InitializeConstGlobal, 2, 1) \
- F(DeclareLookupSlot, 2, 1) \
- F(DeclareReadOnlyLookupSlot, 2, 1) \
+ F(DeclareLookupSlot, 3, 1) \
F(InitializeLegacyConstLookupSlot, 3, 1) \
F(NewSloppyArguments_Generic, 1, 1) \
F(NewStrictArguments_Generic, 1, 1) \
@@ -904,7 +910,6 @@ namespace internal {
F(InternalizeString, 1, 1) \
F(StringMatch, 3, 1) \
F(StringCharCodeAtRT, 2, 1) \
- F(CharFromCode, 1, 1) \
F(StringCompare, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
@@ -1110,27 +1115,6 @@ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
-class RuntimeState {
- public:
- unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
- return &to_upper_mapping_;
- }
- unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
- return &to_lower_mapping_;
- }
-
- private:
- RuntimeState() {}
- unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
- unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
-
- friend class Isolate;
- friend class Runtime;
-
- DISALLOW_COPY_AND_ASSIGN(RuntimeState);
-};
-
-
class Runtime : public AllStatic {
public:
enum FunctionId {
@@ -1179,6 +1163,9 @@ class Runtime : public AllStatic {
// Get the intrinsic function with the given function entry address.
static const Function* FunctionForEntry(Address ref);
+ // Get the runtime intrinsic function table.
+ static const Function* RuntimeFunctionTable(Isolate* isolate);
+
MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
LanguageMode language_mode);
@@ -1229,6 +1216,38 @@ class Runtime : public AllStatic {
};
+class RuntimeState {
+ public:
+ unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
+ return &to_upper_mapping_;
+ }
+ unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
+ return &to_lower_mapping_;
+ }
+
+ Runtime::Function* redirected_intrinsic_functions() {
+ return redirected_intrinsic_functions_.get();
+ }
+
+ void set_redirected_intrinsic_functions(
+ Runtime::Function* redirected_intrinsic_functions) {
+ redirected_intrinsic_functions_.Reset(redirected_intrinsic_functions);
+ }
+
+ private:
+ RuntimeState() {}
+ unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
+ unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+
+ base::SmartArrayPointer<Runtime::Function> redirected_intrinsic_functions_;
+
+ friend class Isolate;
+ friend class Runtime;
+
+ DISALLOW_COPY_AND_ASSIGN(RuntimeState);
+};
+
+
std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
//---------------------------------------------------------------------------
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index a7719e036d..fbb0152eb3 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -230,6 +230,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SAFEPOINT_TABLE_H_
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/scanner-character-streams.h
index a26f50a892..8a0ae23926 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/scanner-character-streams.h
@@ -21,16 +21,16 @@ class ExternalTwoByteString;
class BufferedUtf16CharacterStream: public Utf16CharacterStream {
public:
BufferedUtf16CharacterStream();
- virtual ~BufferedUtf16CharacterStream();
+ ~BufferedUtf16CharacterStream() override;
- virtual void PushBack(uc32 character);
+ void PushBack(uc32 character) override;
protected:
static const size_t kBufferSize = 512;
static const size_t kPushBackStepSize = 16;
- virtual size_t SlowSeekForward(size_t delta);
- virtual bool ReadBlock();
+ size_t SlowSeekForward(size_t delta) override;
+ bool ReadBlock() override;
virtual void SlowPushBack(uc16 character);
virtual size_t BufferSeekForward(size_t delta) = 0;
@@ -46,16 +46,16 @@ class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
size_t end_position);
- virtual ~GenericStringUtf16CharacterStream();
+ ~GenericStringUtf16CharacterStream() override;
- virtual bool SetBookmark();
- virtual void ResetToBookmark();
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
protected:
static const size_t kNoBookmark = -1;
- virtual size_t BufferSeekForward(size_t delta);
- virtual size_t FillBuffer(size_t position);
+ size_t BufferSeekForward(size_t delta) override;
+ size_t FillBuffer(size_t position) override;
Handle<String> string_;
size_t length_;
@@ -67,14 +67,14 @@ class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
Utf8ToUtf16CharacterStream(const byte* data, size_t length);
- virtual ~Utf8ToUtf16CharacterStream();
+ ~Utf8ToUtf16CharacterStream() override;
static size_t CopyChars(uint16_t* dest, size_t length, const byte* src,
size_t* src_pos, size_t src_length);
protected:
- virtual size_t BufferSeekForward(size_t delta);
- virtual size_t FillBuffer(size_t char_position);
+ size_t BufferSeekForward(size_t delta) override;
+ size_t FillBuffer(size_t char_position) override;
void SetRawPosition(size_t char_position);
const byte* raw_data_;
@@ -103,7 +103,7 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
bookmark_data_offset_(0),
bookmark_utf8_split_char_buffer_length_(0) {}
- virtual ~ExternalStreamingStream() {
+ ~ExternalStreamingStream() override {
delete[] current_data_;
bookmark_buffer_.Dispose();
bookmark_data_.Dispose();
@@ -120,8 +120,8 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
size_t FillBuffer(size_t position) override;
- virtual bool SetBookmark() override;
- virtual void ResetToBookmark() override;
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
private:
void HandleUtf8SplitCharacters(size_t* data_in_buffer);
@@ -154,23 +154,23 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
int start_position,
int end_position);
- virtual ~ExternalTwoByteStringUtf16CharacterStream();
+ ~ExternalTwoByteStringUtf16CharacterStream() override;
- virtual void PushBack(uc32 character) {
+ void PushBack(uc32 character) override {
DCHECK(buffer_cursor_ > raw_data_);
buffer_cursor_--;
pos_--;
}
- virtual bool SetBookmark();
- virtual void ResetToBookmark();
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
protected:
- virtual size_t SlowSeekForward(size_t delta) {
+ size_t SlowSeekForward(size_t delta) override {
// Fast case always handles seeking.
return 0;
}
- virtual bool ReadBlock() {
+ bool ReadBlock() override {
// Entire string is read at start.
return false;
}
@@ -183,6 +183,7 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
size_t bookmark_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SCANNER_CHARACTER_STREAMS_H_
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index de799033b9..04712e9f32 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -1395,7 +1395,7 @@ bool Scanner::ScanRegExpFlags() {
}
literal.Complete();
- next_.location.end_pos = source_pos() - 1;
+ next_.location.end_pos = source_pos();
return true;
}
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index a86ed07ab9..6d0d4dc8ed 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -166,11 +166,7 @@ class LiteralBuffer {
public:
LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
- ~LiteralBuffer() {
- if (backing_store_.length() > 0) {
- backing_store_.Dispose();
- }
- }
+ ~LiteralBuffer() { backing_store_.Dispose(); }
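
The shortened destructor relies on Dispose() being harmless on an empty backing store; a standalone analogue of that contract:

struct BackingStore {
  char* data = nullptr;
  void Dispose() {
    delete[] data;   // delete[] on a null pointer is a defined no-op
    data = nullptr;
  }
};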
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
@@ -763,6 +759,7 @@ class Scanner {
bool has_multiline_comment_before_next_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SCANNER_H_
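The simplified `~LiteralBuffer()` leans on `Vector::Dispose()` being safe for a default-constructed, zero-length backing store, which makes the old length guard dead code. A minimal sketch (not V8's actual Vector) of a Dispose() with that property:

#include <cstddef>

template <typename T>
class Buffer {
 public:
  Buffer() : data_(nullptr), length_(0) {}
  // Deleting a null pointer is well-defined, so no emptiness guard is needed.
  void Dispose() {
    delete[] data_;
    data_ = nullptr;
    length_ = 0;
  }

 private:
  T* data_;
  std::size_t length_;
};

int main() {
  Buffer<char> b;
  b.Dispose();  // safe even though nothing was allocated
  return 0;
}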
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 732908a9e6..c061b8fceb 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -343,7 +343,6 @@ int ScopeInfo::ContextLength() {
scope_type() == WITH_SCOPE ||
(scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
is_declaration_scope()) ||
- (scope_type() == ARROW_SCOPE && CallsSloppyEval()) ||
(scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
scope_type() == MODULE_SCOPE;
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 70a17cd7d4..2afc667c30 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -169,6 +169,7 @@ class ModuleInfo: public FixedArray {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SCOPEINFO_H_
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index a611d7364c..6a6b8ad45c 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -194,7 +194,6 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
language_mode_ = outer_scope != NULL ? outer_scope->language_mode_ : SLOPPY;
outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
- inner_scope_uses_arguments_ = false;
scope_nonlinear_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
@@ -325,7 +324,6 @@ bool Scope::Analyze(ParseInfo* info) {
void Scope::Initialize() {
- bool subclass_constructor = IsSubclassConstructor(function_kind_);
DCHECK(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
@@ -338,6 +336,7 @@ void Scope::Initialize() {
// Declare convenience variables and the receiver.
if (is_declaration_scope() && has_this_declaration()) {
+ bool subclass_constructor = IsSubclassConstructor(function_kind_);
Variable* var = variables_.Declare(
this, ast_value_factory_->this_string(),
subclass_constructor ? CONST : VAR, Variable::THIS,
@@ -352,10 +351,8 @@ void Scope::Initialize() {
variables_.Declare(this, ast_value_factory_->arguments_string(), VAR,
Variable::ARGUMENTS, kCreatedInitialized);
- if (subclass_constructor || FLAG_harmony_new_target) {
- variables_.Declare(this, ast_value_factory_->new_target_string(), CONST,
- Variable::NORMAL, kCreatedInitialized);
- }
+ variables_.Declare(this, ast_value_factory_->new_target_string(), CONST,
+ Variable::NORMAL, kCreatedInitialized);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
@@ -377,12 +374,7 @@ Scope* Scope::FinalizeBlockScope() {
}
// Remove this scope from outer scope.
- for (int i = 0; i < outer_scope_->inner_scopes_.length(); i++) {
- if (outer_scope_->inner_scopes_[i] == this) {
- outer_scope_->inner_scopes_.Remove(i);
- break;
- }
- }
+ outer_scope()->RemoveInnerScope(this);
// Reparent inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
@@ -394,15 +386,35 @@ Scope* Scope::FinalizeBlockScope() {
outer_scope()->unresolved_.Add(unresolved_[i], zone());
}
- // Propagate usage flags to outer scope.
- if (uses_arguments()) outer_scope_->RecordArgumentsUsage();
- if (uses_super_property()) outer_scope_->RecordSuperPropertyUsage();
- if (scope_calls_eval_) outer_scope_->RecordEvalCall();
+ PropagateUsageFlagsToScope(outer_scope_);
return NULL;
}
+void Scope::ReplaceOuterScope(Scope* outer) {
+ DCHECK_NOT_NULL(outer);
+ DCHECK_NOT_NULL(outer_scope_);
+ DCHECK(!already_resolved());
+ DCHECK(!outer->already_resolved());
+ DCHECK(!outer_scope_->already_resolved());
+ outer_scope_->RemoveInnerScope(this);
+ outer->AddInnerScope(this);
+ outer_scope_ = outer;
+}
+
+
+void Scope::PropagateUsageFlagsToScope(Scope* other) {
+ DCHECK_NOT_NULL(other);
+ DCHECK(!already_resolved());
+ DCHECK(!other->already_resolved());
+ if (uses_arguments()) other->RecordArgumentsUsage();
+ if (uses_super_property()) other->RecordSuperPropertyUsage();
+ if (calls_eval()) other->RecordEvalCall();
+ if (scope_contains_with_) other->RecordWithStatement();
+}
+
+
Variable* Scope::LookupLocal(const AstRawString* name) {
Variable* result = variables_.Lookup(name);
if (result != NULL || scope_info_.is_null()) {
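`ReplaceOuterScope()` above is the usual tree-reparenting move: detach from the old parent's child list, attach to the new parent, and fix the back pointer. A generic sketch of the pattern (names illustrative, not V8's):

#include <algorithm>
#include <vector>

struct Node {
  Node* parent = nullptr;
  std::vector<Node*> children;

  void AddChild(Node* child) { children.push_back(child); }
  void RemoveChild(Node* child) {
    auto it = std::find(children.begin(), children.end(), child);
    if (it != children.end()) children.erase(it);
  }
  void ReplaceParent(Node* new_parent) {
    if (parent != nullptr) parent->RemoveChild(this);
    new_parent->AddChild(this);
    parent = new_parent;
  }
};

int main() {
  Node a, b, c;
  a.AddChild(&c);
  c.parent = &a;
  c.ReplaceParent(&b);
  return (a.children.empty() && c.parent == &b) ? 0 : 1;
}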
@@ -548,15 +560,16 @@ Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) {
}
-void Scope::RemoveUnresolved(VariableProxy* var) {
+bool Scope::RemoveUnresolved(VariableProxy* var) {
// Most likely (always?) any variable we want to remove
// was just added before, so we search backwards.
for (int i = unresolved_.length(); i-- > 0;) {
if (unresolved_[i] == var) {
unresolved_.Remove(i);
- return;
+ return true;
}
}
+ return false;
}
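With `RemoveUnresolved()` now returning `bool`, a caller can observe whether the proxy was actually pending; the backwards search itself is unchanged. A self-contained analog of the new contract:

#include <cstdio>
#include <vector>

// Report whether removal happened; search backwards since the item was
// most likely added last.
template <typename T>
bool RemoveLast(std::vector<T*>& list, T* item) {
  for (int i = static_cast<int>(list.size()); i-- > 0;) {
    if (list[i] == item) {
      list.erase(list.begin() + i);
      return true;
    }
  }
  return false;  // never added (or already removed): now observable
}

int main() {
  std::vector<int*> list;
  int a = 1;
  list.push_back(&a);
  std::printf("%d %d\n", RemoveLast(list, &a), RemoveLast(list, &a));  // 1 0
  return 0;
}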
@@ -601,10 +614,6 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
const AstRawString* name = decl->proxy()->raw_name();
// Iterate through all scopes until and including the declaration scope.
- // If the declaration scope is a (declaration) block scope, also continue
- // (that is to handle the special inner scope of functions with
- // destructuring parameters, which may not shadow any variables from
- // the surrounding function scope).
Scope* previous = NULL;
Scope* current = decl->scope();
// Lexical vs lexical conflicts within the same scope have already been
@@ -620,7 +629,7 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
previous = current;
current = current->outer_scope_;
- } while (!previous->is_declaration_scope() || previous->is_block_scope());
+ } while (!previous->is_declaration_scope());
}
return NULL;
}
@@ -769,15 +778,26 @@ int Scope::ContextChainLength(Scope* scope) {
int n = 0;
for (Scope* s = this; s != scope; s = s->outer_scope_) {
DCHECK(s != NULL); // scope must be in the scope chain
- if (s->is_with_scope() || s->num_heap_slots() > 0) n++;
- // Catch and module scopes always have heap slots.
- DCHECK(!s->is_catch_scope() || s->num_heap_slots() > 0);
- DCHECK(!s->is_module_scope() || s->num_heap_slots() > 0);
+ if (s->NeedsContext()) n++;
}
return n;
}
+int Scope::MaxNestedContextChainLength() {
+ int max_context_chain_length = 0;
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* scope = inner_scopes_[i];
+ max_context_chain_length = std::max(scope->MaxNestedContextChainLength(),
+ max_context_chain_length);
+ }
+ if (NeedsContext()) {
+ max_context_chain_length += 1;
+ }
+ return max_context_chain_length;
+}
+
+
Scope* Scope::DeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
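The new `MaxNestedContextChainLength()` above is a post-order max-depth walk in which only scopes that materialize a context contribute to the depth. The same recursion over a toy tree:

#include <algorithm>
#include <vector>

struct S {
  bool needs_context = false;
  std::vector<S*> inner;
};

int MaxNestedContextChainLength(const S* s) {
  int max_len = 0;
  for (const S* child : s->inner)
    max_len = std::max(MaxNestedContextChainLength(child), max_len);
  return max_len + (s->needs_context ? 1 : 0);  // count this scope last
}

int main() {
  S root, a, b;
  root.inner = {&a, &b};
  a.needs_context = true;
  return MaxNestedContextChainLength(&root) == 1 ? 0 : 1;
}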
@@ -848,16 +868,18 @@ void Scope::ReportMessage(int start_position, int end_position,
#ifdef DEBUG
-static const char* Header(ScopeType scope_type, bool is_declaration_scope) {
+static const char* Header(ScopeType scope_type, FunctionKind function_kind,
+ bool is_declaration_scope) {
switch (scope_type) {
case EVAL_SCOPE: return "eval";
- case FUNCTION_SCOPE: return "function";
+ // TODO(adamk): Should we print concise method scopes specially?
+ case FUNCTION_SCOPE:
+ return IsArrowFunction(function_kind) ? "arrow" : "function";
case MODULE_SCOPE: return "module";
case SCRIPT_SCOPE: return "global";
case CATCH_SCOPE: return "catch";
case BLOCK_SCOPE: return is_declaration_scope ? "varblock" : "block";
case WITH_SCOPE: return "with";
- case ARROW_SCOPE: return "arrow";
}
UNREACHABLE();
return NULL;
@@ -939,8 +961,8 @@ void Scope::Print(int n) {
int n1 = n0 + 2; // indentation
// Print header.
- Indent(n0, Header(scope_type_, is_declaration_scope()));
- if (!scope_name_->IsEmpty()) {
+ Indent(n0, Header(scope_type_, function_kind_, is_declaration_scope()));
+ if (scope_name_ != nullptr && !scope_name_->IsEmpty()) {
PrintF(" ");
PrintName(scope_name_);
}
@@ -983,9 +1005,6 @@ void Scope::Print(int n) {
if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
if (scope_uses_super_property_)
Indent(n1, "// scope uses 'super' property\n");
- if (inner_scope_uses_arguments_) {
- Indent(n1, "// inner scope uses 'arguments'\n");
- }
if (outer_scope_calls_sloppy_eval_) {
Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
@@ -1111,7 +1130,8 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned();
*binding_kind = DYNAMIC_LOOKUP;
return NULL;
- } else if (calls_sloppy_eval() && name_can_be_shadowed) {
+ } else if (calls_sloppy_eval() && !is_script_scope() &&
+ name_can_be_shadowed) {
// A variable binding may have been found in an outer scope, but the current
// scope makes a sloppy 'eval' call, so the found variable may not be
// the correct one (the 'eval' may introduce a binding with the same name).
@@ -1332,14 +1352,6 @@ void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
if (inner->scope_calls_eval_ || inner->inner_scope_calls_eval_) {
inner_scope_calls_eval_ = true;
}
- // If the inner scope is an arrow function, propagate the flags tracking
- // usage of arguments/super/this, but do not propagate them out from normal
- // functions.
- if (!inner->is_function_scope() || inner->is_arrow_scope()) {
- if (inner->scope_uses_arguments_ || inner->inner_scope_uses_arguments_) {
- inner_scope_uses_arguments_ = true;
- }
- }
if (inner->force_eager_compilation_) {
force_eager_compilation_ = true;
}
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 61bf6338f7..d115097803 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -112,6 +112,15 @@ class Scope: public ZoneObject {
// tree and its children are reparented.
Scope* FinalizeBlockScope();
+ // Inserts outer_scope into this scope's scope chain (and removes this
+ // from the current outer_scope_'s inner_scopes_).
+ // Assumes outer_scope_ is non-null.
+ void ReplaceOuterScope(Scope* outer_scope);
+
+ // Propagates any eagerly-gathered scope usage flags (such as calls_eval())
+ // to the passed-in scope.
+ void PropagateUsageFlagsToScope(Scope* other);
+
Zone* zone() const { return zone_; }
// ---------------------------------------------------------------------------
@@ -178,13 +187,19 @@ class Scope: public ZoneObject {
return proxy;
}
+ void AddUnresolved(VariableProxy* proxy) {
+ DCHECK(!already_resolved());
+ DCHECK(!proxy->is_resolved());
+ unresolved_.Add(proxy, zone_);
+ }
+
// Remove an unresolved variable. During parsing, an unresolved variable
// may have been added optimistically, but then only the variable name
// was used (typically for labels). If the variable was not declared, the
// addition introduced a new unresolved variable which may end up being
// allocated globally as a "ghost" variable. RemoveUnresolved removes
// such a variable again if it was added; otherwise this is a no-op.
- void RemoveUnresolved(VariableProxy* var);
+ bool RemoveUnresolved(VariableProxy* var);
// Creates a new temporary variable in this scope's TemporaryScope. The
// name is only used for printing and cannot be used to find the variable.
@@ -226,7 +241,7 @@ class Scope: public ZoneObject {
void RecordWithStatement() { scope_contains_with_ = true; }
// Inform the scope that the corresponding code contains an eval call.
- void RecordEvalCall() { if (!is_script_scope()) scope_calls_eval_ = true; }
+ void RecordEvalCall() { scope_calls_eval_ = true; }
// Inform the scope that the corresponding code uses "arguments".
void RecordArgumentsUsage() { scope_uses_arguments_ = true; }
@@ -302,15 +317,15 @@ class Scope: public ZoneObject {
// Specific scope types.
bool is_eval_scope() const { return scope_type_ == EVAL_SCOPE; }
- bool is_function_scope() const {
- return scope_type_ == FUNCTION_SCOPE || scope_type_ == ARROW_SCOPE;
- }
+ bool is_function_scope() const { return scope_type_ == FUNCTION_SCOPE; }
bool is_module_scope() const { return scope_type_ == MODULE_SCOPE; }
bool is_script_scope() const { return scope_type_ == SCRIPT_SCOPE; }
bool is_catch_scope() const { return scope_type_ == CATCH_SCOPE; }
bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
- bool is_arrow_scope() const { return scope_type_ == ARROW_SCOPE; }
+ bool is_arrow_scope() const {
+ return is_function_scope() && IsArrowFunction(function_kind_);
+ }
bool is_declaration_scope() const { return is_declaration_scope_; }
void set_is_declaration_scope() { is_declaration_scope_ = true; }
@@ -328,26 +343,28 @@ class Scope: public ZoneObject {
// Is this scope inside a with statement.
bool inside_with() const { return scope_inside_with_; }
- // Does this scope contain a with statement.
- bool contains_with() const { return scope_contains_with_; }
// Does this scope access "arguments".
bool uses_arguments() const { return scope_uses_arguments_; }
- // Does any inner scope access "arguments".
- bool inner_uses_arguments() const { return inner_scope_uses_arguments_; }
// Does this scope access "super" property (super.foo).
bool uses_super_property() const { return scope_uses_super_property_; }
// Does this scope have the potential to execute declarations non-linearly?
bool is_nonlinear() const { return scope_nonlinear_; }
// Whether this needs to be represented by a runtime context.
- bool NeedsContext() const { return num_heap_slots() > 0; }
+ bool NeedsContext() const {
+ // Catch and module scopes always have heap slots.
+ DCHECK(!is_catch_scope() || num_heap_slots() > 0);
+ DCHECK(!is_module_scope() || num_heap_slots() > 0);
+ return is_with_scope() || num_heap_slots() > 0;
+ }
bool NeedsHomeObject() const {
return scope_uses_super_property_ ||
- (scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
- IsAccessorFunction(function_kind()) ||
- IsClassConstructor(function_kind())));
+ ((scope_calls_eval_ || inner_scope_calls_eval_) &&
+ (IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()) ||
+ IsClassConstructor(function_kind())));
}
const Scope* NearestOuterEvalScope() const {
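The strengthened `NeedsContext()` moves the catch/module invariants from `ContextChainLength()` into the predicate itself and treats with-scopes as context-requiring regardless of their heap-slot count. A toy version of the same predicate, with illustrative types:

#include <cassert>

enum ScopeType { CATCH_SCOPE, MODULE_SCOPE, WITH_SCOPE, FUNCTION_SCOPE };

struct MiniScope {
  ScopeType type;
  int num_heap_slots;

  bool NeedsContext() const {
    // Catch and module scopes always have heap slots.
    assert(type != CATCH_SCOPE || num_heap_slots > 0);
    assert(type != MODULE_SCOPE || num_heap_slots > 0);
    return type == WITH_SCOPE || num_heap_slots > 0;
  }
};

int main() {
  MiniScope with_scope{WITH_SCOPE, 0};
  assert(with_scope.NeedsContext());  // a with-scope needs a context early
  return 0;
}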
@@ -374,8 +391,6 @@ class Scope: public ZoneObject {
return receiver_;
}
- Variable* LookupThis() { return Lookup(ast_value_factory_->this_string()); }
-
// TODO(wingo): Add a GLOBAL_SCOPE scope type which will lexically allocate
// "this" (and no other variable) on the native context. Script scopes then
// will not have a "this" declaration.
@@ -519,6 +534,10 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
+ // The maximum number of nested contexts required for this scope and any inner
+ // scopes.
+ int MaxNestedContextChainLength();
+
// Find the first function, script, eval or (declaration) block scope. This is
// the scope where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
@@ -579,9 +598,7 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Implementation.
- protected:
- friend class ParserFactory;
-
+ private:
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
@@ -657,7 +674,6 @@ class Scope: public ZoneObject {
// Computed via PropagateScopeInfo.
bool outer_scope_calls_sloppy_eval_;
bool inner_scope_calls_eval_;
- bool inner_scope_uses_arguments_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -791,7 +807,6 @@ class Scope: public ZoneObject {
MUST_USE_RESULT
bool AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
- private:
// Construct a scope based on the scope info.
Scope(Zone* zone, Scope* inner_scope, ScopeType type,
Handle<ScopeInfo> scope_info, AstValueFactory* value_factory);
@@ -807,6 +822,16 @@ class Scope: public ZoneObject {
}
}
+ void RemoveInnerScope(Scope* inner_scope) {
+ DCHECK_NOT_NULL(inner_scope);
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ if (inner_scopes_[i] == inner_scope) {
+ inner_scopes_.Remove(i);
+ break;
+ }
+ }
+ }
+
void SetDefaults(ScopeType type, Scope* outer_scope,
Handle<ScopeInfo> scope_info,
FunctionKind function_kind = kNormalFunction);
@@ -821,6 +846,7 @@ class Scope: public ZoneObject {
int class_declaration_group_start_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SCOPES_H_
diff --git a/deps/v8/src/small-pointer-list.h b/deps/v8/src/small-pointer-list.h
index 241689e5b2..9ece249064 100644
--- a/deps/v8/src/small-pointer-list.h
+++ b/deps/v8/src/small-pointer-list.h
@@ -170,6 +170,7 @@ class SmallPointerList {
DISALLOW_COPY_AND_ASSIGN(SmallPointerList);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SMALL_POINTER_LIST_H_
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 09cbf93e1e..c69025adca 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -9,10 +9,8 @@
#include "include/libplatform/libplatform.h"
#include "src/assembler.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
#include "src/flags.h"
#include "src/list.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index c923a0f353..c1e2fcde62 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -62,6 +62,7 @@ void ReadNatives();
void DisposeNatives();
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SNAPSHOT_NATIVES_H_
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index fde170d0bf..4ccadd256d 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -129,8 +129,12 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"double_constants.minus_one_half");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
- Add(ExternalReference::vector_store_virtual_register(isolate).address(),
- "Isolate::vector_store_virtual_register()");
+ Add(ExternalReference::virtual_handler_register(isolate).address(),
+ "Isolate::virtual_handler_register()");
+ Add(ExternalReference::virtual_slot_register(isolate).address(),
+ "Isolate::virtual_slot_register()");
+ Add(ExternalReference::runtime_function_table_address(isolate).address(),
+ "Runtime::runtime_function_table_address()");
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
@@ -350,31 +354,6 @@ const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
}
-RootIndexMap::RootIndexMap(Isolate* isolate) {
- map_ = isolate->root_index_map();
- if (map_ != NULL) return;
- map_ = new HashMap(HashMap::PointersMatch);
- for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
- Object* root = isolate->heap()->root(root_index);
- // Omit root entries that can be written after initialization. They must
- // not be referenced through the root list in the snapshot.
- if (root->IsHeapObject() &&
- isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
- HeapObject* heap_object = HeapObject::cast(root);
- HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
- if (entry != NULL) {
- // Some are initialized to a previous value in the root list.
- DCHECK_LT(GetValue(entry), i);
- } else {
- SetValue(LookupEntry(map_, heap_object, true), i);
- }
- }
- }
- isolate->set_root_index_map(map_);
-}
-
-
class CodeAddressMap: public CodeEventLogger {
public:
explicit CodeAddressMap(Isolate* isolate)
@@ -382,18 +361,17 @@ class CodeAddressMap: public CodeEventLogger {
isolate->logger()->addCodeEventListener(this);
}
- virtual ~CodeAddressMap() {
+ ~CodeAddressMap() override {
isolate_->logger()->removeCodeEventListener(this);
}
- virtual void CodeMoveEvent(Address from, Address to) {
+ void CodeMoveEvent(Address from, Address to) override {
address_to_name_map_.Move(from, to);
}
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
- }
+ void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) override {}
- virtual void CodeDeleteEvent(Address from) {
+ void CodeDeleteEvent(Address from) override {
address_to_name_map_.Remove(from);
}
@@ -473,10 +451,8 @@ class CodeAddressMap: public CodeEventLogger {
DISALLOW_COPY_AND_ASSIGN(NameMap);
};
- virtual void LogRecordedBuffer(Code* code,
- SharedFunctionInfo*,
- const char* name,
- int length) {
+ void LogRecordedBuffer(Code* code, SharedFunctionInfo*, const char* name,
+ int length) override {
address_to_name_map_.Insert(code->address(), name, length);
}
@@ -707,7 +683,7 @@ class StringTableInsertionKey : public HashTableKey {
return String::cast(key)->Hash();
}
- MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) override {
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
return handle(string_, isolate);
}
@@ -1397,21 +1373,21 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
code_has_been_output_(false) {}
void Serialize();
void SerializeDeferred();
- void VisitPointers(Object** start, Object** end);
- void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReference(Address* p);
- void VisitExternalReference(RelocInfo* rinfo);
- void VisitInternalReference(RelocInfo* rinfo);
- void VisitCodeTarget(RelocInfo* target);
- void VisitCodeEntry(Address entry_address);
- void VisitCell(RelocInfo* rinfo);
- void VisitRuntimeEntry(RelocInfo* reloc);
+ void VisitPointers(Object** start, Object** end) override;
+ void VisitEmbeddedPointer(RelocInfo* target) override;
+ void VisitExternalReference(Address* p) override;
+ void VisitExternalReference(RelocInfo* rinfo) override;
+ void VisitInternalReference(RelocInfo* rinfo) override;
+ void VisitCodeTarget(RelocInfo* target) override;
+ void VisitCodeEntry(Address entry_address) override;
+ void VisitCell(RelocInfo* rinfo) override;
+ void VisitRuntimeEntry(RelocInfo* reloc) override;
// Used for serializing the external strings that hold the natives source.
void VisitExternalOneByteString(
- v8::String::ExternalOneByteStringResource** resource);
+ v8::String::ExternalOneByteStringResource** resource) override;
// We can't serialize a heap with external two byte strings.
void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {
+ v8::String::ExternalStringResource** resource) override {
UNREACHABLE();
}
@@ -1504,7 +1480,6 @@ void PartialSerializer::Serialize(Object** o) {
context->set(Context::NEXT_CONTEXT_LINK,
isolate_->heap()->undefined_value());
DCHECK(!context->global_object()->IsUndefined());
- DCHECK(!context->builtins()->IsUndefined());
}
}
VisitPointer(o);
@@ -2425,7 +2400,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
// Serialize code object.
SnapshotByteSink sink(info->code()->CodeSize() * 2);
- CodeSerializer cs(isolate, &sink, *source, info->code());
+ CodeSerializer cs(isolate, &sink, *source);
DisallowHeapAllocation no_gc;
Object** location = Handle<Object>::cast(info).location();
cs.VisitPointer(location);
@@ -2479,14 +2454,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
return;
case Code::FUNCTION:
DCHECK(code_object->has_reloc_info_for_serialization());
- // Only serialize the code for the toplevel function unless specified
- // by flag. Replace code of inner functions by the lazy compile builtin.
- // This is safe, as checked in Compiler::GetSharedFunctionInfo.
- if (code_object != main_code_ && !FLAG_serialize_inner) {
- SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
- } else {
- SerializeGeneric(code_object, how_to_code, where_to_point);
- }
+ SerializeGeneric(code_object, how_to_code, where_to_point);
return;
case Code::WASM_FUNCTION:
UNREACHABLE();
@@ -2497,7 +2465,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
- CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject());
+ CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
// There should be no hash table embedded. They would require rehashing.
CHECK(!obj->IsHashTable());
// We expect no instantiated function objects or contexts.
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index d5374a28e0..f68ad3739a 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -5,7 +5,7 @@
#ifndef V8_SNAPSHOT_SERIALIZE_H_
#define V8_SNAPSHOT_SERIALIZE_H_
-#include "src/hashmap.h"
+#include "src/address-map.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -70,54 +70,6 @@ class ExternalReferenceEncoder {
};
-class AddressMapBase {
- protected:
- static void SetValue(HashMap::Entry* entry, uint32_t v) {
- entry->value = reinterpret_cast<void*>(v);
- }
-
- static uint32_t GetValue(HashMap::Entry* entry) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
- }
-
- inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
- bool insert) {
- if (insert) {
- map->LookupOrInsert(Key(obj), Hash(obj));
- }
- return map->Lookup(Key(obj), Hash(obj));
- }
-
- private:
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-};
-
-
-class RootIndexMap : public AddressMapBase {
- public:
- explicit RootIndexMap(Isolate* isolate);
-
- static const int kInvalidRootIndex = -1;
-
- int Lookup(HeapObject* obj) {
- HashMap::Entry* entry = LookupEntry(map_, obj, false);
- if (entry) return GetValue(entry);
- return kInvalidRootIndex;
- }
-
- private:
- HashMap* map_;
-
- DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
-};
-
-
class PartialCacheIndexMap : public AddressMapBase {
public:
PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
@@ -140,125 +92,6 @@ class PartialCacheIndexMap : public AddressMapBase {
};
-class BackReference {
- public:
- explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
-
- BackReference() : bitfield_(kInvalidValue) {}
-
- static BackReference SourceReference() { return BackReference(kSourceValue); }
-
- static BackReference GlobalProxyReference() {
- return BackReference(kGlobalProxyValue);
- }
-
- static BackReference LargeObjectReference(uint32_t index) {
- return BackReference(SpaceBits::encode(LO_SPACE) |
- ChunkOffsetBits::encode(index));
- }
-
- static BackReference DummyReference() { return BackReference(kDummyValue); }
-
- static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- DCHECK_NE(LO_SPACE, space);
- return BackReference(
- SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
- ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
- }
-
- bool is_valid() const { return bitfield_ != kInvalidValue; }
- bool is_source() const { return bitfield_ == kSourceValue; }
- bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
-
- AllocationSpace space() const {
- DCHECK(is_valid());
- return SpaceBits::decode(bitfield_);
- }
-
- uint32_t chunk_offset() const {
- DCHECK(is_valid());
- return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
- }
-
- uint32_t large_object_index() const {
- DCHECK(is_valid());
- DCHECK(chunk_index() == 0);
- return ChunkOffsetBits::decode(bitfield_);
- }
-
- uint32_t chunk_index() const {
- DCHECK(is_valid());
- return ChunkIndexBits::decode(bitfield_);
- }
-
- uint32_t reference() const {
- DCHECK(is_valid());
- return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
- }
-
- uint32_t bitfield() const { return bitfield_; }
-
- private:
- static const uint32_t kInvalidValue = 0xFFFFFFFF;
- static const uint32_t kSourceValue = 0xFFFFFFFE;
- static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
- static const uint32_t kDummyValue = 0xFFFFFFFC;
- static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
- static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
-
- public:
- static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
-
- private:
- class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
- class ChunkIndexBits
- : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
- class SpaceBits
- : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
- };
-
- uint32_t bitfield_;
-};
-
-
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class BackReferenceMap : public AddressMapBase {
- public:
- BackReferenceMap()
- : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
-
- ~BackReferenceMap() { delete map_; }
-
- BackReference Lookup(HeapObject* obj) {
- HashMap::Entry* entry = LookupEntry(map_, obj, false);
- return entry ? BackReference(GetValue(entry)) : BackReference();
- }
-
- void Add(HeapObject* obj, BackReference b) {
- DCHECK(b.is_valid());
- DCHECK_NULL(LookupEntry(map_, obj, false));
- HashMap::Entry* entry = LookupEntry(map_, obj, true);
- SetValue(entry, b.bitfield());
- }
-
- void AddSourceString(String* string) {
- Add(string, BackReference::SourceReference());
- }
-
- void AddGlobalProxy(HeapObject* global_proxy) {
- Add(global_proxy, BackReference::GlobalProxyReference());
- }
-
- private:
- DisallowHeapAllocation no_allocation_;
- HashMap* map_;
- DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
-};
-
-
class HotObjectsList {
public:
HotObjectsList() : index_(0) {
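The `AddressMapBase`/`RootIndexMap`/`BackReference`/`BackReferenceMap` family removed here has a new home behind the src/address-map.h include added at the top of this file's diff. The `BackReference` encoding packs (space, chunk index, chunk offset) into one `uint32_t`; a freestanding sketch of the same BitField arithmetic, with illustrative widths (the real ones derive from kPageSizeBits and kSpaceTagSize):

#include <cassert>
#include <cstdint>

template <int kShift, int kBits>
struct BitField {
  static const uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static uint32_t encode(uint32_t v) { return v << kShift; }
  static uint32_t decode(uint32_t f) { return (f & kMask) >> kShift; }
};

// Example split: 19 offset bits, 10 index bits, 3 space-tag bits.
using ChunkOffsetBits = BitField<0, 19>;
using ChunkIndexBits = BitField<19, 10>;
using SpaceBits = BitField<29, 3>;

int main() {
  uint32_t ref = SpaceBits::encode(2) | ChunkIndexBits::encode(7) |
                 ChunkOffsetBits::encode(1234);
  assert(SpaceBits::decode(ref) == 2);
  assert(ChunkIndexBits::decode(ref) == 7);
  assert(ChunkOffsetBits::decode(ref) == 1234);
  return 0;
}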
@@ -534,7 +367,7 @@ class Deserializer: public SerializerDeserializer {
DecodeReservation(data->Reservations());
}
- virtual ~Deserializer();
+ ~Deserializer() override;
// Deserialize the snapshot into an empty heap.
void Deserialize(Isolate* isolate);
@@ -554,11 +387,9 @@ class Deserializer: public SerializerDeserializer {
}
private:
- virtual void VisitPointers(Object** start, Object** end);
+ void VisitPointers(Object** start, Object** end) override;
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
- UNREACHABLE();
- }
+ void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
void Initialize(Isolate* isolate);
@@ -645,7 +476,7 @@ class CodeAddressMap;
class Serializer : public SerializerDeserializer {
public:
Serializer(Isolate* isolate, SnapshotByteSink* sink);
- ~Serializer();
+ ~Serializer() override;
void VisitPointers(Object** start, Object** end) override;
void EncodeReservations(List<SerializedData::Reservation>* out) const;
@@ -792,12 +623,12 @@ class PartialSerializer : public Serializer {
InitializeCodeAddressMap();
}
- ~PartialSerializer() { OutputStatistics("PartialSerializer"); }
+ ~PartialSerializer() override { OutputStatistics("PartialSerializer"); }
// Serialize the objects reachable from a single object pointer.
void Serialize(Object** o);
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
private:
int PartialSnapshotCacheIndex(HeapObject* o);
@@ -816,7 +647,7 @@ class PartialSerializer : public Serializer {
class StartupSerializer : public Serializer {
public:
StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
- ~StartupSerializer() { OutputStatistics("StartupSerializer"); }
+ ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }
// The StartupSerializer has to serialize the root array, which is slightly
// different.
@@ -827,8 +658,8 @@ class StartupSerializer : public Serializer {
// 2) Partial snapshot cache.
// 3) Weak references (e.g. the string table).
virtual void SerializeStrongReferences();
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
void SerializeWeakReferencesAndDeferred();
void Serialize() {
SerializeStrongReferences();
@@ -863,16 +694,15 @@ class CodeSerializer : public Serializer {
const List<uint32_t>* stub_keys() const { return &stub_keys_; }
private:
- CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
- Code* main_code)
- : Serializer(isolate, sink), source_(source), main_code_(main_code) {
+ CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
+ : Serializer(isolate, sink), source_(source) {
back_reference_map_.AddSourceString(source);
}
- ~CodeSerializer() { OutputStatistics("CodeSerializer"); }
+ ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
WhereToPoint where_to_point);
@@ -886,7 +716,6 @@ class CodeSerializer : public Serializer {
DisallowHeapAllocation no_gc_;
String* source_;
- Code* main_code_;
List<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -989,6 +818,7 @@ class SerializedCodeData : public SerializedData {
static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
static const int kHeaderSize = kChecksum2Offset + kInt32Size;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SNAPSHOT_SERIALIZE_H_
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index e0290c9415..af617ccee1 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -101,7 +101,7 @@ class SnapshotByteSink {
List<byte> data_;
};
-} // namespace v8::internal
+} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 1379644fd8..f0b90bbacd 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -89,6 +89,7 @@ class Snapshot : public AllStatic {
void SetSnapshotFromFile(StartupData* snapshot_blob);
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SNAPSHOT_SNAPSHOT_H_
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
index 6c7b4f404c..1adfdac6db 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/splay-tree-inl.h
@@ -290,6 +290,7 @@ void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SPLAY_TREE_INL_H_
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/splay-tree.h
index 30e5d6787f..bee8429e39 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/splay-tree.h
@@ -198,6 +198,7 @@ class SplayTree {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SPLAY_TREE_H_
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index e24def6b68..6c5144d574 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -52,7 +52,7 @@ IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
part_length_(kInitialPartLength),
current_index_(0) {
// Create an accumulator handle starting with the empty string.
- accumulator_ = Handle<String>(isolate->heap()->empty_string(), isolate);
+ accumulator_ = Handle<String>::New(isolate->heap()->empty_string(), isolate);
current_part_ =
factory()->NewRawOneByteString(part_length_).ToHandleChecked();
}
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 554277dab1..98bd82b97a 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -346,10 +346,12 @@ class IncrementalStringBuilder {
DCHECK(string->length() >= required_length);
}
- ~NoExtendString() {
+ Handle<String> Finalize() {
Handle<SeqString> string = Handle<SeqString>::cast(string_);
int length = NoExtend<DestChar>::written();
- *string_.location() = *SeqString::Truncate(string, length);
+ Handle<String> result = SeqString::Truncate(string, length);
+ string_ = Handle<String>();
+ return result;
}
private:
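Replacing `~NoExtendString()` with an explicit `Finalize()` makes the truncation a visible step that returns a fresh handle, instead of a destructor side effect that wrote through `string_.location()`. The general destructor-to-Finalize migration, sketched on a plain string:

#include <cstddef>
#include <string>
#include <utility>

class Builder {
 public:
  explicit Builder(std::string s) : buffer_(std::move(s)) {}
  // Produce the result via an explicit call, not at scope exit.
  std::string Finalize(std::size_t written) {
    std::string result = buffer_.substr(0, written);  // "truncate"
    buffer_.clear();                                  // detach builder state
    return result;
  }

 private:
  std::string buffer_;
};

int main() {
  Builder b("hello world");
  return b.Finalize(5) == "hello" ? 0 : 1;
}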
@@ -429,7 +431,7 @@ void IncrementalStringBuilder::Append(SrcChar c) {
}
if (current_index_ == part_length_) Extend();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRING_BUILDER_H_
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index c0cc2cad4b..7db09934f5 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -563,6 +563,7 @@ int SearchString(Isolate* isolate,
return search.Search(subject, start_index);
}
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRING_SEARCH_H_
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 2f11b182d0..03ea0620ad 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -178,6 +178,7 @@ class StringStream final {
DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRING_STREAM_H_
diff --git a/deps/v8/src/strtod.h b/deps/v8/src/strtod.h
index 737b5484c5..75e60b029e 100644
--- a/deps/v8/src/strtod.h
+++ b/deps/v8/src/strtod.h
@@ -14,6 +14,7 @@ namespace internal {
// contain a dot or a sign. It must not start with '0', and must not be empty.
double Strtod(Vector<const char> buffer, int exponent);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRTOD_H_
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.cc b/deps/v8/src/third_party/fdlibm/fdlibm.cc
index 1d49de0248..0ef2301ae3 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.cc
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.cc
@@ -29,75 +29,6 @@ namespace fdlibm {
inline double scalbn(double x, int y) { return _scalb(x, y); }
#endif // _MSC_VER
-const double MathConstants::constants[] = {
- 6.36619772367581382433e-01, // invpio2 0
- 1.57079632673412561417e+00, // pio2_1 1
- 6.07710050650619224932e-11, // pio2_1t 2
- 6.07710050630396597660e-11, // pio2_2 3
- 2.02226624879595063154e-21, // pio2_2t 4
- 2.02226624871116645580e-21, // pio2_3 5
- 8.47842766036889956997e-32, // pio2_3t 6
- -1.66666666666666324348e-01, // S1 7 coefficients for sin
- 8.33333333332248946124e-03, // 8
- -1.98412698298579493134e-04, // 9
- 2.75573137070700676789e-06, // 10
- -2.50507602534068634195e-08, // 11
- 1.58969099521155010221e-10, // S6 12
- 4.16666666666666019037e-02, // C1 13 coefficients for cos
- -1.38888888888741095749e-03, // 14
- 2.48015872894767294178e-05, // 15
- -2.75573143513906633035e-07, // 16
- 2.08757232129817482790e-09, // 17
- -1.13596475577881948265e-11, // C6 18
- 3.33333333333334091986e-01, // T0 19 coefficients for tan
- 1.33333333333201242699e-01, // 20
- 5.39682539762260521377e-02, // 21
- 2.18694882948595424599e-02, // 22
- 8.86323982359930005737e-03, // 23
- 3.59207910759131235356e-03, // 24
- 1.45620945432529025516e-03, // 25
- 5.88041240820264096874e-04, // 26
- 2.46463134818469906812e-04, // 27
- 7.81794442939557092300e-05, // 28
- 7.14072491382608190305e-05, // 29
- -1.85586374855275456654e-05, // 30
- 2.59073051863633712884e-05, // T12 31
- 7.85398163397448278999e-01, // pio4 32
- 3.06161699786838301793e-17, // pio4lo 33
- 6.93147180369123816490e-01, // ln2_hi 34
- 1.90821492927058770002e-10, // ln2_lo 35
- 6.666666666666666666e-01, // 2/3 36
- 6.666666666666735130e-01, // LP1 37 coefficients for log1p
- 3.999999999940941908e-01, // 38
- 2.857142874366239149e-01, // 39
- 2.222219843214978396e-01, // 40
- 1.818357216161805012e-01, // 41
- 1.531383769920937332e-01, // 42
- 1.479819860511658591e-01, // LP7 43
- 7.09782712893383973096e+02, // 44 overflow threshold for expm1
- 1.44269504088896338700e+00, // 1/ln2 45
- -3.33333333333331316428e-02, // Q1 46 coefficients for expm1
- 1.58730158725481460165e-03, // 47
- -7.93650757867487942473e-05, // 48
- 4.00821782732936239552e-06, // 49
- -2.01099218183624371326e-07, // Q5 50
- 710.4758600739439, // 51 overflow threshold sinh, cosh
- 4.34294481903251816668e-01, // ivln10 52 coefficients for log10
- 3.01029995663611771306e-01, // log10_2hi 53
- 3.69423907715893078616e-13, // log10_2lo 54
- 5.99999999999994648725e-01, // L1 55 coefficients for log2
- 4.28571428578550184252e-01, // 56
- 3.33333329818377432918e-01, // 57
- 2.72728123808534006489e-01, // 58
- 2.30660745775561754067e-01, // 59
- 2.06975017800338417784e-01, // L6 60
- 9.61796693925975554329e-01, // cp 61 2/(3*ln(2))
- 9.61796700954437255859e-01, // cp_h 62
- -7.02846165095275826516e-09, // cp_l 63
- 5.84962487220764160156e-01, // dp_h 64
- 1.35003920212974897128e-08 // dp_l 65
-};
-
// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
static const int two_over_pi[] = {
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.h b/deps/v8/src/third_party/fdlibm/fdlibm.h
index c7bc09a1b8..e417c8ce59 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.h
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.h
@@ -21,11 +21,7 @@ namespace fdlibm {
int rempio2(double x, double* y);
-// Constants to be exposed to builtins via Float64Array.
-struct MathConstants {
- static const double constants[66];
-};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FDLIBM_H_
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.js b/deps/v8/src/third_party/fdlibm/fdlibm.js
index a8935565b7..a5e789f38a 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.js
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.js
@@ -16,16 +16,9 @@
// The following is a straightforward translation of fdlibm routines
// by Raymond Toy (rtoy@google.com).
-// Double constants that do not have empty lower 32 bits are found in fdlibm.cc
-// and exposed through kMath as typed array. We assume the compiler to convert
-// from decimal to binary accurately enough to produce the intended values.
-// kMath is initialized to a Float64Array during genesis and not writable.
// rempio2result is used as a container for return values of %RemPiO2. It is
// initialized to a two-element Float64Array during genesis.
-var kMath;
-var rempio2result;
-
(function(global, utils) {
"use strict";
@@ -35,27 +28,33 @@ var rempio2result;
// -------------------------------------------------------------------
// Imports
+var GlobalFloat64Array = global.Float64Array;
var GlobalMath = global.Math;
-
var MathAbs;
var MathExp;
+var NaN = %GetRootNaN();
+var rempio2result;
utils.Import(function(from) {
MathAbs = from.MathAbs;
MathExp = from.MathExp;
});
+utils.CreateDoubleResultArray = function(global) {
+ rempio2result = new GlobalFloat64Array(2);
+};
+
// -------------------------------------------------------------------
-define INVPIO2 = kMath[0];
-define PIO2_1 = kMath[1];
-define PIO2_1T = kMath[2];
-define PIO2_2 = kMath[3];
-define PIO2_2T = kMath[4];
-define PIO2_3 = kMath[5];
-define PIO2_3T = kMath[6];
-define PIO4 = kMath[32];
-define PIO4LO = kMath[33];
+define INVPIO2 = 6.36619772367581382433e-01;
+define PIO2_1 = 1.57079632673412561417;
+define PIO2_1T = 6.07710050650619224932e-11;
+define PIO2_2 = 6.07710050630396597660e-11;
+define PIO2_2T = 2.02226624879595063154e-21;
+define PIO2_3 = 2.02226624871116645580e-21;
+define PIO2_3T = 8.47842766036889956997e-32;
+define PIO4 = 7.85398163397448278999e-01;
+define PIO4LO = 3.06161699786838301793e-17;
// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For
// precision, r is returned as two values y0 and y1 such that r = y0 + y1
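The PIO2_1/PIO2_1T and PIO2_2/PIO2_2T pairs above are the classic fdlibm high/low split of pi/2: each leading part has enough trailing zero bits that x - k*PIO2_1 stays exact, and the tail carries the remainder. A quick check that the first pair really sums to pi/2 (using 2*atan(1) as the reference, which is pi/2 to double precision on common libms):

#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  const double PIO2_1 = 1.57079632673412561417e+00;   // leading 33 bits of pi/2
  const double PIO2_1T = 6.07710050650619224932e-11;  // the tail
  const double pio2 = 2.0 * std::atan(1.0);
  assert(std::fabs((PIO2_1 + PIO2_1T) - pio2) < 1e-15);
  std::printf("pi/2 split ok\n");
  return 0;
}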
@@ -267,9 +266,19 @@ endmacro
// Set returnTan to 1 for tan; -1 for cot. Anything else is illegal
// and will cause incorrect results.
//
-macro KTAN(x)
-kMath[19+x]
-endmacro
+define T00 = 3.33333333333334091986e-01;
+define T01 = 1.33333333333201242699e-01;
+define T02 = 5.39682539762260521377e-02;
+define T03 = 2.18694882948595424599e-02;
+define T04 = 8.86323982359930005737e-03;
+define T05 = 3.59207910759131235356e-03;
+define T06 = 1.45620945432529025516e-03;
+define T07 = 5.88041240820264096874e-04;
+define T08 = 2.46463134818469906812e-04;
+define T09 = 7.81794442939557092300e-05;
+define T10 = 7.14072491382608190305e-05;
+define T11 = -1.85586374855275456654e-05;
+define T12 = 2.59073051863633712884e-05;
function KernelTan(x, y, returnTan) {
var z;
@@ -312,13 +321,13 @@ function KernelTan(x, y, returnTan) {
// Break x^5 * (T1 + x^2*T2 + ...) into
// x^5 * (T1 + x^4*T3 + ... + x^20*T11) +
// x^5 * (x^2 * (T2 + x^4*T4 + ... + x^22*T12))
- var r = KTAN(1) + w * (KTAN(3) + w * (KTAN(5) +
- w * (KTAN(7) + w * (KTAN(9) + w * KTAN(11)))));
- var v = z * (KTAN(2) + w * (KTAN(4) + w * (KTAN(6) +
- w * (KTAN(8) + w * (KTAN(10) + w * KTAN(12))))));
+ var r = T01 + w * (T03 + w * (T05 +
+ w * (T07 + w * (T09 + w * T11))));
+ var v = z * (T02 + w * (T04 + w * (T06 +
+ w * (T08 + w * (T10 + w * T12)))));
var s = z * x;
r = y + z * (s * (r + v) + y);
- r = r + KTAN(0) * s;
+ r = r + T00 * s;
w = x + r;
if (ix >= 0x3fe59428) {
return (1 - ((hx >> 30) & 2)) *
@@ -451,12 +460,17 @@ function MathTan(x) {
//
// See HP-15C Advanced Functions Handbook, p.193.
//
-define LN2_HI = kMath[34];
-define LN2_LO = kMath[35];
-define TWO_THIRD = kMath[36];
-macro KLOG1P(x)
-(kMath[37+x])
-endmacro
+define LN2_HI = 6.93147180369123816490e-01;
+define LN2_LO = 1.90821492927058770002e-10;
+define TWO_THIRD = 6.666666666666666666e-01;
+define LP1 = 6.666666666666735130e-01;
+define LP2 = 3.999999999940941908e-01;
+define LP3 = 2.857142874366239149e-01;
+define LP4 = 2.222219843214978396e-01;
+define LP5 = 1.818357216161805012e-01;
+define LP6 = 1.531383769920937332e-01;
+define LP7 = 1.479819860511658591e-01;
+
// 2^54
define TWO54 = 18014398509481984;
@@ -476,7 +490,7 @@ function MathLog1p(x) {
if (x === -1) {
return -INFINITY; // log1p(-1) = -inf
} else {
- return NAN; // log1p(x<-1) = NaN
+ return NaN; // log1p(x<-1) = NaN
}
} else if (ax < 0x3c900000) {
// For |x| < 2^-54 we can return x.
@@ -492,7 +506,7 @@ function MathLog1p(x) {
}
}
- // Handle Infinity and NAN
+ // Handle Infinity and NaN
if (hx >= 0x7ff00000) return x;
if (k !== 0) {
@@ -538,9 +552,8 @@ function MathLog1p(x) {
var s = f / (2 + f);
var z = s * s;
- var R = z * (KLOG1P(0) + z * (KLOG1P(1) + z *
- (KLOG1P(2) + z * (KLOG1P(3) + z *
- (KLOG1P(4) + z * (KLOG1P(5) + z * KLOG1P(6)))))));
+ var R = z * (LP1 + z * (LP2 + z * (LP3 + z * (LP4 +
+ z * (LP5 + z * (LP6 + z * LP7))))));
if (k === 0) {
return f - (hfsq - s * (hfsq + R));
} else {
@@ -637,11 +650,13 @@ function MathLog1p(x) {
// For IEEE double
// if x > 7.09782712893383973096e+02 then expm1(x) overflow
//
-define KEXPM1_OVERFLOW = kMath[44];
-define INVLN2 = kMath[45];
-macro KEXPM1(x)
-(kMath[46+x])
-endmacro
+define KEXPM1_OVERFLOW = 7.09782712893383973096e+02;
+define INVLN2 = 1.44269504088896338700;
+define EXPM1_1 = -3.33333333333331316428e-02;
+define EXPM1_2 = 1.58730158725481460165e-03;
+define EXPM1_3 = -7.93650757867487942473e-05;
+define EXPM1_4 = 4.00821782732936239552e-06;
+define EXPM1_5 = -2.01099218183624371326e-07;
function MathExpm1(x) {
x = x * 1; // Convert to number.
@@ -701,8 +716,8 @@ function MathExpm1(x) {
// x is now in primary range
var hfx = 0.5 * x;
var hxs = x * hfx;
- var r1 = 1 + hxs * (KEXPM1(0) + hxs * (KEXPM1(1) + hxs *
- (KEXPM1(2) + hxs * (KEXPM1(3) + hxs * KEXPM1(4)))));
+ var r1 = 1 + hxs * (EXPM1_1 + hxs * (EXPM1_2 + hxs *
+ (EXPM1_3 + hxs * (EXPM1_4 + hxs * EXPM1_5))));
t = 3 - r1 * hfx;
var e = hxs * ((r1 - t) / (6 - x * t));
if (k === 0) { // c is 0
@@ -760,7 +775,7 @@ function MathExpm1(x) {
// sinh(x) is |x| if x is +Infinity, -Infinity, or NaN.
// only sinh(0)=0 is exact for finite x.
//
-define KSINH_OVERFLOW = kMath[51];
+define KSINH_OVERFLOW = 710.4758600739439;
define TWO_M28 = 3.725290298461914e-9; // 2^-28, empty lower half
define LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
@@ -812,7 +827,7 @@ function MathSinh(x) {
// cosh(x) is |x| if x is +INF, -INF, or NaN.
// only cosh(0)=1 is exact for finite x.
//
-define KCOSH_OVERFLOW = kMath[51];
+define KCOSH_OVERFLOW = 710.4758600739439;
function MathCosh(x) {
x = x * 1; // Convert to number.
@@ -843,6 +858,63 @@ function MathCosh(x) {
return INFINITY;
}
+// ES6 draft 09-27-13, section 20.2.2.33.
+// Math.tanh(x)
+// Method :
+// x -x
+// e - e
+// 0. tanh(x) is defined to be -----------
+// x -x
+// e + e
+// 1. reduce x to non-negative by tanh(-x) = -tanh(x).
+// 2. 0 <= x <= 2**-55 : tanh(x) := x*(one+x)
+// -t
+// 2**-55 < x <= 1 : tanh(x) := -----; t = expm1(-2x)
+// t + 2
+// 2
+// 1 <= x <= 22.0 : tanh(x) := 1- ----- ; t = expm1(2x)
+// t + 2
+// 22.0 < x <= INF : tanh(x) := 1.
+//
+// Special cases:
+// tanh(NaN) is NaN;
+// only tanh(0) = 0 is exact for finite argument.
+//
+
+define TWO_M55 = 2.77555756156289135105e-17; // 2^-55, empty lower half
+
+function MathTanh(x) {
+ x = x * 1; // Convert to number.
+ // x is Infinity or NaN
+ if (!NUMBER_IS_FINITE(x)) {
+ if (x > 0) return 1;
+ if (x < 0) return -1;
+ return x;
+ }
+
+ var ax = MathAbs(x);
+ var z;
+ // |x| < 22
+ if (ax < 22) {
+ if (ax < TWO_M55) {
+ // |x| < 2^-55, tanh(small) = small.
+ return x;
+ }
+ if (ax >= 1) {
+ // |x| >= 1
+ var t = MathExpm1(2 * ax);
+ z = 1 - 2 / (t + 2);
+ } else {
+ var t = MathExpm1(-2 * ax);
+ z = -t / (t + 2);
+ }
+ } else {
+ // |x| > 22, return +/- 1
+ z = 1;
+ }
+ return (x >= 0) ? z : -z;
+}
+
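MathTanh above follows the fdlibm recipe: saturate for large |x|, use expm1 for the two middle ranges, and pass tiny inputs through. A C++ transcription of the same reduction (illustrative; the real code also handles NaN/Infinity explicitly and the 2^-55 underflow threshold):

#include <cmath>
#include <cstdio>

double Tanh(double x) {
  double ax = std::fabs(x);
  double z;
  if (ax < 22) {
    if (ax >= 1) {
      double t = std::expm1(2 * ax);
      z = 1 - 2 / (t + 2);  // 1 <= |x| < 22
    } else {
      double t = std::expm1(-2 * ax);
      z = -t / (t + 2);     // |x| < 1
    }
  } else {
    z = 1;  // |x| >= 22: tanh saturates at +/-1 in double precision
  }
  return x >= 0 ? z : -z;
}

int main() {
  std::printf("%.17g %.17g\n", Tanh(0.5), std::tanh(0.5));
  return 0;
}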
// ES6 draft 09-27-13, section 20.2.2.21.
// Return the base 10 logarithm of x
//
@@ -870,9 +942,9 @@ function MathCosh(x) {
// log10(10**N) = N for N=0,1,...,22.
//
-define IVLN10 = kMath[52];
-define LOG10_2HI = kMath[53];
-define LOG10_2LO = kMath[54];
+define IVLN10 = 4.34294481903251816668e-01;
+define LOG10_2HI = 3.01029995663611771306e-01;
+define LOG10_2LO = 3.69423907715893078616e-13;
function MathLog10(x) {
x = x * 1; // Convert to number.
@@ -885,7 +957,7 @@ function MathLog10(x) {
// log10(+/- 0) = -Infinity.
if (((hx & 0x7fffffff) | lx) === 0) return -INFINITY;
// log10 of negative number is NaN.
- if (hx < 0) return NAN;
+ if (hx < 0) return NaN;
// Subnormal number. Scale up x.
k -= 54;
x *= TWO54;
@@ -920,18 +992,21 @@ function MathLog10(x) {
// log2(x) = w1 + w2
// where w1 has 53-24 = 29 bits of trailing zeroes.
-define DP_H = kMath[64];
-define DP_L = kMath[65];
+define DP_H = 5.84962487220764160156e-01;
+define DP_L = 1.35003920212974897128e-08;
// Polynomial coefficients for (3/2)*(log2(x) - 2*s - 2/3*s^3)
-macro KLOG2(x)
-(kMath[55+x])
-endmacro
+define LOG2_1 = 5.99999999999994648725e-01;
+define LOG2_2 = 4.28571428578550184252e-01;
+define LOG2_3 = 3.33333329818377432918e-01;
+define LOG2_4 = 2.72728123808534006489e-01;
+define LOG2_5 = 2.30660745775561754067e-01;
+define LOG2_6 = 2.06975017800338417784e-01;
// cp = 2/(3*ln(2)). Note that cp_h + cp_l is cp, but with more accuracy.
-define CP = kMath[61];
-define CP_H = kMath[62];
-define CP_L = kMath[63];
+define CP = 9.61796693925975554329e-01;
+define CP_H = 9.61796700954437255859e-01;
+define CP_L = -7.02846165095275826516e-09;
// 2^53
define TWO53 = 9007199254740992;
@@ -947,7 +1022,7 @@ function MathLog2(x) {
if ((ix | lx) == 0) return -INFINITY;
// log(x) = NaN, if x < 0
- if (hx < 0) return NAN;
+ if (hx < 0) return NaN;
// log2(Infinity) = Infinity, log2(NaN) = NaN
if (ix >= 0x7ff00000) return x;
@@ -996,8 +1071,8 @@ function MathLog2(x) {
// Compute log2(ax)
var s2 = ss * ss;
- var r = s2 * s2 * (KLOG2(0) + s2 * (KLOG2(1) + s2 * (KLOG2(2) + s2 * (
- KLOG2(3) + s2 * (KLOG2(4) + s2 * KLOG2(5))))));
+ var r = s2 * s2 * (LOG2_1 + s2 * (LOG2_2 + s2 * (LOG2_3 + s2 * (
+ LOG2_4 + s2 * (LOG2_5 + s2 * LOG2_6)))));
r += s_l * (s_h + ss);
s2 = s_h * s_h;
t_h = %_ConstructDouble(%_DoubleHi(3.0 + s2 + r), 0);
@@ -1007,10 +1082,10 @@ function MathLog2(x) {
v = s_l * t_h + t_l * ss;
// 2 / (3 * log(2)) * (ss + ...)
- p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
- p_l = v - (p_h - u);
- z_h = CP_H * p_h;
- z_l = CP_L * p_h + p_l * CP + dp_l;
+ var p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
+ var p_l = v - (p_h - u);
+ var z_h = CP_H * p_h;
+ var z_l = CP_L * p_h + p_l * CP + dp_l;
// log2(ax) = (ss + ...) * 2 / (3 * log(2)) = n + dp_h + z_h + z_l
var t = n;
@@ -1029,6 +1104,7 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"tan", MathTan,
"sinh", MathSinh,
"cosh", MathCosh,
+ "tanh", MathTanh,
"log10", MathLog10,
"log2", MathLog2,
"log1p", MathLog1p,
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index db9092d21b..2443e84238 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -318,6 +318,7 @@ class Token {
static const char token_type[NUM_TOKENS];
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TOKEN_H_
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index f31eff96ba..a5ec52f667 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -173,6 +173,7 @@ void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TRANSITIONS_INL_H_
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index b0aab9502e..1fcd3860d0 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -41,6 +41,14 @@ class TransitionArray: public FixedArray {
static Map* SearchTransition(Map* map, PropertyKind kind, Name* name,
PropertyAttributes attributes);
+ static MaybeHandle<Map> SearchTransition(Handle<Map> map, PropertyKind kind,
+ Handle<Name> name,
+ PropertyAttributes attributes) {
+ if (Map* transition = SearchTransition(*map, kind, *name, attributes)) {
+ return handle(transition);
+ }
+ return MaybeHandle<Map>();
+ }
static Map* SearchSpecial(Map* map, Symbol* name);
@@ -302,6 +310,7 @@ class TransitionArray: public FixedArray {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TRANSITIONS_H_
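The new `SearchTransition` overload wraps the raw-pointer search for handlified callers: an empty `MaybeHandle<Map>` stands for "no transition", and the caller must check before use. A self-contained analog of that contract, with std::optional standing in for MaybeHandle:

#include <cstdio>
#include <optional>

struct Map {
  int id;
};

std::optional<Map*> Search(Map* map, bool found) {
  if (found) return map;  // mirrors: if (Map* t = SearchTransition(...)) return handle(t);
  return std::nullopt;    // mirrors: return MaybeHandle<Map>();
}

int main() {
  Map m{1};
  if (auto result = Search(&m, true)) std::printf("hit %d\n", (*result)->id);
  return 0;
}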
diff --git a/deps/v8/src/type-cache.cc b/deps/v8/src/type-cache.cc
new file mode 100644
index 0000000000..9ed8621487
--- /dev/null
+++ b/deps/v8/src/type-cache.cc
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/type-cache.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+base::LazyInstance<TypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+
+// static
+TypeCache const& TypeCache::Get() { return kCache.Get(); }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/type-cache.h b/deps/v8/src/type-cache.h
new file mode 100644
index 0000000000..5eed557f7d
--- /dev/null
+++ b/deps/v8/src/type-cache.h
@@ -0,0 +1,114 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPE_CACHE_H_
+#define V8_TYPE_CACHE_H_
+
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+
+class TypeCache final {
+ private:
+ // This has to be first for the initialization magic to work.
+ Zone zone_;
+
+ public:
+ static TypeCache const& Get();
+
+ TypeCache() = default;
+
+ Type* const kInt8 =
+ CreateNative(CreateRange<int8_t>(), Type::UntaggedIntegral8());
+ Type* const kUint8 =
+ CreateNative(CreateRange<uint8_t>(), Type::UntaggedIntegral8());
+ Type* const kUint8Clamped = kUint8;
+ Type* const kInt16 =
+ CreateNative(CreateRange<int16_t>(), Type::UntaggedIntegral16());
+ Type* const kUint16 =
+ CreateNative(CreateRange<uint16_t>(), Type::UntaggedIntegral16());
+ Type* const kInt32 =
+ CreateNative(Type::Signed32(), Type::UntaggedIntegral32());
+ Type* const kUint32 =
+ CreateNative(Type::Unsigned32(), Type::UntaggedIntegral32());
+ Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
+ Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
+
+ Type* const kSmi = CreateNative(Type::SignedSmall(), Type::TaggedSigned());
+ Type* const kHeapNumber = CreateNative(Type::Number(), Type::TaggedPointer());
+
+ Type* const kSingletonZero = CreateRange(0.0, 0.0);
+ Type* const kSingletonOne = CreateRange(1.0, 1.0);
+ Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
+ Type* const kZeroish =
+ Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
+ Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
+ Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
+ Type* const kIntegerOrMinusZero =
+ Type::Union(kInteger, Type::MinusZero(), zone());
+ Type* const kIntegerOrMinusZeroOrNaN =
+ Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
+
+ Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
+
+ Type* const kIntegral32 = Type::Union(kInt32, kUint32, zone());
+
+ // The FixedArray::length property always contains a smi in the range
+ // [0, FixedArray::kMaxLength].
+ Type* const kFixedArrayLengthType = CreateNative(
+ CreateRange(0.0, FixedArray::kMaxLength), Type::TaggedSigned());
+
+ // The FixedDoubleArray::length property always contains a smi in the range
+ // [0, FixedDoubleArray::kMaxLength].
+ Type* const kFixedDoubleArrayLengthType = CreateNative(
+ CreateRange(0.0, FixedDoubleArray::kMaxLength), Type::TaggedSigned());
+
+ // The JSArray::length property always contains a tagged number in the range
+ // [0, kMaxUInt32].
+ Type* const kJSArrayLengthType =
+ CreateNative(Type::Unsigned32(), Type::Tagged());
+
+ // The String::length property always contains a smi in the range
+ // [0, String::kMaxLength].
+ Type* const kStringLengthType =
+ CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
+
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ Type* const k##TypeName##Array = CreateArray(k##TypeName);
+ TYPED_ARRAYS(TYPED_ARRAY)
+#undef TYPED_ARRAY
+
+ private:
+ Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
+
+ Type* CreateArrayFunction(Type* array) {
+ Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
+ Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
+ Type* arg3 = arg2;
+ return Type::Function(array, arg1, arg2, arg3, zone());
+ }
+
+ Type* CreateNative(Type* semantic, Type* representation) {
+ return Type::Intersect(semantic, representation, zone());
+ }
+
+ template <typename T>
+ Type* CreateRange() {
+ return CreateRange(std::numeric_limits<T>::min(),
+ std::numeric_limits<T>::max());
+ }
+
+ Type* CreateRange(double min, double max) {
+ return Type::Range(min, max, zone());
+ }
+
+ Zone* zone() { return &zone_; }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TYPE_CACHE_H_
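
TypeCache::Get() above pairs base::LazyInstance with in-class member initializers so that all canonical types are interned exactly once, in a single zone, on first use. A rough standalone sketch of the same lazy-singleton shape, assuming a C++11 function-local static in place of LazyInstance and strings in place of real Type* values:

#include <iostream>
#include <string>

class TypeCacheSketch {
 public:
  // Constructed once, on first use, in a thread-safe way (C++11 local
  // statics give roughly the effect of LAZY_INSTANCE_INITIALIZER here).
  static const TypeCacheSketch& Get() {
    static const TypeCacheSketch cache;
    return cache;
  }

  // Members are initialized eagerly inside the single lazy construction,
  // mirroring the kInt8/kUint8/... fields above.
  const std::string kInt8 = MakeRange(-128, 127);
  const std::string kUint8 = MakeRange(0, 255);

 private:
  TypeCacheSketch() = default;
  static std::string MakeRange(int min, int max) {
    return "Range(" + std::to_string(min) + ", " + std::to_string(max) + ")";
  }
};

int main() {
  // Every caller sees the same immutable instance; nothing is rebuilt
  // after the first call.
  const TypeCacheSketch& a = TypeCacheSketch::Get();
  const TypeCacheSketch& b = TypeCacheSketch::Get();
  std::cout << a.kInt8 << "\n" << (&a == &b ? "same instance" : "bug") << "\n";
}
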
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 4d1c345e68..fed28b671e 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -10,6 +10,36 @@
namespace v8 {
namespace internal {
+
+template <typename Derived>
+FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
+ FeedbackVectorSlotKind kind) {
+ Derived* derived = static_cast<Derived*>(this);
+
+ int slot = derived->slots();
+ int entries_per_slot = TypeFeedbackMetadata::GetSlotSize(kind);
+ derived->append(kind);
+ for (int i = 1; i < entries_per_slot; i++) {
+ derived->append(FeedbackVectorSlotKind::INVALID);
+ }
+ return FeedbackVectorSlot(slot);
+}
+
+
+// static
+TypeFeedbackMetadata* TypeFeedbackMetadata::cast(Object* obj) {
+ DCHECK(obj->IsTypeFeedbackVector());
+ return reinterpret_cast<TypeFeedbackMetadata*>(obj);
+}
+
+
+int TypeFeedbackMetadata::slot_count() const {
+ if (length() == 0) return 0;
+ DCHECK(length() > kReservedIndexCount);
+ return Smi::cast(get(kSlotsCountIndex))->value();
+}
+
+
// static
TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
DCHECK(obj->IsTypeFeedbackVector());
@@ -17,9 +47,37 @@ TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
}
-int TypeFeedbackVector::first_ic_slot_index() const {
- DCHECK(length() >= kReservedIndexCount);
- return Smi::cast(get(kFirstICSlotIndex))->value();
+int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
+ DCHECK_NE(FeedbackVectorSlotKind::INVALID, kind);
+ DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
+ return kind == FeedbackVectorSlotKind::GENERAL ? 1 : 2;
+}
+
+
+bool TypeFeedbackVector::is_empty() const {
+ if (length() == 0) return true;
+ DCHECK(length() > kReservedIndexCount);
+ return false;
+}
+
+
+int TypeFeedbackVector::slot_count() const {
+ if (length() == 0) return 0;
+ DCHECK(length() > kReservedIndexCount);
+ return length() - kReservedIndexCount;
+}
+
+
+TypeFeedbackMetadata* TypeFeedbackVector::metadata() const {
+ return is_empty() ? TypeFeedbackMetadata::cast(GetHeap()->empty_fixed_array())
+ : TypeFeedbackMetadata::cast(get(kMetadataIndex));
+}
+
+
+FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
+ FeedbackVectorSlot slot) const {
+ DCHECK(!is_empty());
+ return metadata()->GetKind(slot);
}
@@ -52,51 +110,17 @@ void TypeFeedbackVector::change_ic_generic_count(int delta) {
}
-int TypeFeedbackVector::Slots() const {
- if (length() == 0) return 0;
- return Max(
- 0, first_ic_slot_index() - ic_metadata_length() - kReservedIndexCount);
-}
-
-
-int TypeFeedbackVector::ICSlots() const {
- if (length() == 0) return 0;
- return (length() - first_ic_slot_index()) / elements_per_ic_slot();
-}
-
-
-int TypeFeedbackVector::ic_metadata_length() const {
- return VectorICComputer::word_count(ICSlots());
-}
-
-
-// Conversion from a slot or ic slot to an integer index to the underlying
-// array.
int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
- DCHECK(slot.ToInt() < first_ic_slot_index());
- return kReservedIndexCount + ic_metadata_length() + slot.ToInt();
-}
-
-
-int TypeFeedbackVector::GetIndex(FeedbackVectorICSlot slot) const {
- int first_ic_slot = first_ic_slot_index();
- DCHECK(slot.ToInt() < ICSlots());
- return first_ic_slot + slot.ToInt() * elements_per_ic_slot();
+ DCHECK(slot.ToInt() < slot_count());
+ return kReservedIndexCount + slot.ToInt();
}
// Conversion from an integer index to either a slot or an ic slot. The caller
// should know what kind she expects.
FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) const {
- DCHECK(index >= kReservedIndexCount && index < first_ic_slot_index());
- return FeedbackVectorSlot(index - ic_metadata_length() - kReservedIndexCount);
-}
-
-
-FeedbackVectorICSlot TypeFeedbackVector::ToICSlot(int index) const {
- DCHECK(index >= first_ic_slot_index() && index < length());
- int ic_slot = (index - first_ic_slot_index()) / elements_per_ic_slot();
- return FeedbackVectorICSlot(ic_slot);
+ DCHECK(index >= kReservedIndexCount && index < length());
+ return FeedbackVectorSlot(index - kReservedIndexCount);
}
@@ -111,17 +135,6 @@ void TypeFeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
}
-Object* TypeFeedbackVector::Get(FeedbackVectorICSlot slot) const {
- return get(GetIndex(slot));
-}
-
-
-void TypeFeedbackVector::Set(FeedbackVectorICSlot slot, Object* value,
- WriteBarrierMode mode) {
- set(GetIndex(slot), value, mode);
-}
-
-
Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
@@ -137,8 +150,8 @@ Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
}
-Object* TypeFeedbackVector::RawUninitializedSentinel(Heap* heap) {
- return heap->uninitialized_symbol();
+Object* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+ return isolate->heap()->uninitialized_symbol();
}
@@ -146,7 +159,10 @@ Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
Object* FeedbackNexus::GetFeedbackExtra() const {
- DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+#ifdef DEBUG
+ FeedbackVectorSlotKind kind = vector()->GetKind(slot());
+ DCHECK_LT(1, TypeFeedbackMetadata::GetSlotSize(kind));
+#endif
int extra_index = vector()->GetIndex(slot()) + 1;
return vector()->get(extra_index);
}
@@ -159,14 +175,17 @@ void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
WriteBarrierMode mode) {
- DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+#ifdef DEBUG
+ FeedbackVectorSlotKind kind = vector()->GetKind(slot());
+ DCHECK_LT(1, TypeFeedbackMetadata::GetSlotSize(kind));
+#endif
int index = vector()->GetIndex(slot()) + 1;
vector()->set(index, feedback_extra, mode);
}
Isolate* FeedbackNexus::GetIsolate() const { return vector()->GetIsolate(); }
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPE_FEEDBACK_VECTOR_INL_H_
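
The AddSlot() template above is the heart of the new spec layout: the slot's kind is appended once, and any remaining elements of a multi-element slot are padded with INVALID so that no index can land in the middle of a slot. A self-contained sketch of that rule, with SlotKind and SpecSketch as illustrative stand-ins for the real spec classes:

#include <iostream>
#include <vector>

enum class SlotKind { INVALID, CALL_IC, LOAD_IC, GENERAL };

// Mirrors TypeFeedbackMetadata::GetSlotSize: GENERAL takes one element,
// every IC kind takes two.
int GetSlotSize(SlotKind kind) {
  return kind == SlotKind::GENERAL ? 1 : 2;
}

struct SpecSketch {
  std::vector<SlotKind> kinds;

  // Returns the index of the new slot, like FeedbackVectorSpecBase::AddSlot.
  int AddSlot(SlotKind kind) {
    int slot = static_cast<int>(kinds.size());
    kinds.push_back(kind);
    for (int i = 1; i < GetSlotSize(kind); i++) {
      kinds.push_back(SlotKind::INVALID);  // padding entry
    }
    return slot;
  }
};

int main() {
  SpecSketch spec;
  std::cout << spec.AddSlot(SlotKind::CALL_IC) << "\n";  // 0 (occupies 0-1)
  std::cout << spec.AddSlot(SlotKind::GENERAL) << "\n";  // 2 (occupies 2)
  std::cout << spec.AddSlot(SlotKind::LOAD_IC) << "\n";  // 3 (occupies 3-4)
}
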
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 9da3c655fb..9fb03bb673 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -13,21 +13,28 @@
namespace v8 {
namespace internal {
+
+static bool IsPropertyNameFeedback(Object* feedback) {
+ return feedback->IsString() ||
+ (feedback->IsSymbol() && !Symbol::cast(feedback)->is_private());
+}
+
+
std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind) {
- return os << TypeFeedbackVector::Kind2String(kind);
+ return os << TypeFeedbackMetadata::Kind2String(kind);
}
-FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
- FeedbackVectorICSlot slot) const {
+FeedbackVectorSlotKind TypeFeedbackMetadata::GetKind(
+ FeedbackVectorSlot slot) const {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
return VectorICComputer::decode(data, slot.ToInt());
}
-void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot,
- FeedbackVectorSlotKind kind) {
+void TypeFeedbackMetadata::SetKind(FeedbackVectorSlot slot,
+ FeedbackVectorSlotKind kind) {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
@@ -35,99 +42,143 @@ void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot,
}
-template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
Isolate* isolate, const StaticFeedbackVectorSpec* spec);
-template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
Isolate* isolate, const FeedbackVectorSpec* spec);
// static
template <typename Spec>
-Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
- const Spec* spec) {
+Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
+ const Spec* spec) {
const int slot_count = spec->slots();
- const int ic_slot_count = spec->ic_slots();
- const int index_count = VectorICComputer::word_count(ic_slot_count);
- const int length = slot_count + (ic_slot_count * elements_per_ic_slot()) +
- index_count + kReservedIndexCount;
+ const int slot_kinds_length = VectorICComputer::word_count(slot_count);
+ const int length = slot_kinds_length + kReservedIndexCount;
if (length == kReservedIndexCount) {
- return Handle<TypeFeedbackVector>::cast(
+ return Handle<TypeFeedbackMetadata>::cast(
isolate->factory()->empty_fixed_array());
}
+#ifdef DEBUG
+ for (int i = 0; i < slot_count;) {
+ FeedbackVectorSlotKind kind = spec->GetKind(i);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+ for (int j = 1; j < entry_size; j++) {
+ FeedbackVectorSlotKind kind = spec->GetKind(i + j);
+ DCHECK_EQ(FeedbackVectorSlotKind::INVALID, kind);
+ }
+ i += entry_size;
+ }
+#endif
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length, TENURED);
- if (ic_slot_count > 0) {
- array->set(kFirstICSlotIndex,
- Smi::FromInt(slot_count + index_count + kReservedIndexCount));
- } else {
- array->set(kFirstICSlotIndex, Smi::FromInt(length));
- }
- array->set(kWithTypesIndex, Smi::FromInt(0));
- array->set(kGenericCountIndex, Smi::FromInt(0));
- // Fill the indexes with zeros.
- for (int i = 0; i < index_count; i++) {
+ array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
+ // Fill the bit-vector part with zeros.
+ for (int i = 0; i < slot_kinds_length; i++) {
array->set(kReservedIndexCount + i, Smi::FromInt(0));
}
- // Ensure we can skip the write barrier
- Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
- DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
- for (int i = kReservedIndexCount + index_count; i < length; i++) {
- array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ Handle<TypeFeedbackMetadata> metadata =
+ Handle<TypeFeedbackMetadata>::cast(array);
+ for (int i = 0; i < slot_count; i++) {
+ metadata->SetKind(FeedbackVectorSlot(i), spec->GetKind(i));
}
+ return metadata;
+}
- Handle<TypeFeedbackVector> vector = Handle<TypeFeedbackVector>::cast(array);
- for (int i = 0; i < ic_slot_count; i++) {
- vector->SetKind(FeedbackVectorICSlot(i), spec->GetKind(i));
+
+bool TypeFeedbackMetadata::SpecDiffersFrom(
+ const FeedbackVectorSpec* other_spec) const {
+ if (other_spec->slots() != slot_count()) {
+ return true;
}
- return vector;
+
+ int slots = slot_count();
+ for (int i = 0; i < slots; i++) {
+ if (GetKind(FeedbackVectorSlot(i)) != other_spec->GetKind(i)) {
+ return true;
+ }
+ }
+ return false;
}
-template int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec*,
- FeedbackVectorICSlot);
-template int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec*,
- FeedbackVectorSlot);
+const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
+ switch (kind) {
+ case FeedbackVectorSlotKind::INVALID:
+ return "INVALID";
+ case FeedbackVectorSlotKind::CALL_IC:
+ return "CALL_IC";
+ case FeedbackVectorSlotKind::LOAD_IC:
+ return "LOAD_IC";
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+ return "KEYED_LOAD_IC";
+ case FeedbackVectorSlotKind::STORE_IC:
+ return "STORE_IC";
+ case FeedbackVectorSlotKind::KEYED_STORE_IC:
+ return "KEYED_STORE_IC";
+ case FeedbackVectorSlotKind::GENERAL:
+ return "STUB";
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ break;
+ }
+ UNREACHABLE();
+ return "?";
+}
// static
-template <typename Spec>
-int TypeFeedbackVector::GetIndexFromSpec(const Spec* spec,
- FeedbackVectorSlot slot) {
- const int ic_slot_count = spec->ic_slots();
- const int index_count = VectorICComputer::word_count(ic_slot_count);
- return kReservedIndexCount + index_count + slot.ToInt();
+Handle<TypeFeedbackVector> TypeFeedbackVector::New(
+ Isolate* isolate, Handle<TypeFeedbackMetadata> metadata) {
+ Factory* factory = isolate->factory();
+
+ const int slot_count = metadata->slot_count();
+ const int length = slot_count + kReservedIndexCount;
+ if (length == kReservedIndexCount) {
+ return Handle<TypeFeedbackVector>::cast(factory->empty_fixed_array());
+ }
+
+ Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
+ array->set(kMetadataIndex, *metadata);
+ array->set(kWithTypesIndex, Smi::FromInt(0));
+ array->set(kGenericCountIndex, Smi::FromInt(0));
+
+ // Ensure we can skip the write barrier
+ Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
+ DCHECK_EQ(*factory->uninitialized_symbol(), *uninitialized_sentinel);
+ for (int i = kReservedIndexCount; i < length; i++) {
+ array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ }
+
+ return Handle<TypeFeedbackVector>::cast(array);
}
// static
-template <typename Spec>
-int TypeFeedbackVector::GetIndexFromSpec(const Spec* spec,
- FeedbackVectorICSlot slot) {
- const int slot_count = spec->slots();
- const int ic_slot_count = spec->ic_slots();
- const int index_count = VectorICComputer::word_count(ic_slot_count);
- return kReservedIndexCount + index_count + slot_count +
- slot.ToInt() * elements_per_ic_slot();
+int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec* spec,
+ FeedbackVectorSlot slot) {
+ return kReservedIndexCount + slot.ToInt();
}
// static
int TypeFeedbackVector::PushAppliedArgumentsIndex() {
- const int index_count = VectorICComputer::word_count(1);
- return kReservedIndexCount + index_count;
+ return kReservedIndexCount;
}
// static
Handle<TypeFeedbackVector> TypeFeedbackVector::CreatePushAppliedArgumentsVector(
Isolate* isolate) {
- FeedbackVectorSlotKind kinds[] = {FeedbackVectorSlotKind::KEYED_LOAD_IC};
- StaticFeedbackVectorSpec spec(0, 1, kinds);
+ StaticFeedbackVectorSpec spec;
+ FeedbackVectorSlot slot = spec.AddKeyedLoadICSlot();
+ // TODO(ishell): allocate this metadata only once.
+ Handle<TypeFeedbackMetadata> feedback_metadata =
+ TypeFeedbackMetadata::New(isolate, &spec);
Handle<TypeFeedbackVector> feedback_vector =
- isolate->factory()->NewTypeFeedbackVector(&spec);
- DCHECK(PushAppliedArgumentsIndex() ==
- feedback_vector->GetIndex(FeedbackVectorICSlot(0)));
+ TypeFeedbackVector::New(isolate, feedback_metadata);
+ DCHECK_EQ(PushAppliedArgumentsIndex(), feedback_vector->GetIndex(slot));
+ USE(slot);
return feedback_vector;
}
@@ -142,100 +193,71 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
}
-bool TypeFeedbackVector::SpecDiffersFrom(
- const FeedbackVectorSpec* other_spec) const {
- if (other_spec->slots() != Slots() || other_spec->ic_slots() != ICSlots()) {
- return true;
- }
-
- int ic_slots = ICSlots();
- for (int i = 0; i < ic_slots; i++) {
- if (GetKind(FeedbackVectorICSlot(i)) != other_spec->GetKind(i)) {
- return true;
- }
- }
- return false;
-}
-
-
// This logic is copied from
// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
-static bool ClearLogic(Heap* heap) {
- return FLAG_cleanup_code_caches_at_gc &&
- heap->isolate()->serializer_enabled();
+static bool ClearLogic(Isolate* isolate) {
+ return FLAG_cleanup_code_caches_at_gc && isolate->serializer_enabled();
}
void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
bool force_clear) {
- int slots = Slots();
- Heap* heap = GetIsolate()->heap();
+ Isolate* isolate = GetIsolate();
- if (!force_clear && !ClearLogic(heap)) return;
+ if (!force_clear && !ClearLogic(isolate)) return;
Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(heap);
- for (int i = 0; i < slots; i++) {
- FeedbackVectorSlot slot(i);
- Object* obj = Get(slot);
- if (obj->IsHeapObject()) {
- InstanceType instance_type =
- HeapObject::cast(obj)->map()->instance_type();
- // AllocationSites are exempt from clearing. They don't store Maps
- // or Code pointers which can cause memory leaks if not cleared
- // regularly.
- if (instance_type != ALLOCATION_SITE_TYPE) {
- Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
- }
- }
- }
-}
-
+ TypeFeedbackVector::RawUninitializedSentinel(isolate);
-void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
- bool force_clear) {
- Heap* heap = GetIsolate()->heap();
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
- if (!force_clear && !ClearLogic(heap)) return;
-
- int slots = ICSlots();
- Code* host = shared->code();
- Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(heap);
- for (int i = 0; i < slots; i++) {
- FeedbackVectorICSlot slot(i);
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- FeedbackVectorSlotKind kind = GetKind(slot);
switch (kind) {
case FeedbackVectorSlotKind::CALL_IC: {
CallICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::LOAD_IC: {
LoadICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
KeyedLoadICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::STORE_IC: {
DCHECK(FLAG_vector_stores);
StoreICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::KEYED_STORE_IC: {
DCHECK(FLAG_vector_stores);
KeyedStoreICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
+ break;
+ }
+ case FeedbackVectorSlotKind::GENERAL: {
+ if (obj->IsHeapObject()) {
+ InstanceType instance_type =
+ HeapObject::cast(obj)->map()->instance_type();
+ // AllocationSites are exempt from clearing. They don't store Maps
+ // or Code pointers which can cause memory leaks if not cleared
+ // regularly.
+ if (instance_type != ALLOCATION_SITE_TYPE) {
+ Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ }
+ }
break;
}
- case FeedbackVectorSlotKind::UNUSED:
+ case FeedbackVectorSlotKind::INVALID:
case FeedbackVectorSlotKind::KINDS_NUMBER:
UNREACHABLE();
break;
@@ -258,22 +280,22 @@ void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
- Heap* heap = GetIsolate()->heap();
+ Isolate* isolate = GetIsolate();
- int slots = ICSlots();
Code* host = shared->code();
Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(heap);
- for (int i = 0; i < slots; i++) {
- FeedbackVectorICSlot slot(i);
+ TypeFeedbackVector::RawUninitializedSentinel(isolate);
+
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+ if (kind != FeedbackVectorSlotKind::KEYED_STORE_IC) continue;
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- FeedbackVectorSlotKind kind = GetKind(slot);
- if (kind == FeedbackVectorSlotKind::KEYED_STORE_IC) {
- DCHECK(FLAG_vector_stores);
- KeyedStoreICNexus nexus(this, slot);
- nexus.Clear(host);
- }
+ DCHECK(FLAG_vector_stores);
+ KeyedStoreICNexus nexus(this, slot);
+ nexus.Clear(host);
}
}
}
@@ -281,29 +303,7 @@ void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
// static
Handle<TypeFeedbackVector> TypeFeedbackVector::DummyVector(Isolate* isolate) {
- return Handle<TypeFeedbackVector>::cast(isolate->factory()->dummy_vector());
-}
-
-
-const char* TypeFeedbackVector::Kind2String(FeedbackVectorSlotKind kind) {
- switch (kind) {
- case FeedbackVectorSlotKind::UNUSED:
- return "UNUSED";
- case FeedbackVectorSlotKind::CALL_IC:
- return "CALL_IC";
- case FeedbackVectorSlotKind::LOAD_IC:
- return "LOAD_IC";
- case FeedbackVectorSlotKind::KEYED_LOAD_IC:
- return "KEYED_LOAD_IC";
- case FeedbackVectorSlotKind::STORE_IC:
- return "STORE_IC";
- case FeedbackVectorSlotKind::KEYED_STORE_IC:
- return "KEYED_STORE_IC";
- case FeedbackVectorSlotKind::KINDS_NUMBER:
- break;
- }
- UNREACHABLE();
- return "?";
+ return isolate->factory()->dummy_vector();
}
@@ -657,9 +657,10 @@ void KeyedStoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray() || feedback->IsString()) {
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ if (feedback->IsFixedArray() || is_named_feedback) {
int found = 0;
- if (feedback->IsString()) {
+ if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
@@ -694,8 +695,9 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray() || feedback->IsString()) {
- if (feedback->IsString()) {
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ if (feedback->IsFixedArray() || is_named_feedback) {
+ if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
@@ -732,8 +734,9 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
Object* feedback = GetFeedback();
int count = 0;
- if (feedback->IsFixedArray() || feedback->IsString()) {
- if (feedback->IsString()) {
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ if (feedback->IsFixedArray() || is_named_feedback) {
+ if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
@@ -777,7 +780,7 @@ void KeyedLoadICNexus::Clear(Code* host) {
Name* KeyedLoadICNexus::FindFirstName() const {
Object* feedback = GetFeedback();
- if (feedback->IsString()) {
+ if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback);
}
return NULL;
@@ -786,7 +789,7 @@ Name* KeyedLoadICNexus::FindFirstName() const {
Name* KeyedStoreICNexus::FindFirstName() const {
Object* feedback = GetFeedback();
- if (feedback->IsString()) {
+ if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback);
}
return NULL;
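
GetKind() and SetKind() above delegate to VectorICComputer, which packs each slot kind into a 3-bit field of a Smi-sized word. A standalone sketch of that encode/decode arithmetic over plain uint32_t words; the field width matches kFeedbackVectorSlotKindBits, everything else is simplified:

#include <cstdint>
#include <iostream>
#include <vector>

constexpr int kBitsPerKind = 3;
constexpr int kKindsPerWord = 32 / kBitsPerKind;  // 10 kinds per word

int WordIndex(int slot) { return slot / kKindsPerWord; }
int Shift(int slot) { return (slot % kKindsPerWord) * kBitsPerKind; }

void SetKind(std::vector<uint32_t>& words, int slot, uint32_t kind) {
  uint32_t mask = 0x7u << Shift(slot);              // 3-bit field for slot
  words[WordIndex(slot)] &= ~mask;                  // clear old value
  words[WordIndex(slot)] |= (kind << Shift(slot));  // encode new value
}

uint32_t GetKind(const std::vector<uint32_t>& words, int slot) {
  return (words[WordIndex(slot)] >> Shift(slot)) & 0x7u;  // decode
}

int main() {
  std::vector<uint32_t> words(2, 0);  // room for 20 slot kinds
  SetKind(words, 0, 1);   // e.g. CALL_IC
  SetKind(words, 9, 6);   // e.g. GENERAL, last field of word 0
  SetKind(words, 10, 3);  // first field of word 1
  std::cout << GetKind(words, 0) << " " << GetKind(words, 9) << " "
            << GetKind(words, 10) << "\n";  // prints: 1 6 3
}
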
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 5c28fca55f..9aca68f71c 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -7,10 +7,8 @@
#include <vector>
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/elements-kind.h"
-#include "src/heap/heap.h"
-#include "src/isolate.h"
#include "src/objects.h"
#include "src/zone-containers.h"
@@ -19,13 +17,20 @@ namespace internal {
enum class FeedbackVectorSlotKind {
- UNUSED,
+ // This kind means that the slot points to the middle of another slot
+ // which occupies more than one feedback vector element.
+ // There must be no such slots in the system.
+ INVALID,
+
CALL_IC,
LOAD_IC,
KEYED_LOAD_IC,
STORE_IC,
KEYED_STORE_IC,
+ // This is a general-purpose slot that occupies one feedback vector element.
+ GENERAL,
+
KINDS_NUMBER // Last value indicating number of kinds.
};
@@ -33,168 +38,182 @@ enum class FeedbackVectorSlotKind {
std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
-class StaticFeedbackVectorSpec {
+template <typename Derived>
+class FeedbackVectorSpecBase {
public:
- StaticFeedbackVectorSpec() : slots_(0), ic_slots_(0), ic_kinds_(NULL) {}
- StaticFeedbackVectorSpec(int slots, int ic_slots,
- FeedbackVectorSlotKind* ic_slot_kinds)
- : slots_(slots), ic_slots_(ic_slots), ic_kinds_(ic_slot_kinds) {}
+ inline FeedbackVectorSlot AddSlot(FeedbackVectorSlotKind kind);
- int slots() const { return slots_; }
+ FeedbackVectorSlot AddCallICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ }
- int ic_slots() const { return ic_slots_; }
+ FeedbackVectorSlot AddLoadICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+ }
- FeedbackVectorSlotKind GetKind(int ic_slot) const {
- DCHECK(ic_slots_ > 0 && ic_slot < ic_slots_);
- return ic_kinds_[ic_slot];
+ FeedbackVectorSlot AddKeyedLoadICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
}
- private:
- int slots_;
- int ic_slots_;
- FeedbackVectorSlotKind* ic_kinds_;
+ FeedbackVectorSlot AddStoreICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::STORE_IC);
+ }
+
+ FeedbackVectorSlot AddKeyedStoreICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::KEYED_STORE_IC);
+ }
+
+ FeedbackVectorSlot AddGeneralSlot() {
+ return AddSlot(FeedbackVectorSlotKind::GENERAL);
+ }
};
-class FeedbackVectorSpec {
+class StaticFeedbackVectorSpec
+ : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
public:
- explicit FeedbackVectorSpec(Zone* zone)
- : slots_(0), ic_slots_(0), ic_slot_kinds_(zone) {}
+ StaticFeedbackVectorSpec() : slots_(0) {}
int slots() const { return slots_; }
- void increase_slots(int count) {
- DCHECK_LT(0, count);
- slots_ += count;
- }
- int ic_slots() const { return ic_slots_; }
- void increase_ic_slots(int count) {
- DCHECK_LT(0, count);
- ic_slots_ += count;
- ic_slot_kinds_.resize(ic_slots_);
+ FeedbackVectorSlotKind GetKind(int slot) const {
+ DCHECK(slot >= 0 && slot < slots_);
+ return kinds_[slot];
}
- FeedbackVectorICSlot AddSlot(FeedbackVectorSlotKind kind) {
- int slot = ic_slots_;
- increase_ic_slots(1);
- ic_slot_kinds_[slot] = static_cast<unsigned char>(kind);
- return FeedbackVectorICSlot(slot);
- }
+ private:
+ friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
- FeedbackVectorICSlot AddSlots(FeedbackVectorSlotKind kind, int count) {
- int slot = ic_slots_;
- increase_ic_slots(count);
- for (int i = 0; i < count; i++) {
- ic_slot_kinds_[slot + i] = static_cast<unsigned char>(kind);
- }
- return FeedbackVectorICSlot(slot);
+ void append(FeedbackVectorSlotKind kind) {
+ DCHECK(slots_ < kMaxLength);
+ kinds_[slots_++] = kind;
}
- FeedbackVectorICSlot AddCallICSlot() {
- return AddSlot(FeedbackVectorSlotKind::CALL_IC);
- }
+ static const int kMaxLength = 12;
- FeedbackVectorICSlot AddLoadICSlot() {
- return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
- }
+ int slots_;
+ FeedbackVectorSlotKind kinds_[kMaxLength];
+};
- FeedbackVectorICSlot AddLoadICSlots(int count) {
- return AddSlots(FeedbackVectorSlotKind::LOAD_IC, count);
- }
- FeedbackVectorICSlot AddKeyedLoadICSlot() {
- return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
+class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
+ public:
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+ slot_kinds_.reserve(16);
}
- FeedbackVectorICSlot AddStoreICSlot() {
- return AddSlot(FeedbackVectorSlotKind::STORE_IC);
- }
+ int slots() const { return static_cast<int>(slot_kinds_.size()); }
- FeedbackVectorSlot AddStubSlot() {
- int slot = slots_;
- increase_slots(1);
- return FeedbackVectorSlot(slot);
+ FeedbackVectorSlotKind GetKind(int slot) const {
+ return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
}
- FeedbackVectorSlot AddStubSlots(int count) {
- int slot = slots_;
- increase_slots(count);
- return FeedbackVectorSlot(slot);
- }
+ private:
+ friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
- FeedbackVectorSlotKind GetKind(int ic_slot) const {
- return static_cast<FeedbackVectorSlotKind>(ic_slot_kinds_.at(ic_slot));
+ void append(FeedbackVectorSlotKind kind) {
+ slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
+ ZoneVector<unsigned char> slot_kinds_;
+};
+
+
+// The shape of the TypeFeedbackMetadata is an array with:
+// 0: slot_count
+// 1..N: slot kinds packed into a bit vector
+//
+class TypeFeedbackMetadata : public FixedArray {
+ public:
+ // Casting.
+ static inline TypeFeedbackMetadata* cast(Object* obj);
+
+ static const int kSlotsCountIndex = 0;
+ static const int kReservedIndexCount = 1;
+
+ // Returns number of feedback vector elements used by given slot kind.
+ static inline int GetSlotSize(FeedbackVectorSlotKind kind);
+
+ bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+
+ // Returns number of slots in the vector.
+ inline int slot_count() const;
+
+ // Returns slot kind for given slot.
+ FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+
+ template <typename Spec>
+ static Handle<TypeFeedbackMetadata> New(Isolate* isolate, const Spec* spec);
+
+#ifdef OBJECT_PRINT
+ // For gdb debugging.
+ void Print();
+#endif // OBJECT_PRINT
+
+ DECLARE_PRINTER(TypeFeedbackMetadata)
+
+ static const char* Kind2String(FeedbackVectorSlotKind kind);
+
private:
- int slots_;
- int ic_slots_;
- ZoneVector<unsigned char> ic_slot_kinds_;
+ static const int kFeedbackVectorSlotKindBits = 3;
+ STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
+ (1 << kFeedbackVectorSlotKindBits));
+
+ void SetKind(FeedbackVectorSlot slot, FeedbackVectorSlotKind kind);
+
+ typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
+ kSmiValueSize, uint32_t> VectorICComputer;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackMetadata);
};
// The shape of the TypeFeedbackVector is an array with:
-// 0: first_ic_slot_index (== length() if no ic slots are present)
+// 0: feedback metadata
// 1: ics_with_types
// 2: ics_with_generic_info
-// 3: type information for ic slots, if any
-// ...
-// N: first feedback slot (N >= 3)
+// 3: feedback slot #0
// ...
-// [<first_ic_slot_index>: feedback slot]
-// ...to length() - 1
+// 3 + slot_count - 1: feedback slot #(slot_count-1)
//
class TypeFeedbackVector : public FixedArray {
public:
// Casting.
static inline TypeFeedbackVector* cast(Object* obj);
- static const int kReservedIndexCount = 3;
- static const int kFirstICSlotIndex = 0;
+ static const int kMetadataIndex = 0;
static const int kWithTypesIndex = 1;
static const int kGenericCountIndex = 2;
+ static const int kReservedIndexCount = 3;
- static int elements_per_ic_slot() { return 2; }
-
- inline int first_ic_slot_index() const;
inline int ic_with_type_info_count();
inline void change_ic_with_type_info_count(int delta);
inline int ic_generic_count();
inline void change_ic_generic_count(int delta);
- inline int ic_metadata_length() const;
- bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+ inline bool is_empty() const;
- inline int Slots() const;
- inline int ICSlots() const;
+ // Returns number of slots in the vector.
+ inline int slot_count() const;
- // Conversion from a slot or ic slot to an integer index to the underlying
- // array.
- inline int GetIndex(FeedbackVectorSlot slot) const;
- inline int GetIndex(FeedbackVectorICSlot slot) const;
+ inline TypeFeedbackMetadata* metadata() const;
- template <typename Spec>
- static int GetIndexFromSpec(const Spec* spec, FeedbackVectorSlot slot);
- template <typename Spec>
- static int GetIndexFromSpec(const Spec* spec, FeedbackVectorICSlot slot);
+ // Conversion from a slot to an integer index into the underlying array.
+ inline int GetIndex(FeedbackVectorSlot slot) const;
+ static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
+ FeedbackVectorSlot slot);
- // Conversion from an integer index to either a slot or an ic slot. The caller
- // should know what kind she expects.
+ // Conversion from an integer index into the underlying array to a slot.
inline FeedbackVectorSlot ToSlot(int index) const;
- inline FeedbackVectorICSlot ToICSlot(int index) const;
inline Object* Get(FeedbackVectorSlot slot) const;
inline void Set(FeedbackVectorSlot slot, Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline Object* Get(FeedbackVectorICSlot slot) const;
- inline void Set(FeedbackVectorICSlot slot, Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- // IC slots need metadata to recognize the type of IC.
- FeedbackVectorSlotKind GetKind(FeedbackVectorICSlot slot) const;
+ // Returns slot kind for given slot.
+ inline FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
- template <typename Spec>
- static Handle<TypeFeedbackVector> Allocate(Isolate* isolate,
- const Spec* spec);
+ static Handle<TypeFeedbackVector> New(Isolate* isolate,
+ Handle<TypeFeedbackMetadata> metadata);
static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
Handle<TypeFeedbackVector> vector);
@@ -206,19 +225,13 @@ class TypeFeedbackVector : public FixedArray {
DECLARE_PRINTER(TypeFeedbackVector)
- // Clears the vector slots and the vector ic slots.
+ // Clears the vector slots.
void ClearSlots(SharedFunctionInfo* shared) { ClearSlotsImpl(shared, true); }
+
void ClearSlotsAtGCTime(SharedFunctionInfo* shared) {
ClearSlotsImpl(shared, false);
}
- void ClearICSlots(SharedFunctionInfo* shared) {
- ClearICSlotsImpl(shared, true);
- }
- void ClearICSlotsAtGCTime(SharedFunctionInfo* shared) {
- ClearICSlotsImpl(shared, false);
- }
-
static void ClearAllKeyedStoreICs(Isolate* isolate);
void ClearKeyedStoreICs(SharedFunctionInfo* shared);
@@ -233,37 +246,25 @@ class TypeFeedbackVector : public FixedArray {
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
+ static inline Object* RawUninitializedSentinel(Isolate* isolate);
static const int kDummyLoadICSlot = 0;
- static const int kDummyKeyedLoadICSlot = 1;
- static const int kDummyStoreICSlot = 2;
- static const int kDummyKeyedStoreICSlot = 3;
+ static const int kDummyKeyedLoadICSlot = 2;
+ static const int kDummyStoreICSlot = 4;
+ static const int kDummyKeyedStoreICSlot = 6;
static Handle<TypeFeedbackVector> DummyVector(Isolate* isolate);
- static FeedbackVectorICSlot DummySlot(int dummyIndex) {
+ static FeedbackVectorSlot DummySlot(int dummyIndex) {
DCHECK(dummyIndex >= 0 && dummyIndex <= kDummyKeyedStoreICSlot);
- return FeedbackVectorICSlot(dummyIndex);
+ return FeedbackVectorSlot(dummyIndex);
}
static int PushAppliedArgumentsIndex();
static Handle<TypeFeedbackVector> CreatePushAppliedArgumentsVector(
Isolate* isolate);
- static const char* Kind2String(FeedbackVectorSlotKind kind);
-
private:
- static const int kFeedbackVectorSlotKindBits = 3;
- STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
- (1 << kFeedbackVectorSlotKindBits));
-
- void SetKind(FeedbackVectorICSlot slot, FeedbackVectorSlotKind kind);
-
- typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
- kSmiValueSize, uint32_t> VectorICComputer;
-
void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
- void ClearICSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
};
@@ -284,13 +285,60 @@ STATIC_ASSERT(Name::kEmptyHashField == 0x3);
STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
+class TypeFeedbackMetadataIterator {
+ public:
+ explicit TypeFeedbackMetadataIterator(Handle<TypeFeedbackMetadata> metadata)
+ : metadata_handle_(metadata),
+ slot_(FeedbackVectorSlot(0)),
+ slot_kind_(FeedbackVectorSlotKind::INVALID) {}
+
+ explicit TypeFeedbackMetadataIterator(TypeFeedbackMetadata* metadata)
+ : metadata_(metadata),
+ slot_(FeedbackVectorSlot(0)),
+ slot_kind_(FeedbackVectorSlotKind::INVALID) {}
+
+ bool HasNext() const { return slot_.ToInt() < metadata()->slot_count(); }
+
+ FeedbackVectorSlot Next() {
+ DCHECK(HasNext());
+ FeedbackVectorSlot slot = slot_;
+ slot_kind_ = metadata()->GetKind(slot);
+ slot_ = FeedbackVectorSlot(slot_.ToInt() + entry_size());
+ return slot;
+ }
+
+ // Returns slot kind of the last slot returned by Next().
+ FeedbackVectorSlotKind kind() const {
+ DCHECK_NE(FeedbackVectorSlotKind::INVALID, slot_kind_);
+ DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, slot_kind_);
+ return slot_kind_;
+ }
+
+ // Returns entry size of the last slot returned by Next().
+ int entry_size() const { return TypeFeedbackMetadata::GetSlotSize(kind()); }
+
+ private:
+ TypeFeedbackMetadata* metadata() const {
+ return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
+ }
+
+ // The reason for having a handle and a raw pointer to the metadata is
+ // to have a single iterator implementation for both "handlified" and raw
+ // pointer use cases.
+ Handle<TypeFeedbackMetadata> metadata_handle_;
+ TypeFeedbackMetadata* metadata_;
+ FeedbackVectorSlot slot_;
+ FeedbackVectorSlotKind slot_kind_;
+};
+
+
// A FeedbackNexus is the combination of a TypeFeedbackVector and a slot.
// Derived classes customize the update and retrieval of feedback.
class FeedbackNexus {
public:
- FeedbackNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ FeedbackNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: vector_handle_(vector), vector_(NULL), slot_(slot) {}
- FeedbackNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ FeedbackNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: vector_(vector), slot_(slot) {}
virtual ~FeedbackNexus() {}
@@ -301,7 +349,7 @@ class FeedbackNexus {
TypeFeedbackVector* vector() const {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
InlineCacheState ic_state() const { return StateFromFeedback(); }
Map* FindFirstMap() const {
@@ -347,7 +395,7 @@ class FeedbackNexus {
// be done, like allocation.
Handle<TypeFeedbackVector> vector_handle_;
TypeFeedbackVector* vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
@@ -357,11 +405,11 @@ class CallICNexus : public FeedbackNexus {
// the count appropriately (ie, by 2).
static const int kCallCountIncrement = 2;
- CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
}
- CallICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ CallICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
}
@@ -390,15 +438,15 @@ class CallICNexus : public FeedbackNexus {
class LoadICNexus : public FeedbackNexus {
public:
- LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
}
explicit LoadICNexus(Isolate* isolate)
- : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
- TypeFeedbackVector::DummySlot(
- TypeFeedbackVector::kDummyLoadICSlot)) {}
- LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot)) {}
+ LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
}
@@ -415,11 +463,11 @@ class LoadICNexus : public FeedbackNexus {
class KeyedLoadICNexus : public FeedbackNexus {
public:
- KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
- KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
@@ -440,15 +488,15 @@ class KeyedLoadICNexus : public FeedbackNexus {
class StoreICNexus : public FeedbackNexus {
public:
- StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
}
explicit StoreICNexus(Isolate* isolate)
- : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
- TypeFeedbackVector::DummySlot(
- TypeFeedbackVector::kDummyStoreICSlot)) {}
- StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot)) {}
+ StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
}
@@ -465,16 +513,15 @@ class StoreICNexus : public FeedbackNexus {
class KeyedStoreICNexus : public FeedbackNexus {
public:
- KeyedStoreICNexus(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot)
+ KeyedStoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
}
explicit KeyedStoreICNexus(Isolate* isolate)
- : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
- TypeFeedbackVector::DummySlot(
- TypeFeedbackVector::kDummyKeyedStoreICSlot)) {}
- KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)) {}
+ KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
}
@@ -497,7 +544,7 @@ class KeyedStoreICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
Name* FindFirstName() const override;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPE_FEEDBACK_VECTOR_H_
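
TypeFeedbackMetadataIterator above advances by the entry size of the slot it just returned, so callers see logical slots rather than raw array indices and never step into a padding element. A minimal sketch of that iteration protocol, with a plain vector of kinds standing in for the metadata object:

#include <iostream>
#include <vector>

enum class SlotKind { INVALID, CALL_IC, GENERAL };

int GetSlotSize(SlotKind kind) { return kind == SlotKind::GENERAL ? 1 : 2; }

class MetadataIteratorSketch {
 public:
  explicit MetadataIteratorSketch(const std::vector<SlotKind>& kinds)
      : kinds_(kinds) {}

  bool HasNext() const { return slot_ < static_cast<int>(kinds_.size()); }

  // Returns the current slot index and advances past its whole entry,
  // like TypeFeedbackMetadataIterator::Next().
  int Next() {
    int slot = slot_;
    kind_ = kinds_[slot];         // remember kind for kind()/entry size
    slot_ += GetSlotSize(kind_);  // skip padding entries, if any
    return slot;
  }

  SlotKind kind() const { return kind_; }

 private:
  const std::vector<SlotKind>& kinds_;
  int slot_ = 0;
  SlotKind kind_ = SlotKind::INVALID;
};

int main() {
  // CALL_IC at 0 (two elements; index 1 is INVALID padding), GENERAL at 2.
  std::vector<SlotKind> kinds = {SlotKind::CALL_IC, SlotKind::INVALID,
                                 SlotKind::GENERAL};
  MetadataIteratorSketch iter(kinds);
  while (iter.HasNext()) {
    std::cout << "slot " << iter.Next() << "\n";  // prints 0, then 2
  }
}
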
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index ef5432176b..c049af18cb 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -55,27 +55,8 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
Handle<Object>::cast(isolate()->factory()->undefined_value());
Object* obj = feedback_vector_->Get(slot);
- // Slots do not embed direct pointers to functions. Instead a WeakCell is
- // always used.
- DCHECK(!obj->IsJSFunction());
- if (obj->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(obj);
- if (cell->cleared()) return undefined;
- obj = cell->value();
- }
-
- return Handle<Object>(obj, isolate());
-}
-
-
-Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
- DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
- Handle<Object> undefined =
- Handle<Object>::cast(isolate()->factory()->undefined_value());
- Object* obj = feedback_vector_->Get(slot);
-
- // Vector-based ICs do not embed direct pointers to maps, functions.
- // Instead a WeakCell is always used.
+ // Slots do not embed direct pointers to maps or functions. Instead
+ // a WeakCell is always used.
if (obj->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(obj);
if (cell->cleared()) return undefined;
@@ -105,7 +86,7 @@ InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(TypeFeedbackId id) {
InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (!slot.IsInvalid()) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::LOAD_IC) {
@@ -131,7 +112,7 @@ bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorICSlot slot) {
+bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
if (!slot.IsInvalid()) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::STORE_IC) {
@@ -146,7 +127,7 @@ bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorICSlot slot) {
}
-bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorICSlot slot) {
+bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsUndefined() ||
value.is_identical_to(
@@ -154,7 +135,7 @@ bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorICSlot slot) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorICSlot slot) {
+bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsAllocationSite() || value->IsJSFunction();
}
@@ -194,7 +175,7 @@ void TypeFeedbackOracle::GetStoreModeAndKeyType(
void TypeFeedbackOracle::GetStoreModeAndKeyType(
- FeedbackVectorICSlot slot, KeyedAccessStoreMode* store_mode,
+ FeedbackVectorSlot slot, KeyedAccessStoreMode* store_mode,
IcCheckType* key_type) {
if (!slot.IsInvalid() &&
feedback_vector_->GetKind(slot) ==
@@ -209,8 +190,7 @@ void TypeFeedbackOracle::GetStoreModeAndKeyType(
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(
- FeedbackVectorICSlot slot) {
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate()->native_context()->array_function());
@@ -233,7 +213,7 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(
Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
@@ -343,7 +323,7 @@ bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
}
-void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -356,7 +336,7 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
- FeedbackVectorICSlot slot, SmallMapList* receiver_types, bool* is_string,
+ FeedbackVectorSlot slot, SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type) {
receiver_types->Clear();
if (slot.IsInvalid()) {
@@ -380,7 +360,7 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(TypeFeedbackId id,
}
-void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -399,7 +379,7 @@ void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
- FeedbackVectorICSlot slot, SmallMapList* receiver_types,
+ FeedbackVectorSlot slot, SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
receiver_types->Clear();
CollectReceiverTypes(slot, receiver_types);
@@ -414,14 +394,14 @@ void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
}
-void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types) {
receiver_types->Clear();
if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
}
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
Code::Flags flags,
SmallMapList* types) {
@@ -467,7 +447,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
}
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* types) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::STORE_IC) {
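
The consolidated GetInfo() above preserves one invariant: a feedback slot never holds a map or function directly, only a WeakCell that the GC may clear, and a cleared cell reads back as undefined. A small sketch of that contract; WeakCellSketch is a hypothetical stand-in, with a null pointer modeling a cleared cell:

#include <iostream>
#include <string>

struct WeakCellSketch {
  const std::string* value = nullptr;  // payload, owned elsewhere
  bool cleared() const { return value == nullptr; }
};

// Returns the payload, or "undefined" when the cell was cleared by GC,
// mirroring the undefined fallback in TypeFeedbackOracle::GetInfo.
std::string GetInfo(const WeakCellSketch& cell) {
  if (cell.cleared()) return "undefined";
  return *cell.value;
}

int main() {
  std::string target = "JSFunction f";
  WeakCellSketch live{&target};
  WeakCellSketch dead{};               // as if the GC cleared it
  std::cout << GetInfo(live) << "\n";  // JSFunction f
  std::cout << GetInfo(dead) << "\n";  // undefined
}
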
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 96cc39f007..23cf40ff57 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -26,11 +26,11 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Context> native_context);
InlineCacheState LoadInlineCacheState(TypeFeedbackId id);
- InlineCacheState LoadInlineCacheState(FeedbackVectorICSlot slot);
+ InlineCacheState LoadInlineCacheState(FeedbackVectorSlot slot);
bool StoreIsUninitialized(TypeFeedbackId id);
- bool StoreIsUninitialized(FeedbackVectorICSlot slot);
- bool CallIsUninitialized(FeedbackVectorICSlot slot);
- bool CallIsMonomorphic(FeedbackVectorICSlot slot);
+ bool StoreIsUninitialized(FeedbackVectorSlot slot);
+ bool CallIsUninitialized(FeedbackVectorSlot slot);
+ bool CallIsMonomorphic(FeedbackVectorSlot slot);
bool KeyedArrayCallIsHoley(TypeFeedbackId id);
bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
@@ -43,33 +43,33 @@ class TypeFeedbackOracle: public ZoneObject {
void GetStoreModeAndKeyType(TypeFeedbackId id,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void GetStoreModeAndKeyType(FeedbackVectorICSlot slot,
+ void GetStoreModeAndKeyType(FeedbackVectorSlot slot,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ void PropertyReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedPropertyReceiverTypes(FeedbackVectorICSlot slot,
+ void KeyedPropertyReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type);
void AssignmentReceiverTypes(TypeFeedbackId id, Handle<Name> name,
SmallMapList* receiver_types);
- void AssignmentReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ void AssignmentReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void KeyedAssignmentReceiverTypes(FeedbackVectorICSlot slot,
+ void KeyedAssignmentReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
void CountReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types);
- void CountReceiverTypes(FeedbackVectorICSlot slot,
+ void CountReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types);
- void CollectReceiverTypes(FeedbackVectorICSlot slot, SmallMapList* types);
+ void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
void CollectReceiverTypes(TypeFeedbackId id,
SmallMapList* types);
template <class T>
@@ -82,8 +82,8 @@ class TypeFeedbackOracle: public ZoneObject {
native_context;
}
- Handle<JSFunction> GetCallTarget(FeedbackVectorICSlot slot);
- Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorICSlot slot);
+ Handle<JSFunction> GetCallTarget(FeedbackVectorSlot slot);
+ Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorSlot slot);
Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
@@ -114,7 +114,7 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ void CollectReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
Code::Flags flags, SmallMapList* types);
void CollectReceiverTypes(TypeFeedbackId id, Handle<Name> name,
Code::Flags flags, SmallMapList* types);
@@ -143,7 +143,6 @@ class TypeFeedbackOracle: public ZoneObject {
// Returns an element from the type feedback vector. Returns undefined
// if there is no information.
Handle<Object> GetInfo(FeedbackVectorSlot slot);
- Handle<Object> GetInfo(FeedbackVectorICSlot slot);
private:
Handle<Context> native_context_;
@@ -155,6 +154,7 @@ class TypeFeedbackOracle: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPE_INFO_H_
diff --git a/deps/v8/src/types-inl.h b/deps/v8/src/types-inl.h
index 699d642d59..9af4bccd2e 100644
--- a/deps/v8/src/types-inl.h
+++ b/deps/v8/src/types-inl.h
@@ -481,6 +481,7 @@ void HeapTypeConfig::range_set_double(i::Handle<HeapTypeConfig::Range> range,
i::Handle<Object> number = isolate->factory()->NewNumber(value);
range->set(index + 2, *number);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPES_INL_H_
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index af1700254b..b1002be26a 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -173,7 +173,7 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
if (type->IsRange()) return type->AsRange()->Lub();
if (type->IsContext()) return kInternal & kTaggedPointer;
if (type->IsArray()) return kOtherObject;
- if (type->IsFunction()) return kOtherObject; // TODO(rossberg): kFunction
+ if (type->IsFunction()) return kFunction;
UNREACHABLE();
return kNone;
}
@@ -231,7 +231,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
@@ -248,7 +247,8 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_FUNCTION_TYPE:
- return kOtherObject; // TODO(rossberg): there should be a Function type.
+ if (map->is_undetectable()) return kUndetectable;
+ return kFunction;
case JS_REGEXP_TYPE:
return kOtherObject; // TODO(rossberg): there should be a RegExp type.
case JS_PROXY_TYPE:
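
The types.cc change above stops folding JS functions into kOtherObject and gives them their own kFunction bitset bit, so bitset subtype tests can distinguish functions from other objects. A sketch of why that matters, using illustrative bit positions and a simplified union rather than the exact layout from types.h:

#include <cstdint>
#include <iostream>

constexpr uint32_t kOtherObject = 1u << 17;
constexpr uint32_t kFunction    = 1u << 19;
constexpr uint32_t kObject      = kOtherObject | kFunction;  // simplified

// Bitset subtyping: every bit of `type` must be present in `super`.
bool Is(uint32_t type, uint32_t super) {
  return (type & ~super) == 0;
}

int main() {
  uint32_t function_lub = kFunction;  // Lub for JS_FUNCTION_TYPE, post-change
  std::cout << Is(function_lub, kObject) << "\n";       // 1: still an object
  std::cout << Is(function_lub, kOtherObject) << "\n";  // 0: now distinct
}
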
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 3acd5cc842..9ce650d943 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -159,38 +159,29 @@ namespace internal {
// clang-format off
#define MASK_BITSET_TYPE_LIST(V) \
- V(Representation, 0xfff00000u) \
- V(Semantic, 0x000ffffeu)
+ V(Representation, 0xff800000u) \
+ V(Semantic, 0x007ffffeu)
#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
#define SEMANTIC(k) ((k) & BitsetType::kSemantic)
#define REPRESENTATION_BITSET_TYPE_LIST(V) \
V(None, 0) \
- V(UntaggedBit, 1u << 20 | kSemantic) \
- V(UntaggedSigned8, 1u << 21 | kSemantic) \
- V(UntaggedSigned16, 1u << 22 | kSemantic) \
- V(UntaggedSigned32, 1u << 23 | kSemantic) \
- V(UntaggedUnsigned8, 1u << 24 | kSemantic) \
- V(UntaggedUnsigned16, 1u << 25 | kSemantic) \
- V(UntaggedUnsigned32, 1u << 26 | kSemantic) \
+ V(UntaggedBit, 1u << 23 | kSemantic) \
+ V(UntaggedIntegral8, 1u << 24 | kSemantic) \
+ V(UntaggedIntegral16, 1u << 25 | kSemantic) \
+ V(UntaggedIntegral32, 1u << 26 | kSemantic) \
V(UntaggedFloat32, 1u << 27 | kSemantic) \
V(UntaggedFloat64, 1u << 28 | kSemantic) \
V(UntaggedPointer, 1u << 29 | kSemantic) \
V(TaggedSigned, 1u << 30 | kSemantic) \
V(TaggedPointer, 1u << 31 | kSemantic) \
\
- V(UntaggedSigned, kUntaggedSigned8 | kUntaggedSigned16 | \
- kUntaggedSigned32) \
- V(UntaggedUnsigned, kUntaggedUnsigned8 | kUntaggedUnsigned16 | \
- kUntaggedUnsigned32) \
- V(UntaggedIntegral8, kUntaggedSigned8 | kUntaggedUnsigned8) \
- V(UntaggedIntegral16, kUntaggedSigned16 | kUntaggedUnsigned16) \
- V(UntaggedIntegral32, kUntaggedSigned32 | kUntaggedUnsigned32) \
- V(UntaggedIntegral, kUntaggedBit | kUntaggedSigned | kUntaggedUnsigned) \
- V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
- V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
- V(Untagged, kUntaggedNumber | kUntaggedPointer) \
+ V(UntaggedIntegral, kUntaggedBit | kUntaggedIntegral8 | \
+ kUntaggedIntegral16 | kUntaggedIntegral32) \
+ V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
+ V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
+ V(Untagged, kUntaggedNumber | kUntaggedPointer) \
V(Tagged, kTaggedSigned | kTaggedPointer)
#define INTERNAL_BITSET_TYPE_LIST(V) \
@@ -214,37 +205,39 @@ namespace internal {
V(Undetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
V(OtherObject, 1u << 17 | REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
- V(Internal, 1u << 19 | REPRESENTATION(kTagged | kUntagged)) \
+ V(Function, 1u << 19 | REPRESENTATION(kTaggedPointer)) \
+ V(Internal, 1u << 20 | REPRESENTATION(kTagged | kUntagged)) \
\
- V(Signed31, kUnsigned30 | kNegative31) \
- V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
- V(Negative32, kNegative31 | kOtherSigned32) \
- V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
- V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | kOtherUnsigned32) \
- V(Integral32, kSigned32 | kUnsigned32) \
- V(PlainNumber, kIntegral32 | kOtherNumber) \
- V(OrderedNumber, kPlainNumber | kMinusZero) \
- V(MinusZeroOrNaN, kMinusZero | kNaN) \
- V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(BooleanOrNumber, kBoolean | kNumber) \
- V(NullOrUndefined, kNull | kUndefined) \
- V(NumberOrString, kNumber | kString) \
- V(NumberOrUndefined, kNumber | kUndefined) \
- V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
- V(DetectableReceiver, kOtherObject | kProxy) \
- V(Detectable, kDetectableReceiver | kNumber | kName) \
- V(Object, kOtherObject | kUndetectable) \
- V(Receiver, kObject | kProxy) \
- V(ReceiverOrUndefined, kReceiver | kUndefined) \
- V(StringOrReceiver, kString | kReceiver) \
- V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
- kReceiver) \
- V(NonNumber, kUnique | kString | kInternal) \
- V(Any, 0xfffffffeu)
+ V(Signed31, kUnsigned30 | kNegative31) \
+ V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
+ V(Negative32, kNegative31 | kOtherSigned32) \
+ V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
+ V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
+ kOtherUnsigned32) \
+ V(Integral32, kSigned32 | kUnsigned32) \
+ V(PlainNumber, kIntegral32 | kOtherNumber) \
+ V(OrderedNumber, kPlainNumber | kMinusZero) \
+ V(MinusZeroOrNaN, kMinusZero | kNaN) \
+ V(Number, kOrderedNumber | kNaN) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(BooleanOrNumber, kBoolean | kNumber) \
+ V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
+ V(NullOrUndefined, kNull | kUndefined) \
+ V(NumberOrString, kNumber | kString) \
+ V(NumberOrUndefined, kNumber | kUndefined) \
+ V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
+ V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
+ V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
+ V(Detectable, kDetectableReceiver | kNumber | kName) \
+ V(Object, kFunction | kOtherObject | kUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(StringOrReceiver, kString | kReceiver) \
+ V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
+ kReceiver) \
+ V(NonNumber, kUnique | kString | kInternal) \
+ V(Any, 0xfffffffeu)
// clang-format on
@@ -1186,6 +1179,7 @@ struct BoundsImpl {
typedef BoundsImpl<ZoneTypeConfig> Bounds;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPES_H_
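A quick standalone check of the invariants behind the renumbered bitsets above
(constants copied from the new lists; a minimal sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kRepresentation = 0xff800000u;  // bits 23..31
    constexpr uint32_t kSemantic       = 0x007ffffeu;  // bits 1..22
    constexpr uint32_t kAny            = 0xfffffffeu;

    int main() {
      // The masks are disjoint and together cover every usable bit
      // (bit 0 stays free, so kAny is exactly their union).
      assert((kRepresentation & kSemantic) == 0);
      assert((kRepresentation | kSemantic) == kAny);

      // Bitset subtyping is bit-subset, so the new Function bit is a
      // subtype of Object by construction:
      constexpr uint32_t kTaggedPointer = 1u << 31;
      constexpr uint32_t kFunction      = 1u << 19 | kTaggedPointer;
      constexpr uint32_t kOtherObject   = 1u << 17 | kTaggedPointer;
      constexpr uint32_t kUndetectable  = 1u << 16 | kTaggedPointer;
      constexpr uint32_t kObject = kFunction | kOtherObject | kUndetectable;
      assert((kFunction & kObject) == kFunction);
      return 0;
    }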
diff --git a/deps/v8/src/typing-asm.cc b/deps/v8/src/typing-asm.cc
index f7688964a5..b267113400 100644
--- a/deps/v8/src/typing-asm.cc
+++ b/deps/v8/src/typing-asm.cc
@@ -9,16 +9,10 @@
#include "src/ast.h"
#include "src/codegen.h"
#include "src/scopes.h"
-#include "src/zone-type-cache.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
-namespace {
-
-base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
#define FAIL(node, msg) \
do { \
@@ -43,7 +37,8 @@ base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root)
- : script_(script),
+ : zone_(zone),
+ script_(script),
root_(root),
valid_(true),
stdlib_types_(zone),
@@ -57,8 +52,8 @@ AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
ZoneAllocationPolicy(zone)),
in_function_(false),
building_function_tables_(false),
- cache_(kCache.Get()) {
- InitializeAstVisitor(isolate, zone);
+ cache_(TypeCache::Get()) {
+ InitializeAstVisitor(isolate);
InitializeStdlib();
}
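For context, replacing the file-local LazyInstance with TypeCache::Get()
centralizes the cache. A plausible minimal shape (assumed; the real class
lives in src/type-cache.h and carries many pre-built Type* members):

    // Sketch only, assuming a process-wide singleton.
    class TypeCache {
     public:
      static TypeCache const& Get() {
        static TypeCache instance;  // constructed once, shared by all typers
        return instance;
      }
     private:
      TypeCache() = default;
    };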
@@ -435,6 +430,11 @@ void AsmTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
+void AsmTyper::VisitDoExpression(DoExpression* expr) {
+ FAIL(expr, "do-expression encountered");
+}
+
+
void AsmTyper::VisitConditional(Conditional* expr) {
RECURSE(VisitWithExpectation(expr->condition(), cache_.kInt32,
"condition expected to be integer"));
@@ -446,8 +446,9 @@ void AsmTyper::VisitConditional(Conditional* expr) {
expr->else_expression(), expected_type_,
"conditional else branch type mismatch with enclosing expression"));
Type* else_type = computed_type_;
- Type* type = Type::Intersect(then_type, else_type, zone());
- if (!(type->Is(cache_.kInt32) || type->Is(cache_.kFloat64))) {
+ Type* type = Type::Union(then_type, else_type, zone());
+ if (!(type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
+ type->Is(cache_.kFloat32) || type->Is(cache_.kFloat64))) {
FAIL(expr, "ill-typed conditional");
}
IntersectResult(expr, type);
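Switching from Intersect to Union is the safer check: Intersect of two
mismatched branches collapses toward Type::None, which trivially Is() any
expectation, while the Union must still fit a single asm.js value type. A
minimal standalone sketch of the new rule (flat bit flags standing in for
Type::Union):

    #include <cstdint>
    #include <cstdio>

    enum Type : uint32_t { kInt32 = 1, kUint32 = 2, kFloat32 = 4, kFloat64 = 8 };

    bool WellTypedConditional(uint32_t then_type, uint32_t else_type) {
      uint32_t u = then_type | else_type;  // union of the branch types
      return u == kInt32 || u == kUint32 || u == kFloat32 || u == kFloat64;
    }

    int main() {
      std::printf("%d\n", WellTypedConditional(kInt32, kInt32));    // 1: ok
      std::printf("%d\n", WellTypedConditional(kInt32, kFloat64));  // 0: branches disagree
      return 0;
    }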
@@ -609,24 +610,30 @@ void AsmTyper::VisitHeapAccess(Property* expr) {
}
bin->set_bounds(Bounds(cache_.kInt32));
} else {
- BinaryOperation* bin = expr->key()->AsBinaryOperation();
- if (bin == NULL || bin->op() != Token::SAR) {
- FAIL(expr->key(), "expected >> in heap access");
- }
- RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
- "array index expected to be integer"));
- Literal* right = bin->right()->AsLiteral();
- if (right == NULL || right->raw_value()->ContainsDot()) {
- FAIL(right, "heap access shift must be integer");
- }
- RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
- "array shift expected to be integer"));
- int n = static_cast<int>(right->raw_value()->AsNumber());
- int expected_shift = ElementShiftSize(type);
- if (expected_shift < 0 || n != expected_shift) {
- FAIL(right, "heap access shift must match element size");
+ Literal* literal = expr->key()->AsLiteral();
+ if (literal) {
+ RECURSE(VisitWithExpectation(literal, cache_.kInt32,
+ "array index expected to be integer"));
+ } else {
+ BinaryOperation* bin = expr->key()->AsBinaryOperation();
+ if (bin == NULL || bin->op() != Token::SAR) {
+ FAIL(expr->key(), "expected >> in heap access");
+ }
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
+ "array index expected to be integer"));
+ Literal* right = bin->right()->AsLiteral();
+ if (right == NULL || right->raw_value()->ContainsDot()) {
+ FAIL(right, "heap access shift must be integer");
+ }
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
+ "array shift expected to be integer"));
+ int n = static_cast<int>(right->raw_value()->AsNumber());
+ int expected_shift = ElementShiftSize(type);
+ if (expected_shift < 0 || n != expected_shift) {
+ FAIL(right, "heap access shift must match element size");
+ }
+ bin->set_bounds(Bounds(cache_.kInt32));
}
- bin->set_bounds(Bounds(cache_.kInt32));
}
IntersectResult(expr, type);
}
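The refactor keeps the original `index >> shift` path but now also accepts a
constant index such as HEAP32[4]. A standalone sketch of the retained shift
check (ElementShiftSizeFor is a hypothetical stand-in for ElementShiftSize):

    #include <cassert>

    int ElementShiftSizeFor(int element_size) {
      switch (element_size) {
        case 1: return 0;
        case 2: return 1;
        case 4: return 2;  // e.g. HEAP32[i >> 2]
        case 8: return 3;
        default: return -1;
      }
    }

    int main() {
      int n = 2;                              // the literal shift in i >> 2
      int expected = ElementShiftSizeFor(4);  // Int32Array element size
      assert(expected >= 0 && n == expected);
      return 0;
    }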
@@ -794,6 +801,34 @@ void AsmTyper::VisitCountOperation(CountOperation* expr) {
}
+void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
+ Type* left_expected,
+ Type* right_expected,
+ Type* result_type, bool conversion) {
+ RECURSE(VisitWithExpectation(expr->left(), left_expected,
+ "left bit operand expected to be integer"));
+ int left_intish = intish_;
+ Type* left_type = computed_type_;
+ RECURSE(VisitWithExpectation(expr->right(), right_expected,
+ "right bit operand expected to be integer"));
+ int right_intish = intish_;
+ Type* right_type = computed_type_;
+ if (left_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr, "too many consecutive additive ops");
+ }
+ if (right_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr, "too many consecutive additive ops");
+ }
+ intish_ = 0;
+ if (!conversion) {
+ if (!left_type->Is(right_type) || !right_type->Is(left_type)) {
+ FAIL(expr, "ill typed bitwise operation");
+ }
+ }
+ IntersectResult(expr, result_type);
+}
+
+
void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA: {
@@ -807,34 +842,28 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::OR:
case Token::AND:
FAIL(expr, "logical operator encountered");
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
+ case Token::BIT_OR: {
// BIT_OR allows Any since it is used as a type coercion.
- // BIT_XOR allows Number since it is used as a type coercion (encoding ~).
- Type* expectation =
- expr->op() == Token::BIT_OR
- ? Type::Any()
- : expr->op() == Token::BIT_XOR ? Type::Number() : cache_.kInt32;
- Type* result =
- expr->op() == Token::SHR ? Type::Unsigned32() : cache_.kInt32;
- RECURSE(VisitWithExpectation(expr->left(), expectation,
- "left bit operand expected to be integer"));
- int left_intish = intish_;
- RECURSE(VisitWithExpectation(expr->right(), expectation,
- "right bit operand expected to be integer"));
- int right_intish = intish_;
- if (left_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
- }
- if (right_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
- }
- intish_ = 0;
- IntersectResult(expr, result);
+ VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kIntegral32,
+ cache_.kInt32, true);
+ return;
+ }
+ case Token::BIT_XOR: {
+ // BIT_XOR allows Number since it is used as a type coercion (via ~~).
+ VisitIntegerBitwiseOperator(expr, Type::Number(), cache_.kIntegral32,
+ cache_.kInt32, true);
+ return;
+ }
+ case Token::SHR: {
+ VisitIntegerBitwiseOperator(expr, cache_.kIntegral32, cache_.kIntegral32,
+ cache_.kUint32, false);
+ return;
+ }
+ case Token::SHL:
+ case Token::SAR:
+ case Token::BIT_AND: {
+ VisitIntegerBitwiseOperator(expr, cache_.kIntegral32, cache_.kIntegral32,
+ cache_.kInt32, false);
return;
}
case Token::ADD:
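The per-token cases above all funnel into VisitIntegerBitwiseOperator; only
the operand expectations, the result type, and the conversion flag differ. A
standalone mirror of that dispatch table (names illustrative, not V8's):

    #include <cassert>

    enum Token { BIT_OR, BIT_XOR, SHR, SHL, SAR, BIT_AND };
    enum Expect { kAnyT, kNumberT, kIntegral32T };
    enum Result { kInt32R, kUint32R };

    struct Rule { Expect left; Expect right; Result result; bool conversion; };

    Rule RuleFor(Token op) {
      switch (op) {
        case BIT_OR:  return {kAnyT,        kIntegral32T, kInt32R,  true};   // x|0
        case BIT_XOR: return {kNumberT,     kIntegral32T, kInt32R,  true};   // ~~x
        case SHR:     return {kIntegral32T, kIntegral32T, kUint32R, false};  // x>>>0
        default:      return {kIntegral32T, kIntegral32T, kInt32R,  false};  // <<, >>, &
      }
    }

    int main() {
      assert(RuleFor(SHR).result == kUint32R);  // >>> is the one unsigned producer
      assert(RuleFor(BIT_OR).conversion);       // coercions skip the same-type check
      return 0;
    }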
@@ -853,7 +882,7 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
Type* right_type = computed_type_;
int right_intish = intish_;
Type* type = Type::Union(left_type, right_type, zone());
- if (type->Is(cache_.kInt32)) {
+ if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32)) {
if (expr->op() == Token::MUL) {
if (!expr->left()->IsLiteral() && !expr->right()->IsLiteral()) {
FAIL(expr, "direct integer multiply forbidden");
@@ -875,7 +904,16 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
IntersectResult(expr, cache_.kInt32);
return;
}
- } else if (type->Is(Type::Number())) {
+ } else if (expr->op() == Token::MUL &&
+ left_type->Is(cache_.kIntegral32) &&
+ right_type->Is(cache_.kFloat64)) {
+ // For unary +, expressed as x * 1.0
+ IntersectResult(expr, cache_.kFloat64);
+ return;
+ } else if (type->Is(cache_.kFloat32) && expr->op() != Token::MOD) {
+ IntersectResult(expr, cache_.kFloat32);
+ return;
+ } else if (type->Is(cache_.kFloat64)) {
IntersectResult(expr, cache_.kFloat64);
return;
} else {
@@ -899,7 +937,8 @@ void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
Type* right_type = computed_type_;
Type* type = Type::Union(left_type, right_type, zone());
expr->set_combined_type(type);
- if (type->Is(Type::Integral32()) || type->Is(Type::UntaggedFloat64())) {
+ if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
+ type->Is(cache_.kFloat32) || type->Is(cache_.kFloat64)) {
IntersectResult(expr, cache_.kInt32);
} else {
FAIL(expr, "ill-typed comparison operation");
@@ -1072,5 +1111,5 @@ void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
}
expected_type_ = save;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
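Taken together, the typing-asm.cc hunks widen arithmetic beyond int32: uint32
and float32 results are now accepted (float32 except under %), and `x * 1.0`
with an integral x is recognized as the asm.js unary-plus idiom. A rough
standalone approximation of the rule (the real check also tracks intish
counts and literal operands):

    #include <cassert>

    enum Type { kInt32, kUint32, kFloat32, kFloat64, kIllTyped };

    Type ArithType(char op, Type left, Type right) {
      if (op == '*' && left == kInt32 && right == kFloat64)
        return kFloat64;                            // unary + written as x * 1.0
      Type u = (left == right) ? left : kIllTyped;  // stand-in for Type::Union
      if (u == kFloat32 && op == '%') return kIllTyped;  // no float32 modulus
      return u;
    }

    int main() {
      assert(ArithType('*', kInt32, kFloat64) == kFloat64);
      assert(ArithType('%', kFloat32, kFloat32) == kIllTyped);
      return 0;
    }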
diff --git a/deps/v8/src/typing-asm.h b/deps/v8/src/typing-asm.h
index 74c28fb3cf..a80fec5fba 100644
--- a/deps/v8/src/typing-asm.h
+++ b/deps/v8/src/typing-asm.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-class ZoneTypeCache;
+class TypeCache;
class AsmTyper : public AstVisitor {
public:
@@ -27,6 +27,7 @@ class AsmTyper : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
+ Zone* zone_;
Script* script_;
FunctionLiteral* root_;
bool valid_;
@@ -51,7 +52,7 @@ class AsmTyper : public AstVisitor {
bool in_function_; // In module function?
bool building_function_tables_;
- ZoneTypeCache const& cache_;
+ TypeCache const& cache_;
static const int kErrorMessageLimit = 100;
char error_message_[kErrorMessageLimit];
@@ -83,13 +84,19 @@ class AsmTyper : public AstVisitor {
void VisitWithExpectation(Expression* expr, Type* expected_type,
const char* msg);
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+ void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
+ Type* right_expected, Type* result_type,
+ bool conversion);
+
+ Zone* zone() const { return zone_; }
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(AsmTyper);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPING_ASM_H_
diff --git a/deps/v8/src/typing-reset.cc b/deps/v8/src/typing-reset.cc
index af7641b485..b67b23507c 100644
--- a/deps/v8/src/typing-reset.cc
+++ b/deps/v8/src/typing-reset.cc
@@ -14,13 +14,12 @@ namespace v8 {
namespace internal {
-TypingReseter::TypingReseter(Isolate* isolate, Zone* zone,
- FunctionLiteral* root)
- : AstExpressionVisitor(isolate, zone, root) {}
+TypingReseter::TypingReseter(Isolate* isolate, FunctionLiteral* root)
+ : AstExpressionVisitor(isolate, root) {}
void TypingReseter::VisitExpression(Expression* expression) {
expression->set_bounds(Bounds::Unbounded());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/typing-reset.h b/deps/v8/src/typing-reset.h
index b809eb2161..84e51773e7 100644
--- a/deps/v8/src/typing-reset.h
+++ b/deps/v8/src/typing-reset.h
@@ -15,12 +15,12 @@ namespace internal {
class TypingReseter : public AstExpressionVisitor {
public:
- TypingReseter(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ TypingReseter(Isolate* isolate, FunctionLiteral* root);
protected:
void VisitExpression(Expression* expression) override;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPING_RESET_H_
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index bbfdc74ad2..4077ae7217 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -77,6 +77,11 @@ char* SimpleStringBuilder::Finalize() {
}
+std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot slot) {
+ return os << "#" << slot.id_;
+}
+
+
size_t hash_value(BailoutId id) {
base::hash<int> h;
return h(id.id_);
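The operator<< above pairs with the slot unification in utils.h further down.
A minimal standalone copy showing how the pieces compose (the real class is
FeedbackVectorSlot in src/utils.h):

    #include <cstddef>
    #include <iostream>

    class Slot {
     public:
      Slot() : id_(kInvalidSlot) {}
      explicit Slot(int id) : id_(id) {}
      int ToInt() const { return id_; }
      bool IsInvalid() const { return id_ == kInvalidSlot; }
      friend size_t hash_value(Slot slot) { return slot.ToInt(); }
      friend std::ostream& operator<<(std::ostream& os, Slot slot) {
        return os << "#" << slot.id_;  // same format as the overload above
      }
     private:
      static const int kInvalidSlot = -1;
      int id_;
    };

    int main() {
      std::cout << Slot(3) << "\n";  // prints "#3"
      return 0;
    }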
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index ef35f96964..1fe6a3213c 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1043,23 +1043,23 @@ class TypeFeedbackId {
};
-template <int dummy_parameter>
-class VectorSlot {
+class FeedbackVectorSlot {
public:
- explicit VectorSlot(int id) : id_(id) {}
+ FeedbackVectorSlot() : id_(kInvalidSlot) {}
+ explicit FeedbackVectorSlot(int id) : id_(id) {}
int ToInt() const { return id_; }
- static VectorSlot Invalid() { return VectorSlot(kInvalidSlot); }
+ static FeedbackVectorSlot Invalid() { return FeedbackVectorSlot(); }
bool IsInvalid() const { return id_ == kInvalidSlot; }
- VectorSlot next() const {
- DCHECK_NE(kInvalidSlot, id_);
- return VectorSlot(id_ + 1);
+ bool operator==(FeedbackVectorSlot that) const {
+ return this->id_ == that.id_;
}
+ bool operator!=(FeedbackVectorSlot that) const { return !(*this == that); }
- bool operator==(VectorSlot that) const { return this->id_ == that.id_; }
- bool operator!=(VectorSlot that) const { return !(*this == that); }
+ friend size_t hash_value(FeedbackVectorSlot slot) { return slot.ToInt(); }
+ friend std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot);
private:
static const int kInvalidSlot = -1;
@@ -1068,23 +1068,14 @@ class VectorSlot {
};
-template <int dummy_parameter>
-size_t hash_value(VectorSlot<dummy_parameter> slot) {
- return slot.ToInt();
-}
-
-
-typedef VectorSlot<0> FeedbackVectorSlot;
-typedef VectorSlot<1> FeedbackVectorICSlot;
-
-
class BailoutId {
public:
explicit BailoutId(int id) : id_(id) { }
int ToInt() const { return id_; }
static BailoutId None() { return BailoutId(kNoneId); }
- static BailoutId Prologue() { return BailoutId(kPrologueId); }
+ static BailoutId ScriptContext() { return BailoutId(kScriptContextId); }
+ static BailoutId FunctionContext() { return BailoutId(kFunctionContextId); }
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
@@ -1100,19 +1091,20 @@ class BailoutId {
static const int kNoneId = -1;
// Using 0 could disguise errors.
- static const int kPrologueId = 1;
- static const int kFunctionEntryId = 2;
+ static const int kScriptContextId = 1;
+ static const int kFunctionContextId = 2;
+ static const int kFunctionEntryId = 3;
// This AST id identifies the point after the declarations have been visited.
// We need it to capture the environment effects of declarations that emit
// code (function declarations).
- static const int kDeclarationsId = 3;
+ static const int kDeclarationsId = 4;
// Every FunctionState starts with this id.
- static const int kFirstUsableId = 4;
+ static const int kFirstUsableId = 5;
// Every compiled stub starts with this id.
- static const int kStubEntryId = 5;
+ static const int kStubEntryId = 6;
int id_;
};
@@ -1742,6 +1734,42 @@ static inline void WriteDoubleValue(void* p, double value) {
#endif // V8_TARGET_ARCH_MIPS
}
+
+static inline uint16_t ReadUnalignedUInt16(const void* p) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+ return *reinterpret_cast<const uint16_t*>(p);
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ // Prevent compiler from using load-half (mips lh) on (possibly)
+ // non-16-bit aligned address.
+ union conversion {
+ uint16_t h;
+ uint8_t b[2];
+ } c;
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(p);
+ c.b[0] = *ptr;
+ c.b[1] = *(ptr + 1);
+ return c.h;
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
+
+static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+ *(reinterpret_cast<uint16_t*>(p)) = value;
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ // Prevent compiler from using store-half (mips sh) on (possibly)
+ // non-16-bit aligned address.
+ union conversion {
+ uint16_t h;
+ uint8_t b[2];
+ } c;
+ c.h = value;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
+ *ptr = c.b[0];
+ *(ptr + 1) = c.b[1];
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
} // namespace internal
} // namespace v8
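A short usage check for the new helpers (assuming src/utils.h is on the
include path; on MIPS the union-of-bytes path compiles to two byte accesses,
so a misaligned address cannot trap):

    #include <cassert>
    #include "src/utils.h"  // brings in Read/WriteUnalignedUInt16 above

    int main() {
      unsigned char buf[4] = {0};
      void* p = buf + 1;  // deliberately not 2-byte aligned
      v8::internal::WriteUnalignedUInt16(p, 0xBEEF);
      assert(v8::internal::ReadUnalignedUInt16(p) == 0xBEEF);
      return 0;
    }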
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 760a9b564d..9f8e60c294 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -8,13 +8,12 @@
#include "src/base/once.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames.h"
-#include "src/hydrogen.h"
#include "src/isolate.h"
-#include "src/lithium-allocator.h"
#include "src/objects.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/sampler.h"
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index f5b3b84735..6016ef1419 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -41,6 +41,7 @@ class V8 : public AllStatic {
static v8::Platform* platform_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_V8_H_
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index 615ec4fe87..b1ae939f84 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -66,6 +66,7 @@ class Memory {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MEMORY_H_
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 3e189d5cb4..db0ed070fa 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -118,6 +118,7 @@ class ThreadManager {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_V8THREADS_H_
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index f4f7a7a917..b8bcbd03d1 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -44,6 +44,7 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
strong_mode_reference_start_position_(RelocInfo::kNoPosition),
strong_mode_reference_end_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
+ is_from_eval_(false),
force_context_allocation_(false),
is_used_(false),
initialization_flag_(initialization_flag),
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index dcd2e6af6e..a9cd5dcfec 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -124,6 +124,8 @@ class Variable: public ZoneObject {
index_ = index;
}
+ void SetFromEval() { is_from_eval_ = true; }
+
static int CompareIndex(Variable* const* v, Variable* const* w);
void RecordStrongModeReference(int start_position, int end_position) {
@@ -144,6 +146,16 @@ class Variable: public ZoneObject {
int strong_mode_reference_end_position() const {
return strong_mode_reference_end_position_;
}
+ PropertyAttributes DeclarationPropertyAttributes() const {
+ int property_attributes = NONE;
+ if (IsImmutableVariableMode(mode_)) {
+ property_attributes |= READ_ONLY;
+ }
+ if (is_from_eval_) {
+ property_attributes |= EVAL_DECLARED;
+ }
+ return static_cast<PropertyAttributes>(property_attributes);
+ }
private:
Scope* scope_;
@@ -165,6 +177,9 @@ class Variable: public ZoneObject {
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
+  // True if this variable is introduced by a sloppy eval.
+ bool is_from_eval_;
+
// Usage info.
bool force_context_allocation_; // set by variable resolver
bool is_used_;
@@ -193,6 +208,7 @@ class ClassVariable : public Variable {
// checks for functions too.
int declaration_group_start_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VARIABLES_H_
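A tiny standalone sketch of the attribute composition above, for a const
binding introduced by sloppy eval (enum values illustrative; the real
PropertyAttributes enum lives elsewhere in V8):

    #include <cassert>

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1, EVAL_DECLARED = 4 };

    int main() {
      int attrs = NONE;
      bool is_immutable_mode = true;  // e.g. a CONST binding
      bool is_from_eval = true;       // set via SetFromEval()
      if (is_immutable_mode) attrs |= READ_ONLY;
      if (is_from_eval) attrs |= EVAL_DECLARED;
      assert(attrs == (READ_ONLY | EVAL_DECLARED));
      return 0;
    }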
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 4f3128b918..e4637c91c9 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -202,6 +202,7 @@ inline Vector<char> MutableCStrVector(char* data, int max) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VECTOR_H_
diff --git a/deps/v8/src/version.h b/deps/v8/src/version.h
index 2596beeb8a..3395d7f4fe 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/version.h
@@ -47,6 +47,7 @@ class Version {
bool candidate, const char* soname);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VERSION_H_
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index ac3941ea84..d60548d27d 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -78,6 +78,7 @@ Address ExternalCallbackScope::scope_address() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VM_STATE_INL_H_
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index 9838b8732b..7e723a5282 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -33,7 +33,14 @@ class ExternalCallbackScope BASE_EMBEDDED {
inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
Address callback() { return callback_; }
- Address* callback_address() { return &callback_; }
+ Address* callback_entrypoint_address() {
+ if (callback_ == nullptr) return nullptr;
+#if USES_FUNCTION_DESCRIPTORS
+ return FUNCTION_ENTRYPOINT_ADDRESS(callback_);
+#else
+ return &callback_;
+#endif
+ }
ExternalCallbackScope* previous() { return previous_scope_; }
inline Address scope_address();
@@ -46,7 +53,8 @@ class ExternalCallbackScope BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VM_STATE_H_
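Background for the new indirection: on descriptor-based ABIs (e.g. AIX or
PPC64 ELFv1) a C function pointer addresses a descriptor record, not code, so
the profiler must look through it to find the entry point. A compile-only
sketch (field layout illustrative):

    // Roughly what FUNCTION_ENTRYPOINT_ADDRESS resolves to:
    struct FunctionDescriptor {
      void* entry;  // address of the first instruction
      void* toc;    // table-of-contents base
      void* env;    // environment pointer (unused from C/C++)
    };

    void** EntrypointAddress(FunctionDescriptor* fd) {
      return &fd->entry;  // the address the sampler should record
    }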
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index c66e86df3f..17376581b5 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -629,6 +629,7 @@ void Operand::set_disp64(int64_t disp) {
*p = disp;
len_ += sizeof(disp);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index cb93ab878b..c8f99a11a6 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -116,20 +116,6 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
-// Register constants.
-
-const int
- Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r12, r14, r15
- 0, 3, 2, 1, 6, 7, 8, 9, 11, 12, 14, 15
-};
-
-const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, 11
-};
-
-
-// -----------------------------------------------------------------------------
// Implementation of Operand
Operand::Operand(Register base, int32_t disp) : rex_(0) {
@@ -759,6 +745,60 @@ void Assembler::bsrl(Register dst, const Operand& src) {
}
+void Assembler::bsrq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::bsrq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::bsfl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::bsfl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::bsfq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::bsfq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_operand(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -2445,6 +2485,7 @@ void Assembler::orps(XMMRegister dst, const Operand& src) {
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2454,6 +2495,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2537,6 +2579,7 @@ void Assembler::divps(XMMRegister dst, const Operand& src) {
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2547,6 +2590,7 @@ void Assembler::movd(XMMRegister dst, Register src) {
void Assembler::movd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2557,6 +2601,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
void Assembler::movd(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -2567,6 +2612,7 @@ void Assembler::movd(Register dst, XMMRegister src) {
void Assembler::movq(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(dst, src);
@@ -2577,6 +2623,7 @@ void Assembler::movq(XMMRegister dst, Register src) {
void Assembler::movq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(src, dst);
@@ -2587,6 +2634,7 @@ void Assembler::movq(Register dst, XMMRegister src) {
void Assembler::movq(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (dst.low_bits() == 4) {
// Avoid unnecessary SIB byte.
@@ -2699,6 +2747,7 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
void Assembler::movsd(const Operand& dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
emit_optional_rex_32(src, dst);
@@ -2709,6 +2758,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
emit_optional_rex_32(dst, src);
@@ -2719,6 +2769,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
emit_optional_rex_32(dst, src);
@@ -2729,6 +2780,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
// Try to avoid an unnecessary SIB byte.
@@ -2757,6 +2809,7 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
// Try to avoid an unnecessary SIB byte.
@@ -2916,6 +2969,7 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -2925,6 +2979,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -2933,7 +2988,19 @@ void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
}
+void Assembler::movss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3); // single
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::movss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
emit_optional_rex_32(dst, src);
@@ -2944,6 +3011,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(const Operand& src, XMMRegister dst) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
emit_optional_rex_32(dst, src);
@@ -2954,6 +3022,7 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
void Assembler::psllq(XMMRegister reg, byte imm8) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
@@ -2965,6 +3034,7 @@ void Assembler::psllq(XMMRegister reg, byte imm8) {
void Assembler::psrlq(XMMRegister reg, byte imm8) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
@@ -2998,6 +3068,7 @@ void Assembler::psrld(XMMRegister reg, byte imm8) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3008,6 +3079,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttss2si(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3018,6 +3090,7 @@ void Assembler::cvttss2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3028,6 +3101,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3038,6 +3112,7 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3048,6 +3123,7 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
void Assembler::cvttsd2siq(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3058,6 +3134,7 @@ void Assembler::cvttsd2siq(Register dst, const Operand& src) {
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3068,6 +3145,7 @@ void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3087,7 +3165,30 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
}
+void Assembler::cvtqsi2ss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtqsi2ss(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3098,6 +3199,7 @@ void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3108,6 +3210,7 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3118,6 +3221,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3128,6 +3232,7 @@ void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3138,6 +3243,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3148,6 +3254,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3158,6 +3265,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3308,6 +3416,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3318,6 +3427,7 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3328,6 +3438,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3338,6 +3449,7 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3348,6 +3460,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3369,6 +3482,7 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3377,7 +3491,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(0x3a);
emit(0x0b);
emit_sse_operand(dst, src);
- // Mask precision exeption.
+ // Mask precision exception.
emit(static_cast<byte>(mode) | 0x8);
}
@@ -3402,6 +3516,7 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3472,39 +3587,80 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
}
-void Assembler::vucomisd(XMMRegister dst, XMMRegister src) {
+void Assembler::vmovd(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, k66, k0F, kWIG);
- emit(0x2e);
+ XMMRegister isrc = {src.code()};
+ emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW0);
+ emit(0x6e);
emit_sse_operand(dst, src);
}
-void Assembler::vucomisd(XMMRegister dst, const Operand& src) {
+void Assembler::vmovd(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, k66, k0F, kWIG);
- emit(0x2e);
+ emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW0);
+ emit(0x6e);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::vmovd(Register dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ XMMRegister idst = {dst.code()};
+ emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW0);
+ emit(0x7e);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::vmovq(XMMRegister dst, Register src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ XMMRegister isrc = {src.code()};
+ emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW1);
+ emit(0x6e);
emit_sse_operand(dst, src);
}
+void Assembler::vmovq(XMMRegister dst, const Operand& src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW1);
+ emit(0x6e);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::vmovq(Register dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ XMMRegister idst = {dst.code()};
+ emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW1);
+ emit(0x7e);
+ emit_sse_operand(src, dst);
+}
+
+
void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
+ XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
+ VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
emit(op);
emit_sse_operand(dst, src2);
}
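The new bsrq/bsfq emitters follow the standard x64 pattern: a REX.W prefix,
the 0F escape, the BSR/BSF opcode, then ModRM. A byte-level check for
`bsrq rax, rcx` (register codes rax=0, rcx=1, both below 8, so REX carries
only the W bit):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<uint8_t> code;
      code.push_back(0x48);                 // REX.W: 64-bit operand size
      code.push_back(0x0F);                 // two-byte opcode escape
      code.push_back(0xBD);                 // BSR (0xBC would be BSF)
      code.push_back(0xC0 | (0 << 3) | 1);  // ModRM: mod=11, reg=rax, rm=rcx
      assert((code == std::vector<uint8_t>{0x48, 0x0F, 0xBD, 0xC1}));
      return 0;
    }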
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 47e4d2bdda..2182dbb3ff 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -40,13 +40,45 @@
#include <deque>
#include "src/assembler.h"
-#include "src/compiler.h"
namespace v8 {
namespace internal {
// Utility functions
+#define GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rcx) \
+ V(rdx) \
+ V(rbx) \
+ V(rsp) \
+ V(rbp) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r10) \
+ V(r11) \
+ V(r12) \
+ V(r13) \
+ V(r14) \
+ V(r15)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rbx) \
+ V(rdx) \
+ V(rcx) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r11) \
+ V(r12) \
+ V(r14) \
+ V(r15)
+
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,226 +100,153 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
//
-
struct Register {
- // The non-allocatable registers are:
- // rsp - stack pointer
- // rbp - frame pointer
- // r10 - fixed scratch register
- // r13 - root register
- static const int kMaxNumAllocatableRegisters = 12;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 16;
-
- static int ToAllocationIndex(Register reg) {
- return kAllocationIndexByRegisterCode[reg.code()];
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- Register result = { kRegisterCodeByAllocationIndex[index] };
- return result;
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "rax",
- "rbx",
- "rdx",
- "rcx",
- "rsi",
- "rdi",
- "r8",
- "r9",
- "r11",
- "r12",
- "r14",
- "r15"
- };
- return names[index];
- }
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
- Register r = { code };
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // rax, rbx, rcx and rdx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
- return 1 << code_;
+ DCHECK(is_valid());
+ return 1 << reg_code;
}
+ bool is_byte_register() const { return reg_code <= 3; }
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
+ int high_bit() const { return reg_code >> 3; }
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
+ int low_bits() const { return reg_code & 0x7; }
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
- int code_;
-
- private:
- static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
- static const int kAllocationIndexByRegisterCode[kNumRegisters];
+ int reg_code;
};
-const int kRegister_rax_Code = 0;
-const int kRegister_rcx_Code = 1;
-const int kRegister_rdx_Code = 2;
-const int kRegister_rbx_Code = 3;
-const int kRegister_rsp_Code = 4;
-const int kRegister_rbp_Code = 5;
-const int kRegister_rsi_Code = 6;
-const int kRegister_rdi_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11;
-const int kRegister_r12_Code = 12;
-const int kRegister_r13_Code = 13;
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-const int kRegister_no_reg_Code = -1;
-
-const Register rax = { kRegister_rax_Code };
-const Register rcx = { kRegister_rcx_Code };
-const Register rdx = { kRegister_rdx_Code };
-const Register rbx = { kRegister_rbx_Code };
-const Register rsp = { kRegister_rsp_Code };
-const Register rbp = { kRegister_rbp_Code };
-const Register rsi = { kRegister_rsi_Code };
-const Register rdi = { kRegister_rdi_Code };
-const Register r8 = { kRegister_r8_Code };
-const Register r9 = { kRegister_r9_Code };
-const Register r10 = { kRegister_r10_Code };
-const Register r11 = { kRegister_r11_Code };
-const Register r12 = { kRegister_r12_Code };
-const Register r13 = { kRegister_r13_Code };
-const Register r14 = { kRegister_r14_Code };
-const Register r15 = { kRegister_r15_Code };
-const Register no_reg = { kRegister_no_reg_Code };
+
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
+
#ifdef _WIN64
// Windows calling convention
- const Register arg_reg_1 = { kRegister_rcx_Code };
- const Register arg_reg_2 = { kRegister_rdx_Code };
- const Register arg_reg_3 = { kRegister_r8_Code };
- const Register arg_reg_4 = { kRegister_r9_Code };
+const Register arg_reg_1 = {Register::kCode_rcx};
+const Register arg_reg_2 = {Register::kCode_rdx};
+const Register arg_reg_3 = {Register::kCode_r8};
+const Register arg_reg_4 = {Register::kCode_r9};
#else
// AMD64 calling convention
- const Register arg_reg_1 = { kRegister_rdi_Code };
- const Register arg_reg_2 = { kRegister_rsi_Code };
- const Register arg_reg_3 = { kRegister_rdx_Code };
- const Register arg_reg_4 = { kRegister_rcx_Code };
+const Register arg_reg_1 = {Register::kCode_rdi};
+const Register arg_reg_2 = {Register::kCode_rsi};
+const Register arg_reg_3 = {Register::kCode_rdx};
+const Register arg_reg_4 = {Register::kCode_rcx};
#endif // _WIN64
-struct XMMRegister {
- static const int kMaxNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 15;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
-
- // TODO(turbofan): Proper support for float32.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
- static int ToAllocationIndex(XMMRegister reg) {
- DCHECK(reg.code() != 0);
- return reg.code() - 1;
- }
- static XMMRegister FromAllocationIndex(int index) {
- DCHECK(0 <= index && index < kMaxNumAllocatableRegisters);
- XMMRegister result = { index + 1 };
+#define DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7) \
+ V(xmm8) \
+ V(xmm9) \
+ V(xmm10) \
+ V(xmm11) \
+ V(xmm12) \
+ V(xmm13) \
+ V(xmm14) \
+ V(xmm15)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7) \
+ V(xmm8) \
+ V(xmm9) \
+ V(xmm10) \
+ V(xmm11) \
+ V(xmm12) \
+ V(xmm13) \
+ V(xmm14) \
+ V(xmm15)
+
+
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kMaxNumRegisters = Code::kAfterLast;
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister result = {code};
return result;
}
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7",
- "xmm8",
- "xmm9",
- "xmm10",
- "xmm11",
- "xmm12",
- "xmm13",
- "xmm14",
- "xmm15"
- };
- return names[index];
- }
-
- static XMMRegister from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kMaxNumRegisters);
- XMMRegister r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
+ int high_bit() const { return reg_code >> 3; }
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
+ int low_bits() const { return reg_code & 0x7; }
- int code_;
+ // Unfortunately we can't make this private in a struct when initializing
+ // by assignment.
+ int reg_code;
};
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister xmm8 = { 8 };
-const XMMRegister xmm9 = { 9 };
-const XMMRegister xmm10 = { 10 };
-const XMMRegister xmm11 = { 11 };
-const XMMRegister xmm12 = { 12 };
-const XMMRegister xmm13 = { 13 };
-const XMMRegister xmm14 = { 14 };
-const XMMRegister xmm15 = { 15 };
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef XMMRegister DoubleRegister;
+typedef DoubleRegister XMMRegister;
enum Condition {
// any value < 0 is considered no_condition
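The register rewrite above is the classic X-macro pattern: one list macro
drives the enum, the name table, and the constant definitions, so the three
can never drift apart. A standalone demo of the same technique:

    #include <cstdio>

    #define DEMO_REGISTERS(V) V(rax) V(rcx) V(rdx)

    enum Code {
    #define REGISTER_CODE(R) kCode_##R,
      DEMO_REGISTERS(REGISTER_CODE)
    #undef REGISTER_CODE
      kAfterLast
    };

    const char* const kNames[] = {
    #define REGISTER_NAME(R) #R,
        DEMO_REGISTERS(REGISTER_NAME)
    #undef REGISTER_NAME
    };

    int main() {
      // The register count falls out of the list length automatically.
      std::printf("%d registers, first is %s\n", kAfterLast, kNames[kCode_rax]);
      return 0;
    }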
@@ -617,6 +576,11 @@ class Assembler : public AssemblerBase {
static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
static const byte kJzShortOpcode = kJccShortPrefix | zero;
+ // VEX prefix encodings.
+ enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+ enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
+ enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+ enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
// ---------------------------------------------------------------------------
// Code generation
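The enum values are pre-shifted to match the second VEX payload byte, which
packs W | ~vvvv<<3 | L | pp (hence kW1 = 0x80 and kL256 = 0x4). A quick check
of that layout (a sketch of what emit_vex_prefix presumably ORs together):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t kW1 = 0x80, kL128 = 0x0, kF2 = 0x3;
      uint8_t vvvv = 0;  // unused second source, encoded inverted
      uint8_t p = kW1 | static_cast<uint8_t>((~vvvv & 0xf) << 3) | kL128 | kF2;
      assert(p == 0xFB);  // 1 1111 0 11: W=1, ~vvvv=1111, L=128-bit, pp=F2
      return 0;
    }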
@@ -883,8 +847,14 @@ class Assembler : public AssemblerBase {
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrq(Register dst, Register src);
+ void bsrq(Register dst, const Operand& src);
void bsrl(Register dst, Register src);
void bsrl(Register dst, const Operand& src);
+ void bsfq(Register dst, Register src);
+ void bsfq(Register dst, const Operand& src);
+ void bsfl(Register dst, Register src);
+ void bsfl(Register dst, const Operand& src);
// Miscellaneous
void clc();
@@ -1042,6 +1012,13 @@ class Assembler : public AssemblerBase {
void ucomiss(XMMRegister dst, XMMRegister src);
void ucomiss(XMMRegister dst, const Operand& src);
void movaps(XMMRegister dst, XMMRegister src);
+
+ // Don't use this unless it's important to keep the
+ // top half of the destination register unchanged.
+ // Use movaps when moving float values and movd for integer
+ // values in xmm registers.
+ void movss(XMMRegister dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
@@ -1078,7 +1055,7 @@ class Assembler : public AssemblerBase {
// Don't use this unless it's important to keep the
// top half of the destination register unchanged.
- // Used movaps when moving double values and movq for integer
+ // Use movapd when moving double values and movq for integer
// values in xmm registers.
void movsd(XMMRegister dst, XMMRegister src);
@@ -1105,6 +1082,10 @@ class Assembler : public AssemblerBase {
void cvtlsi2sd(XMMRegister dst, const Operand& src);
void cvtlsi2sd(XMMRegister dst, Register src);
+
+ void cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void cvtqsi2ss(XMMRegister dst, Register src);
+
void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src);
@@ -1308,88 +1289,167 @@ class Assembler : public AssemblerBase {
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
- void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x58, dst, src1, src2);
+ void vmovd(XMMRegister dst, Register src);
+ void vmovd(XMMRegister dst, const Operand& src);
+ void vmovd(Register dst, XMMRegister src);
+ void vmovq(XMMRegister dst, Register src);
+ void vmovq(XMMRegister dst, const Operand& src);
+ void vmovq(Register dst, XMMRegister src);
+
+ void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x10, dst, src1, src2);
+ }
+ void vmovsd(XMMRegister dst, const Operand& src) {
+ vsd(0x10, dst, xmm0, src);
+ }
+ void vmovsd(const Operand& dst, XMMRegister src) {
+ vsd(0x11, src, xmm0, dst);
+ }
+
+#define AVX_SP_3(instr, opcode) \
+ AVX_S_3(instr, opcode) \
+ AVX_P_3(instr, opcode)
+
+#define AVX_S_3(instr, opcode) \
+ AVX_3(instr##ss, opcode, vss) \
+ AVX_3(instr##sd, opcode, vsd)
+
+#define AVX_P_3(instr, opcode) \
+ AVX_3(instr##ps, opcode, vps) \
+ AVX_3(instr##pd, opcode, vpd)
+
+#define AVX_3(instr, opcode, impl) \
+ void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ impl(opcode, dst, src1, src2); \
+ } \
+ void instr(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ impl(opcode, dst, src1, src2); \
}
- void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x58, dst, src1, src2);
+
+ AVX_SP_3(vsqrt, 0x51);
+ AVX_SP_3(vadd, 0x58);
+ AVX_SP_3(vsub, 0x5c);
+ AVX_SP_3(vmul, 0x59);
+ AVX_SP_3(vdiv, 0x5e);
+ AVX_SP_3(vmin, 0x5d);
+ AVX_SP_3(vmax, 0x5f);
+ AVX_P_3(vand, 0x54);
+ AVX_P_3(vor, 0x56);
+ AVX_P_3(vxor, 0x57);
+ AVX_3(vpcmpeqd, 0x76, vpd);
+ AVX_3(vcvtsd2ss, 0x5a, vsd);
+
+#undef AVX_3
+#undef AVX_S_3
+#undef AVX_P_3
+#undef AVX_SP_3
+
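  // For reference, one expansion step of the macro table above:
  // AVX_3(vaddsd, 0x58, vsd) produces exactly the two handwritten overloads
  // this block replaces:
  //   void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  //     vsd(0x58, dst, src1, src2);
  //   }
  //   void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
  //     vsd(0x58, dst, src1, src2);
  //   }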
+ void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
+ XMMRegister iop = {2};
+ vpd(0x73, iop, dst, src);
+ emit(imm8);
}
- void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5c, dst, src1, src2);
+ void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
+ XMMRegister iop = {6};
+ vpd(0x73, iop, dst, src);
+ emit(imm8);
}
- void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5c, dst, src1, src2);
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
- void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x59, dst, src1, src2);
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
- void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x59, dst, src1, src2);
+ void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
}
- void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5e, dst, src1, src2);
+ void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
- void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5e, dst, src1, src2);
+ void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
}
- void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5f, dst, src1, src2);
+ void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF3, k0F, kW1);
}
- void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5f, dst, src1, src2);
+ void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
}
- void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5d, dst, src1, src2);
+ void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF2, k0F, kW1);
}
- void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5d, dst, src1, src2);
+ void vcvttsd2si(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
- void vucomisd(XMMRegister dst, XMMRegister src);
- void vucomisd(XMMRegister dst, const Operand& src);
- void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
-
- void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x58, dst, src1, src2);
+ void vcvttsd2si(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
- void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x58, dst, src1, src2);
+ void vcvttsd2siq(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
- void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5c, dst, src1, src2);
+ void vcvttsd2siq(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
- void vsubss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5c, dst, src1, src2);
+ void vcvtsd2si(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2d, idst, xmm0, src, kF2, k0F, kW0);
}
- void vmulss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x59, dst, src1, src2);
+ void vucomisd(XMMRegister dst, XMMRegister src) {
+ vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
- void vmulss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x59, dst, src1, src2);
+ void vucomisd(XMMRegister dst, const Operand& src) {
+ vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
- void vdivss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5e, dst, src1, src2);
+ void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vsd(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
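The 0x8 OR-ed into the immediate is easier to read against the roundsd imm8 layout (the standard SSE4.1 encoding, sketched here for reference):

    // imm8 bits for roundsd:
    //   1:0  rounding mode (used when bit 2 is 0)
    //   2    rounding select: 0 = take mode from imm8, 1 = take it from MXCSR.RC
    //   3    precision mask: 1 = suppress the inexact (precision) exception
    emit(static_cast<byte>(mode) | 0x8);  // mode in bits 1:0, inexact suppressed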
- void vdivss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5e, dst, src1, src2);
+
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vmaxss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5f, dst, src1, src2);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vmaxss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5f, dst, src1, src2);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w);
+
+ void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vss(0x10, dst, src1, src2);
}
- void vminss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5d, dst, src1, src2);
+ void vmovss(XMMRegister dst, const Operand& src) {
+ vss(0x10, dst, xmm0, src);
}
- void vminss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5d, dst, src1, src2);
+ void vmovss(const Operand& dst, XMMRegister src) {
+ vss(0x11, src, xmm0, dst);
}
void vucomiss(XMMRegister dst, XMMRegister src);
void vucomiss(XMMRegister dst, const Operand& src);
void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
+ void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
+ void vmovmskpd(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vpd(0x50, idst, xmm0, src);
+ }
+
+ void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
// BMI instruction
void andnq(Register dst, Register src1, Register src2) {
bmi1q(0xf2, dst, src1, src2);
@@ -1567,33 +1627,6 @@ class Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, const Operand& src, byte imm8);
-#define PACKED_OP_LIST(V) \
- V(and, 0x54) \
- V(xor, 0x57)
-
-#define AVX_PACKED_OP_DECLARE(name, opcode) \
- void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vps(opcode, dst, src1, src2); \
- } \
- void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vps(opcode, dst, src1, src2); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vpd(opcode, dst, src1, src2); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vpd(opcode, dst, src1, src2); \
- }
-
- PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
- void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
-
- // Debugging
- void Print();
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1790,11 +1823,6 @@ class Assembler : public AssemblerBase {
}
// Emit vex prefix
- enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
- enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
- enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
- enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
-
void emit_vex2_byte0() { emit(0xc5); }
inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
SIMDPrefix pp);
@@ -2148,6 +2176,7 @@ class EnsureSpace BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 38d7e5abeb..4efd3bfb23 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -21,12 +21,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
+ // (only guaranteed when the called function
+ // is marked as DontAdaptArguments)
+ // -- rdi : called function
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * argc] : first argument
// -- rsp[8 * (argc + 1)] : receiver
// -----------------------------------
__ AssertFunction(rdi);
@@ -50,8 +51,21 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects rax to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But rax is only valid
+ // if the called function is marked as DontAdaptArguments, otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ Label argc, done_argc;
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ j(equal, &argc, Label::kNear);
+ __ leap(rax, Operand(rbx, num_extra_args + 1));
+ __ jmp(&done_argc, Label::kNear);
+ __ bind(&argc);
__ addp(rax, Immediate(num_extra_args + 1));
+ __ bind(&done_argc);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
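In C-like pseudocode, the argument-count fix-up this block performs (a sketch; the names mirror the assembly above):

    int formal_count = shared->formal_parameter_count;  // rbx
    if (formal_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
      rax = rax + num_extra_args + 1;           // rax already holds a valid argc
    } else {
      rax = formal_count + num_extra_args + 1;  // the leap(rax, ...) path
    }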
@@ -135,14 +149,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
- // Fall back to runtime if the original constructor and function differ.
- __ cmpp(rdx, rdi);
+ // Verify that the original constructor is a JSFunction.
+ __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &rt_call);
- // Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // rdx: original constructor
+ __ movp(rax, FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
// Will catch both a NULL and a Smi
DCHECK(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
@@ -151,6 +164,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(rax, MAP_TYPE, rbx);
__ j(not_equal, &rt_call);
+ // Fall back to runtime if the expected base constructor and the actual
+ // base constructor differ.
+ __ cmpp(rdi, FieldOperand(rax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc), in which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
@@ -178,7 +196,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(rdx);
__ Push(rdi);
- __ Push(rdi); // constructor
+ __ Push(rax); // initial map
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(rdi);
@@ -263,8 +281,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Must restore rsi (context) and rdi (constructor) before calling runtime.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ movp(rdi, Operand(rsp, offset));
- __ Push(rdi); // argument 2/1: constructor function
- __ Push(rdx); // argument 3/2: original constructor
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ movp(rbx, rax); // store result in rbx
@@ -698,28 +716,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
+ __ Push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ Pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -771,6 +777,86 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ bool push_receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ movp(rcx, rax);
+ if (push_receiver) {
+ __ addp(rcx, Immediate(1)); // Add one for receiver.
+ }
+
+ __ shlp(rcx, Immediate(kPointerSizeLog2));
+ __ negp(rcx);
+ __ addp(rcx, rbx);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ j(always, &loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(rbx, 0));
+ __ subp(rbx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmpp(rbx, rcx);
+ __ j(greater, &loop_header, Label::kNear);
+}
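The pointer arithmetic above, restated in plain C++ (a sketch of the register usage, not runnable against real frames):

    intptr_t count = num_args + (push_receiver ? 1 : 0);  // rcx before shlp
    intptr_t last  = first_arg - count * kPointerSize;    // rcx after negp/addp
    for (intptr_t p = first_arg; p > last; p -= kPointerSize) {
      Push(*reinterpret_cast<Object**>(p));  // walk down in memory from rbx
    }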
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- rdi : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ PopReturnAddressTo(kScratchRegister);
+
+ Generate_InterpreterPushArgs(masm, true);
+
+ // Call the target.
+ __ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (can be any Object)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ PopReturnAddressTo(kScratchRegister);
+
+ // Push slot for the receiver to be constructed.
+ __ Push(Immediate(0));
+
+ Generate_InterpreterPushArgs(masm, false);
+
+ // Push return address in preparation for the tail-call.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Call the constructor (rax, rdx, rdi passed on).
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1338,6 +1424,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
+ // -- rdx : original constructor
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
@@ -1364,17 +1451,19 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
Label convert, done_convert;
__ JumpIfSmi(rbx, &convert, Label::kNear);
- __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rdx);
+ __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rcx);
__ j(below, &done_convert);
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
+ __ Push(rdx);
__ Push(rdi);
__ Move(rax, rbx);
__ CallStub(&stub);
__ Move(rbx, rax);
__ Pop(rdi);
+ __ Pop(rdx);
}
__ bind(&done_convert);
}
@@ -1384,9 +1473,14 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rbx : the first argument
// -- rdi : constructor function
+ // -- rdx : original constructor
// -----------------------------------
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and constructor differ.
+ __ cmpp(rdx, rdi);
+ __ j(not_equal, &rt_call);
- Label allocate, done_allocate;
__ Allocate(JSValue::kSize, rax, rcx, no_reg, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -1412,6 +1506,21 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(rbx);
}
__ jmp(&done_allocate);
+
+ // Fall back to the runtime to create a new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx);
+ __ Push(rdi);
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(rdi);
+ __ Pop(rbx);
+ }
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ __ Ret();
}
}
@@ -1612,75 +1721,92 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
StackArgumentsAccessor args(rsp, rax);
__ AssertFunction(rdi);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kFunctionKindByteOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ j(not_zero, &class_constructor);
+
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the shared function info.
+ // -- rdi : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ j(not_zero, &done_convert);
{
- __ movp(rcx, args.GetReceiverOperand());
-
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rcx : the receiver
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
- __ j(above_equal, &done_convert);
- __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(rcx);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ movp(rcx, args.GetReceiverOperand());
+ __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
+ __ j(above_equal, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy, Label::kNear);
+ __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(rcx);
+ }
+ __ jmp(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ movp(rax, rcx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ movp(rcx, rax);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ jmp(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
- __ Push(rdi);
- __ movp(rax, rcx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ movp(rcx, rax);
- __ Pop(rdi);
- __ Pop(rax);
- __ SmiToInteger32(rax, rax);
- }
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ movp(args.GetReceiverOperand(), rcx);
}
__ bind(&done_convert);
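The mode-driven conversion the block above implements, as scalar pseudocode (a sketch; ToObject and the global-proxy load stand in for the ToObjectStub call and LoadGlobalProxy):

    if (mode == ConvertReceiverMode::kNullOrUndefined) {
      receiver = global_proxy;  // caller already proved null/undefined
    } else if (!receiver->IsJSReceiver()) {
      if (mode != ConvertReceiverMode::kNotNullOrUndefined &&
          (receiver->IsUndefined() || receiver->IsNull())) {
        receiver = global_proxy;
      } else {
        receiver = ToObject(receiver);  // may GC; rdx is reloaded afterwards
      }
    }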
@@ -1698,11 +1824,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount actual(rax);
ParameterCount expected(rbx);
__ InvokeCode(rdx, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
@@ -1713,7 +1846,7 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(rdi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
@@ -1735,7 +1868,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ movp(args.GetReceiverOperand(), rdi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1832,41 +1967,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rbx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- rdi : the target to call (can be any Object).
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(rdx);
-
- // Find the address of the last argument.
- __ movp(rcx, rax);
- __ addp(rcx, Immediate(1)); // Add one for receiver.
- __ shlp(rcx, Immediate(kPointerSizeLog2));
- __ negp(rcx);
- __ addp(rcx, rbx);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ j(always, &loop_check);
- __ bind(&loop_header);
- __ Push(Operand(rbx, 0));
- __ subp(rbx, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmpp(rbx, rcx);
- __ j(greater, &loop_header, Label::kNear);
-
- // Call the target.
- __ Push(rdx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 0942b2fb3c..b7fb099512 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -180,7 +180,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
- __ movsd(xmm0, mantissa_operand);
+ __ Movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
if (stash_exponent_copy) __ pushq(rcx);
@@ -200,7 +200,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ jmp(&check_negative);
__ bind(&process_64_bits);
- __ cvttsd2siq(result_reg, xmm0);
+ __ Cvttsd2siq(result_reg, xmm0);
__ jmp(&done, Label::kNear);
// If the double was negative, negate the integer result.
@@ -237,14 +237,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ JumpIfSmi(rdx, &load_smi_rdx);
__ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
__ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_rdx);
@@ -288,7 +288,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
- __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ Movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
@@ -304,14 +304,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type() == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiToInteger32(exponent, exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
}
if (exponent_type() != INTEGER) {
@@ -324,7 +324,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&try_arithmetic_simplification);
- __ cvttsd2si(exponent, double_exponent);
+ __ Cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x1));
__ j(overflow, &call_runtime);
@@ -337,9 +337,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Test for 0.5.
// Load double_scratch with 0.5.
__ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
- __ movq(double_scratch, scratch);
+ __ Movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
+ __ Ucomisd(double_scratch, double_exponent);
__ j(not_equal, &not_plus_half, Label::kNear);
// Calculates square root of base. Check for the special case of
@@ -347,31 +347,31 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
+ __ Movq(double_scratch, scratch);
+ __ Ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_sqrt, Label::kNear);
__ j(carry, &continue_sqrt, Label::kNear);
// Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
+ __ Xorpd(double_result, double_result);
+ __ Subsd(double_result, double_scratch);
__ jmp(&done);
__ bind(&continue_sqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to 0.
- __ sqrtsd(double_result, double_scratch);
+ __ Xorpd(double_scratch, double_scratch);
+ __ Addsd(double_scratch, double_base); // Convert -0 to 0.
+ __ Sqrtsd(double_result, double_scratch);
__ jmp(&done);
// Test for -0.5.
__ bind(&not_plus_half);
// Load double_scratch with -0.5 by subtracting 1.
- __ subsd(double_scratch, double_result);
+ __ Subsd(double_scratch, double_result);
// Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
+ __ Ucomisd(double_scratch, double_exponent);
__ j(not_equal, &fast_power, Label::kNear);
// Calculates reciprocal of square root of base. Check for the special
@@ -379,23 +379,23 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
+ __ Movq(double_scratch, scratch);
+ __ Ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_rsqrt, Label::kNear);
__ j(carry, &continue_rsqrt, Label::kNear);
// Set result to 0 in the special case.
- __ xorps(double_result, double_result);
+ __ Xorpd(double_result, double_result);
__ jmp(&done);
__ bind(&continue_rsqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
+ __ Xorpd(double_exponent, double_exponent);
+ __ Addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ Sqrtsd(double_exponent, double_exponent);
+ __ Divsd(double_result, double_exponent);
__ jmp(&done);
}
@@ -405,9 +405,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), double_exponent);
+ __ Movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
- __ movsd(Operand(rsp, 0), double_base);
+ __ Movsd(Operand(rsp, 0), double_base);
__ fld_d(Operand(rsp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -430,7 +430,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
- __ movsd(double_result, Operand(rsp, 0));
+ __ Movsd(double_result, Operand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
@@ -445,8 +445,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
__ movp(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
+ __ Movsd(double_scratch, double_base); // Back up base.
+ __ Movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
Label no_neg, while_true, while_false;
@@ -460,26 +460,26 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Above condition means CF==0 && ZF==0. This means that the
// bit that has been shifted out is 0 and the result is not 0.
__ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
+ __ Movsd(double_result, double_scratch);
__ j(zero, &while_false, Label::kNear);
__ bind(&while_true);
__ shrl(scratch, Immediate(1));
- __ mulsd(double_scratch, double_scratch);
+ __ Mulsd(double_scratch, double_scratch);
__ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
+ __ Mulsd(double_result, double_scratch);
__ j(not_zero, &while_true);
__ bind(&while_false);
// If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
+ __ Divsd(double_scratch2, double_result);
+ __ Movsd(double_result, double_scratch2);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result);
+ __ Xorpd(double_scratch2, double_scratch2);
+ __ Ucomisd(double_scratch2, double_result);
// double_exponent aliased as double_scratch2 has already been overwritten
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with exponent value before bailing out.
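The integer-exponent path above is binary exponentiation; the same algorithm in scalar C++ (a sketch that ignores the subnormal bail-out):

    double PowInt(double base, int exponent) {
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;   // double_scratch2 starts as 1
      double square = base;  // double_scratch
      while (e != 0) {
        if (e & 1) result *= square;
        square *= square;
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;  // the final Divsd step
    }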
@@ -497,13 +497,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in rax.
__ bind(&done);
__ AllocateHeapNumber(rax, rcx, &call_runtime);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+ __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
__ bind(&call_runtime);
// Move base to the correct argument register. Exponent is already in xmm1.
- __ movsd(xmm0, double_base);
+ __ Movsd(xmm0, double_base);
DCHECK(double_exponent.is(xmm1));
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -512,7 +512,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
- __ movsd(double_result, xmm0);
+ __ Movsd(double_result, xmm0);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
@@ -664,7 +664,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Get the arguments map from the current native context into r9.
Label has_mapped_parameters, instantiate;
__ movp(r9, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(r9, FieldOperand(r9, GlobalObject::kNativeContextOffset));
+ __ movp(r9, FieldOperand(r9, JSGlobalObject::kNativeContextOffset));
__ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -946,7 +946,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments map from the current native context.
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, FieldOperand(rdi, JSGlobalObject::kNativeContextOffset));
const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
__ movp(rdi, Operand(rdi, offset));
@@ -1565,8 +1565,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// greater-equal. Return -1 for them, so the comparison yields
// false for all conditions except not-equal.
__ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Ucomisd(xmm0, xmm0);
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
@@ -1641,7 +1641,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
__ xorl(rax, rax);
__ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
+ __ Ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear);
@@ -1875,104 +1875,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, cont);
-
- // Do not transform the receiver for natives.
- // SharedFunctionInfo is already loaded into rcx.
- __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, StackArgumentsAccessor* args,
- int argc) {
- __ Set(rax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm,
- StackArgumentsAccessor* args,
- Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Pop(rdi);
- }
- __ movp(args->GetReceiverOperand(), rax);
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // rdi : the function to call
-
- // wrap_and_call can only be true if we are compiling a monomorphic method.
- Label slow, wrap, cont;
- StackArgumentsAccessor args(rsp, argc);
-
- if (needs_checks) {
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Load the receiver from the stack.
- __ movp(rax, args.GetReceiverOperand());
-
- if (needs_checks) {
- __ JumpIfSmi(rax, &wrap);
-
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, &args, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, &args, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
// rbx : feedback vector
@@ -2047,17 +1949,17 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
void CallICStub::Generate(MacroAssembler* masm) {
- // rdi - function
- // rdx - slot id
- // rbx - vector
+ // ----------- S t a t e -------------
+ // -- rdi - function
+ // -- rdx - slot id
+ // -- rbx - vector
+ // -----------------------------------
Isolate* isolate = masm->isolate();
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
StackArgumentsAccessor args(rsp, argc);
ParameterCount actual(argc);
@@ -2093,36 +1995,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Smi::FromInt(CallICNexus::kCallCountIncrement));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Load the receiver from the stack.
- __ movp(rax, args.GetReceiverOperand());
-
- __ JumpIfSmi(rax, &wrap);
-
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, &args, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, &args, &cont);
- }
+ __ bind(&call);
+ __ Set(rax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
- __ j(equal, &slow_start);
+ __ j(equal, &call);
// Check if we have an allocation site.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
@@ -2153,7 +2034,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We have to update statistics for runtime profiling.
__ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
__ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
- __ jmp(&slow_start);
+ __ jmp(&call);
__ bind(&uninitialized);
@@ -2192,21 +2073,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(rdi);
}
- __ jmp(&have_js_function);
+ __ jmp(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &slow);
- // Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
// Unreachable
__ int3();
@@ -2268,6 +2142,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// rbp: frame pointer of calling JS frame (restored after C call)
// rsp: stack pointer (restored after C call)
// rsi: current context (restored)
+ //
+ // If argv_in_register():
+ // r15: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -2277,7 +2154,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
#else // _WIN64
int arg_stack_space = 0;
#endif // _WIN64
- __ EnterExitFrame(arg_stack_space, save_doubles());
+ if (argv_in_register()) {
+ DCHECK(!save_doubles());
+ __ EnterApiExitFrame(arg_stack_space);
+ // Move argc into r14 (argv is already in r15).
+ __ movp(r14, rax);
+ } else {
+ __ EnterExitFrame(arg_stack_space, save_doubles());
+ }
// rbx: pointer to builtin function (C callee-saved).
// rbp: frame pointer of exit frame (restored after C call).
@@ -2357,7 +2241,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles());
+ __ LeaveExitFrame(save_doubles(), !argv_in_register());
__ ret(0);
// Handling of exception.
@@ -2802,7 +2686,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -3112,6 +2996,25 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in rax.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ testp(rax, rax);
+ __ j(greater_equal, &positive_smi, Label::kNear);
+ __ xorl(rax, rax);
+ __ bind(&positive_smi);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ PopReturnAddressTo(rcx); // Pop return address.
+ __ Push(rax); // Push argument.
+ __ PushReturnAddressFrom(rcx); // Push return address.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
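Semantically the stub covers the fast half of ES6 ToLength (section 7.1.15); the full operation, sketched:

    // ToLength(v) clamps ToInteger(v) into [0, 2^53 - 1].
    double len = ToInteger(v);                  // non-smi: Runtime::kToLength
    if (len <= 0) return 0;                     // smi fast path: xorl(rax, rax)
    return std::min(len, 9007199254740991.0);   // 2^53 - 1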
+
+
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in rax.
Label is_number;
@@ -3424,7 +3327,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfSmi(rax, &right_smi, Label::kNear);
__ CompareMap(rax, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
@@ -3434,7 +3337,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
__ CompareMap(rdx, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
@@ -3442,7 +3345,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&done);
// Compare operands
- __ ucomisd(xmm0, xmm1);
+ __ Ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear);
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 1344400d48..d4f8b29dbc 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -294,13 +294,15 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(rcx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ Register candidate = Register::from_code(i);
+ if (candidate.IsAllocatable()) {
+ if (candidate.is(rcx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
}
UNREACHABLE();
return no_reg;
@@ -360,6 +362,7 @@ class RecordWriteStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 4f08c7e7a6..5c297f1a07 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -51,7 +51,7 @@ UnaryMathFunction CreateExpFunction() {
__ popq(rbx);
__ popq(rax);
- __ movsd(xmm0, result);
+ __ Movsd(xmm0, result);
__ Ret();
CodeDesc desc;
@@ -74,7 +74,7 @@ UnaryMathFunction CreateSqrtFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
// Move double input into registers.
- __ sqrtsd(xmm0, xmm0);
+ __ Sqrtsd(xmm0, xmm0);
__ Ret();
CodeDesc desc;
@@ -95,7 +95,7 @@ ModuloFunction CreateModuloFunction() {
byte* buffer = static_cast<byte*>(
base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
- Assembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
@@ -107,8 +107,8 @@ ModuloFunction CreateModuloFunction() {
// Compute x mod y.
// Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
- __ movsd(Operand(rsp, kRegisterSize), xmm0);
+ __ Movsd(Operand(rsp, kRegisterSize * 2), xmm1);
+ __ Movsd(Operand(rsp, kRegisterSize), xmm0);
__ fld_d(Operand(rsp, kRegisterSize * 2));
__ fld_d(Operand(rsp, kRegisterSize));
@@ -147,13 +147,13 @@ ModuloFunction CreateModuloFunction() {
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
__ movq(rcx, kNaNValue);
__ movq(Operand(rsp, kRegisterSize), rcx);
- __ movsd(xmm0, Operand(rsp, kRegisterSize));
+ __ Movsd(xmm0, Operand(rsp, kRegisterSize));
__ jmp(&return_result);
// If result is valid, return that.
__ bind(&valid_result);
__ fstp_d(Operand(rsp, kRegisterSize));
- __ movsd(xmm0, Operand(rsp, kRegisterSize));
+ __ Movsd(xmm0, Operand(rsp, kRegisterSize));
// Clean up FPU stack and exceptions and return xmm0
__ bind(&return_result);
@@ -333,8 +333,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
__ Cvtlsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
- xmm0);
+ __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
__ jmp(&entry);
__ bind(&convert_hole);
@@ -604,38 +603,38 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
+ __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
+ __ Xorpd(result, result);
+ __ Ucomisd(double_scratch, input);
__ j(above_equal, &done);
- __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
+ __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
+ __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
__ j(above_equal, &done);
- __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movq(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
+ __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
+ __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
+ __ Mulsd(double_scratch, input);
+ __ Addsd(double_scratch, result);
+ __ Movq(temp2, double_scratch);
+ __ Subsd(double_scratch, result);
+ __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
__ leaq(temp1, Operand(temp2, 0x1ff800));
__ andq(temp2, Immediate(0x7ff));
__ shrq(temp1, Immediate(11));
- __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
+ __ Mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shlq(temp1, Immediate(52));
__ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ subsd(double_scratch, input);
- __ movsd(input, double_scratch);
- __ subsd(result, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ movq(input, temp1);
- __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
- __ subsd(result, double_scratch);
- __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
- __ mulsd(result, input);
+ __ Subsd(double_scratch, input);
+ __ Movsd(input, double_scratch);
+ __ Subsd(result, double_scratch);
+ __ Mulsd(input, double_scratch);
+ __ Mulsd(result, input);
+ __ Movq(input, temp1);
+ __ Mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
+ __ Subsd(result, double_scratch);
+ __ Addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
+ __ Mulsd(result, input);
__ bind(&done);
}
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 728d04048e..09af38ddea 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -108,6 +108,7 @@ class StackArgumentsAccessor BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 72c92f0a39..620f614aa5 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -7,6 +7,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -95,7 +96,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -117,7 +118,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -138,14 +139,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::NumAllocatableRegisters();
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Movsd(Operand(rsp, offset), xmm_reg);
}
// We push all registers onto the stack, even though we do not need
@@ -210,7 +213,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ popq(Operand(rbx, dst_offset));
}
@@ -274,10 +277,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Movsd(xmm_reg, Operand(rbx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
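
The deoptimizer loops above switch from allocation indices to architectural
register codes. A condensed sketch of the new save shape (types and calls as
in the diff; note the stack slot is keyed by the register code, so a slot
stays put even when the allocatable set changes):

    const RegisterConfiguration* config =
        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      int code = config->GetAllocatableDoubleCode(i);
      XMMRegister xmm_reg = XMMRegister::from_code(code);
      __ Movsd(Operand(rsp, code * kDoubleSize), xmm_reg);  // slot == code
    }
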
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 5534887f5a..d6cf513392 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -351,6 +351,11 @@ class DisassemblerX64 {
bool rex_w() { return (rex_ & 0x08) != 0; }
+ bool vex_w() {
+ DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+ return vex_byte0_ == VEX3_PREFIX ? (vex_byte2_ & 0x80) != 0 : false;
+ }
+
bool vex_128() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
@@ -947,10 +952,43 @@ int DisassemblerX64::AVXInstruction(byte* data) {
default:
UnimplementedInstruction();
}
+ } else if (vex_66() && vex_0f3a()) {
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x0b:
+ AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+ default:
+ UnimplementedInstruction();
+ }
} else if (vex_f3() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x10:
+ AppendToBuffer("vmovss %s,", NameOfXMMRegister(regop));
+ if (mod == 3) {
+ AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ }
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x11:
+ AppendToBuffer("vmovss ");
+ current += PrintRightXMMOperand(current);
+ if (mod == 3) {
+ AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ }
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0x2a:
+ AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -961,6 +999,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5a:
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5c:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -988,6 +1031,41 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x10:
+ AppendToBuffer("vmovsd %s,", NameOfXMMRegister(regop));
+ if (mod == 3) {
+ AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ }
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x11:
+ AppendToBuffer("vmovsd ");
+ current += PrintRightXMMOperand(current);
+ if (mod == 3) {
+ AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ }
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0x2a:
+ AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ break;
+ case 0x2c:
+ AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x2d:
+ AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x51:
+ AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -998,6 +1076,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5a:
+ AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5c:
AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1133,6 +1216,15 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x29:
+ AppendToBuffer("vmovaps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x2e:
AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1154,20 +1246,59 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x29:
+ AppendToBuffer("vmovapd ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x2e:
AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x50:
+ AppendToBuffer("vmovmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x54:
AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x56:
+ AppendToBuffer("vorpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x57:
AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x6e:
+ AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
+ NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ break;
+ case 0x73:
+ AppendToBuffer("%s %s,", regop == 6 ? "vpsllq" : "vpsrlq",
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%u", *current++);
+ break;
+ case 0x76:
+ AppendToBuffer("vpcmpeqd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x7e:
+ AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
default:
UnimplementedInstruction();
}
@@ -1385,7 +1516,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// roundsd xmm, xmm/m64, imm8
AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", (*current) & 3);
+ AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &rm, &regop);
@@ -1726,7 +1857,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
- } else if (opcode == 0xBD) {
+ } else if (opcode == 0xB8 || opcode == 0xBC || opcode == 0xBD) {
+ // POPCNT, CTZ, CLZ.
AppendToBuffer("%s%c ", mnemonic, operand_size_code());
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
@@ -1780,6 +1912,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBC:
+ return "bsf";
case 0xBD:
return "bsr";
case 0xBE:
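
The new vex_w() accessor drives the q/l mnemonic split in the vcvt cases
above: a three-byte VEX prefix (C4) carries the W bit in bit 7 of its third
byte, while the two-byte form (C5) has no W field and reads as W=0. Condensed:

    // Selection logic as implemented in the diff above.
    bool w = vex_byte0_ == VEX3_PREFIX ? (vex_byte2_ & 0x80) != 0 : false;
    const char* mnemonic = w ? "vcvtqsi2sd"   // 64-bit integer source
                             : "vcvtlsi2sd";  // 32-bit integer source
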
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 1d9cf1ec13..d213ecb7dc 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -71,6 +71,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_FRAMES_X64_H_
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index a062df590f..e69d38d1f3 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -78,14 +78,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
@@ -116,6 +108,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return rax; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return rax; }
@@ -229,6 +225,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -392,16 +395,39 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- rax, // argument count (including receiver)
+ rax, // argument count (not including receiver)
rbx, // address of first argument
      rdi   // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rax, // argument count (not including receiver)
+ rdx, // original constructor
+ rdi, // constructor
+ rbx, // address of first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rax, // argument count (argc)
+ r15, // address of first argument (argv)
+ rbx // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index ea837dca4b..3c8cab2d83 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -10,6 +10,7 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/heap/heap.h"
+#include "src/register-configuration.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -22,8 +23,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -715,7 +716,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the builtins object into target register.
movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ movp(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
movp(target, ContextOperand(target, native_context_index));
}
@@ -729,7 +730,8 @@ void MacroAssembler::GetBuiltinEntry(Register target,
}
-#define REG(Name) { kRegister_ ## Name ## _Code }
+#define REG(Name) \
+ { Register::kCode_##Name }
static const Register saved_regs[] = {
REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
@@ -759,7 +761,7 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
+ Movsd(Operand(rsp, i * kDoubleSize), reg);
}
}
}
@@ -772,7 +774,7 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
+ Movsd(reg, Operand(rsp, i * kDoubleSize));
}
addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
@@ -785,15 +787,165 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
+void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtss2sd(dst, src, src);
+ } else {
+ cvtss2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtss2sd(dst, dst, src);
+ } else {
+ cvtss2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtsd2ss(dst, src, src);
+ } else {
+ cvtsd2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtsd2ss(dst, dst, src);
+ } else {
+ cvtsd2ss(dst, src);
+ }
+}
+
+
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
- xorps(dst, dst);
- cvtlsi2sd(dst, src);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtlsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtlsi2sd(dst, src);
+ }
}
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
- xorps(dst, dst);
- cvtlsi2sd(dst, src);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtlsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtlsi2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, dst);
+ vcvtqsi2ss(dst, dst, src);
+ } else {
+ xorps(dst, dst);
+ cvtqsi2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, dst);
+ vcvtqsi2ss(dst, dst, src);
+ } else {
+ xorps(dst, dst);
+ cvtqsi2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtqsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtqsi2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtqsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtqsi2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtsd2si(dst, src);
+ } else {
+ cvtsd2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2si(dst, src);
+ } else {
+ cvttsd2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2si(dst, src);
+ } else {
+ cvttsd2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2siq(dst, src);
+ } else {
+ cvttsd2siq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2siq(dst, src);
+ } else {
+ cvttsd2siq(dst, src);
+ }
}
@@ -2391,15 +2543,15 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
- xorps(dst, dst);
+ Xorpd(dst, dst);
} else {
unsigned pop = base::bits::CountPopulation32(src);
DCHECK_NE(0u, pop);
if (pop == 32) {
- pcmpeqd(dst, dst);
+ Pcmpeqd(dst, dst);
} else {
movl(kScratchRegister, Immediate(src));
- movq(dst, kScratchRegister);
+ Movq(dst, kScratchRegister);
}
}
}
@@ -2407,20 +2559,20 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
- xorps(dst, dst);
+ Xorpd(dst, dst);
} else {
unsigned nlz = base::bits::CountLeadingZeros64(src);
unsigned ntz = base::bits::CountTrailingZeros64(src);
unsigned pop = base::bits::CountPopulation64(src);
DCHECK_NE(0u, pop);
if (pop == 64) {
- pcmpeqd(dst, dst);
+ Pcmpeqd(dst, dst);
} else if (pop + ntz == 64) {
- pcmpeqd(dst, dst);
- psllq(dst, ntz);
+ Pcmpeqd(dst, dst);
+ Psllq(dst, ntz);
} else if (pop + nlz == 64) {
- pcmpeqd(dst, dst);
- psrlq(dst, nlz);
+ Pcmpeqd(dst, dst);
+ Psrlq(dst, nlz);
} else {
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
@@ -2428,13 +2580,224 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
Move(dst, lower);
} else {
movq(kScratchRegister, src);
- movq(dst, kScratchRegister);
+ Movq(dst, kScratchRegister);
}
}
}
}
+void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovaps(dst, src);
+ } else {
+ movaps(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovapd(dst, src);
+ } else {
+ movapd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovsd(dst, dst, src);
+ } else {
+ movsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovsd(dst, src);
+ } else {
+ movsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovsd(dst, src);
+ } else {
+ movsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovss(dst, dst, src);
+ } else {
+ movss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovss(dst, src);
+ } else {
+ movss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovss(dst, src);
+ } else {
+ movss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movd(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovd(dst, src);
+ } else {
+ movd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovd(dst, src);
+ } else {
+ movd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movd(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovd(dst, src);
+ } else {
+ movd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movq(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovmskpd(dst, src);
+ } else {
+ movmskpd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundsd(dst, dst, src, mode);
+ } else {
+ roundsd(dst, src, mode);
+ }
+}
+
+
+void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsqrtsd(dst, dst, src);
+ } else {
+ sqrtsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsqrtsd(dst, dst, src);
+ } else {
+ sqrtsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomiss(src1, src2);
+ } else {
+ ucomiss(src1, src2);
+ }
+}
+
+
+void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomiss(src1, src2);
+ } else {
+ ucomiss(src1, src2);
+ }
+}
+
+
+void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomisd(src1, src2);
+ } else {
+ ucomisd(src1, src2);
+ }
+}
+
+
+void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomisd(src1, src2);
+ } else {
+ ucomisd(src1, src2);
+ }
+}
+
+
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
@@ -2743,7 +3106,7 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
if (imm8 == 0) {
- movd(dst, src);
+ Movd(dst, src);
return;
}
DCHECK_EQ(1, imm8);
@@ -2763,14 +3126,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
pinsrd(dst, src, imm8);
return;
}
- movd(xmm0, src);
+ Movd(xmm0, src);
if (imm8 == 1) {
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
- psrlq(dst, 32);
- punpckldq(xmm0, dst);
- movaps(dst, xmm0);
+ Movss(dst, xmm0);
}
}
@@ -2782,14 +3143,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
pinsrd(dst, src, imm8);
return;
}
- movd(xmm0, src);
+ Movd(xmm0, src);
if (imm8 == 1) {
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
- psrlq(dst, 32);
- punpckldq(xmm0, dst);
- movaps(dst, xmm0);
+ Movss(dst, xmm0);
}
}
@@ -2824,6 +3183,134 @@ void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
}
+void MacroAssembler::Lzcntq(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(LZCNT)) {
+ CpuFeatureScope scope(this, LZCNT);
+ lzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsrq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 127); // 127^63 == 64
+ bind(&not_zero_src);
+ xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
+}
+
+
+void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(LZCNT)) {
+ CpuFeatureScope scope(this, LZCNT);
+ lzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsrq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 127); // 127^63 == 64
+ bind(&not_zero_src);
+ xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
+}
+
+
+void MacroAssembler::Tzcntq(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
+ Set(dst, 64);
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
+ Set(dst, 64);
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Tzcntl(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntl(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 32); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntl(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 32); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Popcntl(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntl(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void MacroAssembler::Popcntl(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntl(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void MacroAssembler::Popcntq(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntq(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void MacroAssembler::Popcntq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntq(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
void MacroAssembler::Pushad() {
Push(rax);
Push(rcx);
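
A quick arithmetic check of the identities the lzcnt fallback comments above
rely on (hand verification, not emitted code): bsr yields the index b of the
highest set bit, and for b in [0, 63], 63 - b equals 63 ^ b, so a single xorl
turns bsr into lzcnt; preloading 127 makes the zero-input case come out to 64:

    static_assert((63 ^ 7) == 63 - 7, "63 ^ b == 63 - b for b in [0, 63]");
    static_assert((127 ^ 63) == 64, "lzcnt(0) == 64 via the preloaded 127");

For example, src = 0xF0 gives b = 7 and 63 ^ 7 == 56, which is exactly the
leading-zero count of that value in a 64-bit register.
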
@@ -3043,7 +3530,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiToInteger32(kScratchRegister, maybe_number);
Cvtlsi2sd(xmm_scratch, kScratchRegister);
bind(&done);
- movsd(FieldOperand(elements, index, times_8,
+ Movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
}
@@ -3082,8 +3569,8 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
Register result_reg) {
Label done;
Label conv_failure;
- xorps(temp_xmm_reg, temp_xmm_reg);
- cvtsd2si(result_reg, input_reg);
+ Xorpd(temp_xmm_reg, temp_xmm_reg);
+ Cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
cmpl(result_reg, Immediate(1));
@@ -3095,7 +3582,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
jmp(&done, Label::kNear);
bind(&conv_failure);
Set(result_reg, 0);
- ucomisd(input_reg, temp_xmm_reg);
+ Ucomisd(input_reg, temp_xmm_reg);
j(below, &done, Label::kNear);
Set(result_reg, 255);
bind(&done);
@@ -3108,7 +3595,7 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
cmpq(src, Immediate(0xffffffff));
Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
}
- cvtqsi2sd(dst, src);
+ Cvtqsi2sd(dst, src);
}
@@ -3123,15 +3610,15 @@ void MacroAssembler::SlowTruncateToI(Register result_reg,
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Register input_reg) {
Label done;
- movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- cvttsd2siq(result_reg, xmm0);
+ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ Cvttsd2siq(result_reg, xmm0);
cmpq(result_reg, Immediate(1));
j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
subp(rsp, Immediate(kDoubleSize));
- movsd(MemOperand(rsp, 0), xmm0);
+ Movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
addp(rsp, Immediate(kDoubleSize));
} else {
@@ -3147,12 +3634,12 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
- cvttsd2siq(result_reg, input_reg);
+ Cvttsd2siq(result_reg, input_reg);
cmpq(result_reg, Immediate(1));
j(no_overflow, &done, Label::kNear);
subp(rsp, Immediate(kDoubleSize));
- movsd(MemOperand(rsp, 0), input_reg);
+ Movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
addp(rsp, Immediate(kDoubleSize));
@@ -3167,9 +3654,9 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
MinusZeroMode minus_zero_mode,
Label* lost_precision, Label* is_nan,
Label* minus_zero, Label::Distance dst) {
- cvttsd2si(result_reg, input_reg);
+ Cvttsd2si(result_reg, input_reg);
Cvtlsi2sd(xmm0, result_reg);
- ucomisd(xmm0, input_reg);
+ Ucomisd(xmm0, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst); // NaN.
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
@@ -3178,7 +3665,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
// only have to test if we got -0 as an input.
testl(result_reg, result_reg);
j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, input_reg);
+ Movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
// jump to minus_zero.
@@ -3451,6 +3938,43 @@ void MacroAssembler::DebugBreak() {
}
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadSharedFunctionInfoSpecialField(
+ rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+
+ ParameterCount expected(rbx);
+ InvokeFunction(function, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ Move(rdi, function);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ DCHECK(function.is(rdi));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ // Advances rdx to the end of the Code object header, to the start of
+ // the executable code.
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
+}
+
+
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -3484,55 +4008,6 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(function.is(rdi));
- movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- LoadSharedFunctionInfoSpecialField(rbx, rdx,
- SharedFunctionInfo::kFormalParameterCountOffset);
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(function.is(rdi));
- movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- Move(rdi, function);
- InvokeFunction(rdi, expected, actual, flag, call_wrapper);
-}
-
-
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
@@ -3710,13 +4185,16 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
- arg_stack_space * kRegisterSize;
+ int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ DoubleRegister reg =
+ DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
+ Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
subp(rsp, Immediate(arg_stack_space * kRegisterSize));
@@ -3753,25 +4231,34 @@ void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Registers:
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ DoubleRegister reg =
+ DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
+ Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
}
- // Get the return address from the stack and restore the frame pointer.
- movp(rcx, Operand(rbp, kFPOnStackSize));
- movp(rbp, Operand(rbp, 0 * kPointerSize));
- // Drop everything up to and including the arguments and the receiver
- // from the caller stack.
- leap(rsp, Operand(r15, 1 * kPointerSize));
+ if (pop_arguments) {
+ // Get the return address from the stack and restore the frame pointer.
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
+
+ // Drop everything up to and including the arguments and the receiver
+ // from the caller stack.
+ leap(rsp, Operand(r15, 1 * kPointerSize));
- PushReturnAddressFrom(rcx);
+ PushReturnAddressFrom(rcx);
+ } else {
+ // Otherwise just leave the exit frame.
+ leave();
+ }
LeaveExitFrameEpilogue(true);
}
@@ -3823,7 +4310,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, offset));
- movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -4481,7 +4968,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
movp(dst, GlobalObjectOperand());
- movp(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+ movp(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -4494,7 +4981,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Load the global or builtins object from the current context.
movp(scratch,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
movp(scratch, Operand(scratch,
@@ -4523,7 +5010,7 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ movp(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
movp(function, Operand(function, Context::SlotOffset(index)));
}
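
Every capitalized wrapper added above (Movsd, Cvtqsi2sd, Ucomisd, and so on)
follows one dispatch shape: probe CpuFeatures::IsSupported(AVX), open a
CpuFeatureScope, and emit the VEX-encoded form, otherwise fall back to the
legacy SSE encoding. Call sites then stop caring which ISA is present; the
heap-number truncation in this very diff becomes:

    // From TruncateHeapNumberToI above: picks vmovsd/vcvttsd2siq on AVX
    // hardware and movsd/cvttsd2siq otherwise, with identical semantics.
    __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ Cvttsd2siq(result_reg, xmm0);
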
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 1fca0e3594..c7f7f40778 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -16,17 +16,18 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_rax_Code};
-const Register kReturnRegister1 = {kRegister_rdx_Code};
-const Register kJSFunctionRegister = {kRegister_rdi_Code};
-const Register kContextRegister = {kRegister_rsi_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_rax_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_r11_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_r12_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_r14_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_r15_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_rbx_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_rax_Code};
+const Register kReturnRegister0 = {Register::kCode_rax};
+const Register kReturnRegister1 = {Register::kCode_rdx};
+const Register kJSFunctionRegister = {Register::kCode_rdi};
+const Register kContextRegister = {Register::kCode_rsi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r11};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
@@ -342,8 +343,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi.
- void LeaveExitFrame(bool save_doubles = false);
+ // argument in register rsi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
@@ -806,12 +807,30 @@ class MacroAssembler: public Assembler {
void Set(Register dst, int64_t x);
void Set(const Operand& dst, intptr_t x);
+ void Cvtss2sd(XMMRegister dst, XMMRegister src);
+ void Cvtss2sd(XMMRegister dst, const Operand& src);
+ void Cvtsd2ss(XMMRegister dst, XMMRegister src);
+ void Cvtsd2ss(XMMRegister dst, const Operand& src);
+
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
- // xorps to clear the dst register before cvtsi2sd to solve this issue.
+ // xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtqsi2ss(XMMRegister dst, Register src);
+ void Cvtqsi2ss(XMMRegister dst, const Operand& src);
+
+ void Cvtqsi2sd(XMMRegister dst, Register src);
+ void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+
+ void Cvtsd2si(Register dst, XMMRegister src);
+
+ void Cvttsd2si(Register dst, XMMRegister src);
+ void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttsd2siq(Register dst, XMMRegister src);
+ void Cvttsd2siq(Register dst, const Operand& src);
+
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -894,6 +913,65 @@ class MacroAssembler: public Assembler {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
+ void macro_name(XMMRegister dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ } else { \
+ name(dst, src); \
+ } \
+ }
+#define AVX_OP2_X(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
+#define AVX_OP2_O(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
+#define AVX_OP2_XO(macro_name, name) \
+ AVX_OP2_X(macro_name, name) \
+ AVX_OP2_O(macro_name, name)
+
+ AVX_OP2_XO(Addsd, addsd)
+ AVX_OP2_XO(Subsd, subsd)
+ AVX_OP2_XO(Mulsd, mulsd)
+ AVX_OP2_XO(Divsd, divsd)
+ AVX_OP2_X(Andpd, andpd)
+ AVX_OP2_X(Orpd, orpd)
+ AVX_OP2_X(Xorpd, xorpd)
+ AVX_OP2_X(Pcmpeqd, pcmpeqd)
+ AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
+ AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
+
+#undef AVX_OP2_O
+#undef AVX_OP2_X
+#undef AVX_OP2_XO
+#undef AVX_OP2_WITH_TYPE
+
+ void Movsd(XMMRegister dst, XMMRegister src);
+ void Movsd(XMMRegister dst, const Operand& src);
+ void Movsd(const Operand& dst, XMMRegister src);
+ void Movss(XMMRegister dst, XMMRegister src);
+ void Movss(XMMRegister dst, const Operand& src);
+ void Movss(const Operand& dst, XMMRegister src);
+
+ void Movd(XMMRegister dst, Register src);
+ void Movd(XMMRegister dst, const Operand& src);
+ void Movd(Register dst, XMMRegister src);
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
+
+ void Movaps(XMMRegister dst, XMMRegister src);
+ void Movapd(XMMRegister dst, XMMRegister src);
+ void Movmskpd(Register dst, XMMRegister src);
+
+ void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Sqrtsd(XMMRegister dst, XMMRegister src);
+ void Sqrtsd(XMMRegister dst, const Operand& src);
+
+ void Ucomiss(XMMRegister src1, XMMRegister src2);
+ void Ucomiss(XMMRegister src1, const Operand& src2);
+ void Ucomisd(XMMRegister src1, XMMRegister src2);
+ void Ucomisd(XMMRegister src1, const Operand& src2);
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
@@ -936,9 +1014,24 @@ class MacroAssembler: public Assembler {
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void Lzcntq(Register dst, Register src);
+ void Lzcntq(Register dst, const Operand& src);
+
void Lzcntl(Register dst, Register src);
void Lzcntl(Register dst, const Operand& src);
+ void Tzcntq(Register dst, Register src);
+ void Tzcntq(Register dst, const Operand& src);
+
+ void Tzcntl(Register dst, Register src);
+ void Tzcntl(Register dst, const Operand& src);
+
+ void Popcntl(Register dst, Register src);
+ void Popcntl(Register dst, const Operand& src);
+
+ void Popcntq(Register dst, Register src);
+ void Popcntq(Register dst, const Operand& src);
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -1640,6 +1733,7 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
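
For reference, AVX_OP2_XO(Addsd, addsd) in the macro block above expands
(hand-expanded here for illustration) to a pair of wrappers of this shape:

    void Addsd(XMMRegister dst, XMMRegister src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(this, AVX);
        vaddsd(dst, dst, src);  // v##name: three-operand VEX form
      } else {
        addsd(dst, src);        // legacy two-operand SSE form
      }
    }
    // ...plus an identical overload taking `const Operand& src`.
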
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 35cbdc7888..99649ec018 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -41,6 +41,7 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 4543047080..ef8876c15a 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -554,6 +554,7 @@ Operand::Operand(Immediate imm) {
set_modrm(0, ebp);
set_dispr(imm.x_, imm.rmode_);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_ASSEMBLER_X87_INL_H_
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 323d2434f6..baadd87206 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -1178,6 +1178,14 @@ void Assembler::bsr(Register dst, const Operand& src) {
}
+void Assembler::bsf(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBC);
+ emit_operand(dst, src);
+}
+
+
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 1f454bcd90..aa5195c951 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -40,12 +40,48 @@
#include <deque>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/isolate.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+#define GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esp) \
+ V(ebp) \
+ V(esi) \
+ V(edi)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esi) \
+ V(edi)
+
+#define DOUBLE_REGISTERS(V) \
+ V(stX_0) \
+ V(stX_1) \
+ V(stX_2) \
+ V(stX_3) \
+ V(stX_4) \
+ V(stX_5) \
+ V(stX_6) \
+ V(stX_7)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(stX_0) \
+ V(stX_1) \
+ V(stX_2) \
+ V(stX_3) \
+ V(stX_4) \
+ V(stX_5)
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,145 +104,87 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- static const int kMaxNumAllocatableRegisters = 6;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
DCHECK(code >= 0);
DCHECK(code < kNumRegisters);
- Register r = { code };
+ Register r = {code};
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
+ bool is_byte_register() const { return reg_code <= 3; }
+
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
-
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
-inline int Register::ToAllocationIndex(Register reg) {
- DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
-struct X87Register {
+ static const int kMaxNumRegisters = Code::kAfterLast;
static const int kMaxNumAllocatableRegisters = 6;
- static const int kMaxNumRegisters = 8;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
-
- // TODO(turbofan): Proper support for float32.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
-
- static int ToAllocationIndex(X87Register reg) {
- return reg.code_;
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "stX_0", "stX_1", "stX_2", "stX_3", "stX_4",
- "stX_5", "stX_6", "stX_7"
- };
- return names[index];
- }
-
- static X87Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- X87Register result;
- result.code_ = index;
+ static DoubleRegister from_code(int code) {
+ DoubleRegister result = {code};
return result;
}
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
int code() const {
DCHECK(is_valid());
- return code_;
- }
-
- bool is(X87Register reg) const {
- return code_ == reg.code_;
+ return reg_code;
}
- int code_;
-};
-
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-typedef X87Register DoubleRegister;
+ const char* ToString();
+ int reg_code;
+};
-const X87Register stX_0 = { 0 };
-const X87Register stX_1 = { 1 };
-const X87Register stX_2 = { 2 };
-const X87Register stX_3 = { 3 };
-const X87Register stX_4 = { 4 };
-const X87Register stX_5 = { 5 };
-const X87Register stX_6 = { 6 };
-const X87Register stX_7 = { 7 };
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
+typedef DoubleRegister X87Register;
enum Condition {
// any value < 0 is considered no_condition
@@ -793,6 +771,8 @@ class Assembler : public AssemblerBase {
void bts(const Operand& dst, Register src);
void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
void bsr(Register dst, const Operand& src);
+ void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
+ void bsf(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -1097,6 +1077,7 @@ class EnsureSpace BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_ASSEMBLER_X87_H_
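
The register rewrite above is a conventional X-macro: each V(name) list is
expanded once with REGISTER_CODE to build the Code enum and once with
DECLARE_REGISTER to declare the constants. Hand-expanding the first general
register for illustration:

    enum Code {
      kCode_eax,    // 0; GENERAL_REGISTERS(REGISTER_CODE) continues...
      // ...kCode_ecx through kCode_edi...
      kAfterLast,   // becomes kNumRegisters
      kCode_no_reg = -1
    };
    const Register eax = {Register::kCode_eax};  // via DECLARE_REGISTER
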
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index bb9829be34..12b41084b4 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -22,12 +22,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
+ // (only guaranteed when the called function
+ // is not marked as DontAdaptArguments)
+ // -- edi : called function
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
+ // -- esp[4 * argc] : first argument
  //  -- esp[4 * (argc + 1)] : receiver
// -----------------------------------
__ AssertFunction(edi);
@@ -52,8 +53,22 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
// JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
+ // including the receiver and the extra arguments. But eax is only valid
+ // if the called function is marked as DontAdaptArguments, otherwise we
+ // need to load the argument count from the SharedFunctionInfo.
+ Label argc, done_argc;
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(ebx);
+ __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ j(equal, &argc, Label::kNear);
+ __ lea(eax, Operand(ebx, num_extra_args + 1));
+ __ jmp(&done_argc, Label::kNear);
+ __ bind(&argc);
__ add(eax, Immediate(num_extra_args + 1));
+ __ bind(&done_argc);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
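
In effect the adaptor above now computes (pseudo-C condensing the assembly;
variable names are illustrative only):

    // eax is only trustworthy when the callee is DontAdaptArguments;
    // otherwise the count comes from the SharedFunctionInfo.
    int argc = (formal_parameter_count == kDontAdaptArgumentsSentinel)
                   ? eax + num_extra_args + 1                      // receiver
                   : formal_parameter_count + num_extra_args + 1;
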
@@ -136,14 +151,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(edx, edi);
+ // Verify that the original constructor is a JSFunction.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &rt_call);
- // Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // edx: original constructor
+ __ mov(eax, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
__ JumpIfSmi(eax, &rt_call);
// edi: constructor
@@ -151,6 +165,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(eax, MAP_TYPE, ebx);
__ j(not_equal, &rt_call);
+  // Fall back to runtime if the constructor recorded on the initial map
+  // differs from the expected base constructor.
+ __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc). In which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
@@ -179,7 +198,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(edx);
__ push(edi);
- __ push(edi); // constructor
+ __ push(eax); // initial map
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(edi);
@@ -265,8 +284,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// runtime.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ mov(edi, Operand(esp, offset));
- __ push(edi); // argument 2/1: constructor function
- __ push(edx); // argument 3/2: original constructor
+ __ push(edi); // constructor function
+ __ push(edx); // original constructor
__ CallRuntime(Runtime::kNewObject, 2);
__ mov(ebx, eax); // store result in ebx
@@ -627,21 +646,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -650,7 +655,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
+ __ push(kInterpreterBytecodeArrayRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -665,24 +672,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
// load directly from the roots table.
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ add(kInterpreterDispatchTableRegister,
- Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
// Push context as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterContextSpillSlot);
- __ push(esi);
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ push(ebx);
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
- times_pointer_size, 0));
+ __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
+ // Restore undefined_value in accumulator (eax)
+ // TODO(rmcilroy): Remove this once we move the dispatch table back into a
+ // register.
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
- __ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(esi);
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(ebx);
}
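
[Editor's note] The tail of the trampoline is a classic indirect-threaded dispatch; a hedged C++ sketch, with plain function pointers standing in for Code objects:

    #include <cstdint>

    using Handler = void (*)();  // stand-in for a bytecode handler Code object

    // movzx_b loads the current bytecode, the table load picks the handler,
    // and the call transfers control, mirroring the three instructions above.
    void Dispatch(const uint8_t* bytecode_array, intptr_t offset,
                  Handler* dispatch_table) {
      uint8_t bytecode = bytecode_array[offset];
      Handler handler = dispatch_table[bytecode];
      handler();
    }
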
@@ -708,13 +716,99 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register array_limit) {
+ // ----------- S t a t e -------------
+ // -- ebx : Pointer to the last argument in the args array.
+ // -- array_limit : Pointer to one before the first argument in the
+ // args array.
+ // -----------------------------------
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(ebx, 0));
+ __ sub(ebx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmp(ebx, array_limit);
+ __ j(greater, &loop_header, Label::kNear);
+}
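
[Editor's note] The same walk in plain C++, with a std::vector standing in for the downward-growing machine stack (a sketch, not V8 code):

    #include <cstdint>
    #include <vector>

    // Pushes count values, walking from a pointer to the last argument down
    // to one slot before the first, exactly as the loop above does with ebx
    // and array_limit.
    void PushArgs(std::vector<intptr_t>* stack, const intptr_t* first,
                  int count) {
      const intptr_t* cursor = first + count - 1;  // ebx: last argument
      const intptr_t* limit = first - 1;           // array_limit
      while (cursor > limit) {
        stack->push_back(*cursor);
        --cursor;
      }
    }
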
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- edi : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(edx);
+
+ // Find the address of the last argument.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+ __ shl(ecx, kPointerSizeLog2);
+ __ neg(ecx);
+ __ add(ecx, ebx);
+
+ Generate_InterpreterPushArgs(masm, ecx);
+
+ // Call the target.
+ __ Push(edx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor
+ // -- edi : the constructor
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Save number of arguments on the stack below where arguments are going
+ // to be pushed.
+ __ mov(ecx, eax);
+ __ neg(ecx);
+ __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
+ __ mov(eax, ecx);
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(ecx);
+
+ // Find the address of the last argument.
+ __ shl(eax, kPointerSizeLog2);
+ __ add(eax, ebx);
+
+ // Push padding for receiver.
+ __ Push(Immediate(0));
+
+ Generate_InterpreterPushArgs(masm, eax);
+
+ // Restore number of arguments from slot on stack.
+ __ mov(eax, Operand(esp, -kPointerSize));
+
+ // Re-push return address.
+ __ Push(ecx);
+
+ // Call the constructor with unmodified eax, edx, edi values.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+}
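
[Editor's note] The slot arithmetic for the stashed argument count is easy to get wrong; this self-contained check (an illustration, not V8 code) confirms the saved slot reappears at esp - kPointerSize once the pushes are done:

    #include <cassert>

    int main() {
      const int kPointerSize = 4;
      for (int argc = 0; argc < 16; ++argc) {
        int esp = 1000;  // arbitrary starting stack pointer
        int saved_slot = esp - argc * kPointerSize - kPointerSize;
        esp += kPointerSize;         // Pop return address.
        esp -= kPointerSize;         // Push padding for receiver.
        esp -= argc * kPointerSize;  // Push the argc arguments.
        assert(saved_slot == esp - kPointerSize);  // mov(eax, [esp - 4])
      }
      return 0;
    }
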
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
-
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function.
@@ -1270,6 +1364,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- edx : original constructor
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
@@ -1295,16 +1390,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
Label convert, done_convert;
__ JumpIfSmi(ebx, &convert, Label::kNear);
- __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
__ j(below, &done_convert);
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
__ Push(edi);
+ __ Push(edx);
__ Move(eax, ebx);
__ CallStub(&stub);
__ Move(ebx, eax);
+ __ Pop(edx);
__ Pop(edi);
}
__ bind(&done_convert);
@@ -1315,9 +1412,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ebx : the first argument
// -- edi : constructor function
+ // -- edx : original constructor
// -----------------------------------
- Label allocate, done_allocate;
+ Label allocate, done_allocate, rt_call;
+
+ // Fall back to runtime if the original constructor and constructor differ.
+ __ cmp(edx, edi);
+ __ j(not_equal, &rt_call);
+
__ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
__ bind(&done_allocate);
@@ -1344,6 +1447,21 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Pop(ebx);
}
__ jmp(&done_allocate);
+
+ // Fall back to the runtime to create the new object.
+ __ bind(&rt_call);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edi);
+ __ Push(edi); // constructor function
+ __ Push(edx); // original constructor
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Pop(edi);
+ __ Pop(ebx);
+ }
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+ __ Ret();
}
}
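
[Editor's note] In effect the stub now dispatches on whether the original constructor (new.target) matches the String constructor; a hedged sketch in which AllocateJSValueInline and RuntimeNewObject are hypothetical stand-ins, not V8 API:

    struct Obj { const char* kind; };

    // Hypothetical stand-ins for the inline allocation fast path and the
    // Runtime::kNewObject call; only the dispatch below mirrors the stub.
    Obj* AllocateJSValueInline(Obj* /*value*/) {
      static Obj o{"JSValue with String's initial map"};
      return &o;
    }
    Obj* RuntimeNewObject(Obj* /*ctor*/, Obj* /*original_ctor*/) {
      static Obj o{"JSValue with the subclass's initial map"};
      return &o;
    }

    // A subclass construction takes the runtime path because the instance
    // map comes from the subclass; the wrapped string value is stored
    // afterwards in both cases.
    Obj* ConstructStringWrapper(Obj* ctor, Obj* original_ctor, Obj* value) {
      return (original_ctor == ctor) ? AllocateJSValueInline(value)
                                     : RuntimeNewObject(ctor, original_ctor);
    }
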
@@ -1410,74 +1528,85 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(edi);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+ SharedFunctionInfo::kClassConstructorBitsWithinByte);
+ __ j(not_zero, &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
(1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_zero, &done_convert);
{
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
-
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- ecx : the receiver
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
- __ j(above_equal, &done_convert);
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(ecx);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ j(above_equal, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy, Label::kNear);
+ __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(ecx);
+ }
+ __ jmp(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ Push(edi);
+ __ mov(eax, ecx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ecx, eax);
+ __ Pop(edi);
+ __ Pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ jmp(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ Push(eax);
- __ Push(edi);
- __ mov(eax, ecx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ecx, eax);
- __ Pop(edi);
- __ Pop(eax);
- __ SmiUntag(eax);
- }
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
}
__ bind(&done_convert);
@@ -1496,11 +1625,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
ParameterCount expected(ebx);
__ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
actual, JUMP_FUNCTION, NullCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ }
}
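
[Editor's note] The new check reads one byte of the SharedFunctionInfo and masks the class-constructor kind bits; a sketch, with an assumed bit layout:

    #include <cstdint>

    // Assumed mask; the real value is
    // SharedFunctionInfo::kClassConstructorBitsWithinByte.
    constexpr uint8_t kClassConstructorBitsWithinByte = 0x3 << 4;

    // ES6 9.2.1 [[Call]] throws a TypeError when the callee is a class
    // constructor (base or derived), which is what the stub's test_b +
    // j(not_zero) pair implements.
    bool IsClassConstructor(uint8_t function_kind_byte) {
      return (function_kind_byte & kClassConstructorBitsWithinByte) != 0;
    }
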
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -1510,7 +1646,7 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
@@ -1531,7 +1667,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
@@ -1626,41 +1764,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- edi : the target to call (can be any Object).
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
-
- // Find the address of the last argument.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ Push(Operand(ebx, 0));
- __ sub(ebx, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmp(ebx, ecx);
- __ j(greater, &loop_header, Label::kNear);
-
- // Call the target.
- __ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 0d59b18068..9d066483cf 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -594,7 +594,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -798,7 +798,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the arguments map from the current native context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
__ mov(edi, Operand(edi, offset));
@@ -1736,97 +1736,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, cont);
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(Isolate* isolate, MacroAssembler* masm, int argc) {
- __ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(edi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(edi);
- }
- __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // edi : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm->isolate(), masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
@@ -1919,9 +1828,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1955,36 +1862,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(isolate, masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- __ j(equal, &slow_start);
+ __ j(equal, &call);
// Check if we have an allocation site.
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -2016,7 +1902,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// We have to update statistics for runtime profiling.
__ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
__ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
- __ jmp(&slow_start);
+ __ jmp(&call);
__ bind(&uninitialized);
@@ -2053,23 +1939,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
}
- __ jmp(&have_js_function);
+ __ jmp(&call);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
// Unreachable
__ int3();
@@ -2137,11 +2014,23 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// esp: stack pointer (restored after C call)
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
+ //
+ // If argv_in_register():
+ // ecx: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles());
+ if (argv_in_register()) {
+ DCHECK(!save_doubles());
+ __ EnterApiExitFrame(3);
+
+ // Move argc and argv into the correct registers.
+ __ mov(esi, ecx);
+ __ mov(edi, eax);
+ } else {
+ __ EnterExitFrame(save_doubles());
+ }
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2186,7 +2075,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles());
+ __ LeaveExitFrame(save_doubles(), !argv_in_register());
__ ret(0);
// Handling of exception.
@@ -2578,7 +2467,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode, 1);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2890,6 +2779,25 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
}
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in eax.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, eax);
+ __ j(greater_equal, &positive_smi, Label::kNear);
+ __ xor_(eax, eax);
+ __ bind(&positive_smi);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToLength, 1, 1);
+}
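
[Editor's note] For reference, the full ToLength semantics the stub fast-paths for Smis (a sketch over plain doubles; the real slow path is Runtime::kToLength):

    #include <cmath>

    // Negative and NaN inputs clamp to 0, everything above 2^53 - 1 clamps
    // to 2^53 - 1; the stub handles the Smi subset of this inline.
    double ToLength(double value) {
      if (std::isnan(value) || value <= 0) return 0;
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      return std::floor(value > kMaxSafeInteger ? kMaxSafeInteger : value);
    }
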
+
+
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in eax.
Label is_number;
@@ -4241,13 +4149,14 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register key, Register vector,
Register slot, Register feedback,
- Label* miss) {
+ bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next, next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
+ Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
__ push(receiver);
__ push(vector);
@@ -4279,16 +4188,18 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(Operand::StaticVariable(virtual_register));
// Polymorphic, we have to loop from 2 to N
-
- // TODO(mvstanton): I think there is a bug here, we are assuming the
- // array has more than one map/handler pair, but we call this function in the
- // keyed store with a string key case, where it might be just an array of two
- // elements.
-
__ bind(&start_polymorphic);
__ push(key);
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(2)));
+
+ if (!is_polymorphic) {
+ // If is_polymorphic is false, we may have only a two-element array.
+ // Check against the length now in that case.
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(greater_equal, &pop_and_miss);
+ }
+
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4310,6 +4221,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ j(less, &next_loop);
// We exhausted our array of map handler pairs.
+ __ bind(&pop_and_miss);
__ pop(key);
__ pop(vector);
__ pop(receiver);
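
[Editor's note] The loop being patched scans a flat array of (map, handler) pairs; a simplified C++ rendering (pair zero, the monomorphic case, is checked before the loop, which is why the stub's counter starts at two slots):

    struct Pair {
      const void* map;      // weak cell holding a receiver map
      const void* handler;  // handler code
    };

    // Returns the handler for receiver_map, or nullptr for a miss. With
    // is_polymorphic == false the array may hold a single pair, which is
    // why the stub now checks the length before entering the loop.
    const void* FindHandler(const Pair* pairs, int pair_count,
                            const void* receiver_map) {
      for (int i = 1; i < pair_count; ++i) {
        if (pairs[i].map == receiver_map) return pairs[i].handler;
      }
      return nullptr;
    }
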
@@ -4328,7 +4240,7 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
// The store ic value is on the stack.
DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
// feedback initially contains the feedback array
Label compare_smi_map;
@@ -4390,7 +4302,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&try_array);
__ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
+ &miss);
__ bind(&not_array);
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
@@ -4435,13 +4348,16 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Label transition_call;
Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(masm->isolate());
__ push(receiver);
__ push(vector);
Register receiver_map = receiver;
Register cached_map = vector;
+ Register value = StoreDescriptor::ValueRegister();
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
@@ -4450,11 +4366,17 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// Polymorphic, we have to loop from 0 to N - 1
__ push(key);
- // On the stack we have:
- // key (esp)
- // vector
- // receiver
- // value
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, vector, slot in registers.
+ // - handler in virtual register.
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(0)));
__ bind(&next_loop);
@@ -4473,32 +4395,39 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ pop(receiver);
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
__ mov(Operand::StaticVariable(virtual_register), feedback);
- __ pop(feedback); // Pop "value".
+ __ pop(value);
__ jmp(Operand::StaticVariable(virtual_register));
__ bind(&transition_call);
- // Oh holy hell this will be tough.
- // The map goes in vector register.
- __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(receiver, &pop_and_miss);
- // slot goes on the stack, and holds return address.
- __ xchg(slot, Operand(esp, 4 * kPointerSize));
- // Get the handler in value.
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, map, vector in registers.
+ // - handler and slot in virtual registers.
+ __ mov(Operand::StaticVariable(virtual_slot), slot);
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), feedback);
+
+ __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(cached_map, &pop_and_miss);
+ DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+
// Pop key into place.
__ pop(key);
- // Put the return address on top of stack, vector goes in slot.
- __ xchg(slot, Operand(esp, 0));
- // put the map on the stack, receiver holds receiver.
- __ xchg(receiver, Operand(esp, 1 * kPointerSize));
- // put the vector on the stack, slot holds value.
- __ xchg(slot, Operand(esp, 2 * kPointerSize));
- // feedback (value) = value, slot = handler.
- __ xchg(feedback, slot);
- __ jmp(slot);
+ __ pop(vector);
+ __ pop(receiver);
+ __ pop(value);
+ __ jmp(Operand::StaticVariable(virtual_register));
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(3)));
@@ -4565,7 +4494,8 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// at least one map/handler pair.
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
+ &miss);
__ bind(&miss);
__ pop(value);
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
index 25fc4d7718..a6a2a13057 100644
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ b/deps/v8/src/x87/code-stubs-x87.h
@@ -309,13 +309,15 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ Register candidate = Register::from_code(i);
+ if (candidate.IsAllocatable()) {
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
}
UNREACHABLE();
return no_reg;
@@ -374,6 +376,7 @@ class RecordWriteStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_CODE_STUBS_X87_H_
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index 5df3f1f026..7f99fe332b 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -41,8 +41,27 @@ UnaryMathFunction CreateExpFunction() {
UnaryMathFunction CreateSqrtFunction() {
- // No SSE2 support
- return &std::sqrt;
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // Load double input into registers.
+ __ fld_d(MemOperand(esp, 4));
+ __ X87SetFPUCW(0x027F);
+ __ fsqrt();
+ __ X87SetFPUCW(0x037F);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
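
[Editor's note] Note the control-word dance: 0x027F narrows the x87 unit to double precision around fsqrt, and 0x037F restores the default. Call sites are unaffected by the allocation fallback; a hedged usage sketch:

    #include <cmath>

    typedef double (*UnaryMathFunction)(double x);

    int main() {
      // Stand-in for CreateSqrtFunction(): when the executable buffer
      // cannot be allocated the creator returns &std::sqrt, so callers
      // hold the same function-pointer type either way.
      UnaryMathFunction fast_sqrt = &std::sqrt;
      return fast_sqrt(2.0) > 1.414 ? 0 : 1;
    }
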
diff --git a/deps/v8/src/x87/codegen-x87.h b/deps/v8/src/x87/codegen-x87.h
index c23e8668da..e786b84f04 100644
--- a/deps/v8/src/x87/codegen-x87.h
+++ b/deps/v8/src/x87/codegen-x87.h
@@ -28,6 +28,7 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_CODEGEN_X87_H_
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 3a5d2640be..6352cf8045 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -7,6 +7,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
#include "src/x87/frames-x87.h"
@@ -181,7 +182,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < X87Register::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -203,7 +204,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < X87Register::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -233,8 +234,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize =
- kDoubleSize * X87Register::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * X87Register::kMaxNumRegisters;
// Reserve space for x87 fp registers.
__ sub(esp, Immediate(kDoubleRegsSize));
@@ -312,10 +312,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
// Fill in the double input registers.
for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize;
__ fld_d(Operand(esp, src_offset));
__ fstp_d(Operand(ebx, dst_offset));
}
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
index 89e6ebda8c..1b900784cc 100644
--- a/deps/v8/src/x87/frames-x87.h
+++ b/deps/v8/src/x87/frames-x87.h
@@ -80,6 +80,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_FRAMES_X87_H_
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 3696235165..9f37b85c87 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -35,12 +35,10 @@ const Register VectorStoreTransitionDescriptor::SlotRegister() {
}
-const Register VectorStoreTransitionDescriptor::VectorRegister() {
- return no_reg;
-}
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return no_reg; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
@@ -85,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
- // The other three parameters are on the stack in ia32.
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
@@ -116,6 +106,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return eax; }
@@ -237,6 +231,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -400,16 +401,39 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- eax, // argument count (including receiver)
+ eax, // argument count (not including receiver)
ebx, // address of first argument
edi // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (not including receiver)
+ edx, // original constructor
+ edi, // constructor
+ ebx, // address of first argument
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (argc)
+ ecx, // address of first argument (argv)
+ ebx // the runtime function to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index c34a47a251..f5ecf5f677 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -25,8 +25,8 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
has_frame_(false) {
if (isolate() != NULL) {
// TODO(titzer): should we just use a null handle here instead?
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -943,22 +943,27 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore FPU state.
if (save_doubles) {
const int offset = -2 * kPointerSize;
frstor(MemOperand(ebp, offset - 108));
}
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
+ if (pop_arguments) {
+ // Get the return address from the stack and restore the frame pointer.
+ mov(ecx, Operand(ebp, 1 * kPointerSize));
+ mov(ebp, Operand(ebp, 0 * kPointerSize));
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
+ // Pop the arguments and the receiver from the caller stack.
+ lea(esp, Operand(esi, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(ecx);
+ // Push the return address to get ready to return.
+ push(ecx);
+ } else {
+ // Otherwise just leave the exit frame.
+ leave();
+ }
LeaveExitFrameEpilogue(true);
}
@@ -1033,7 +1038,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
mov(scratch1, FieldOperand(scratch1, offset));
- mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+ mov(scratch1, FieldOperand(scratch1, JSGlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -2047,7 +2052,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ mov(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
mov(target, ContextOperand(target, native_context_index));
}
@@ -2090,7 +2095,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, GlobalObjectOperand());
- mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+ mov(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
@@ -2102,7 +2107,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Label* no_map_match) {
// Load the global or builtins object from the current context.
mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ mov(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
mov(scratch, Operand(scratch,
@@ -2125,8 +2130,7 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
mov(function,
Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- mov(function,
- FieldOperand(function, GlobalObject::kNativeContextOffset));
+ mov(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
@@ -2312,6 +2316,27 @@ void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
}
+void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for TZCNT (with ABM/BMI1).
+ Label not_zero_src;
+ bsf(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
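
[Editor's note] Equivalent scalar logic, assuming the GCC/Clang __builtin_ctz intrinsic (which compiles to bsf/tzcnt on ia32):

    #include <cstdint>

    // BSF leaves the destination undefined for a zero source, so the macro
    // branches and materializes 32, the value TZCNT defines for input 0.
    uint32_t Tzcnt32(uint32_t src) {
      if (src == 0) return 32;
      return static_cast<uint32_t>(__builtin_ctz(src));
    }
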
+
+
+void MacroAssembler::Popcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for POPCNT (with POPCNT)
+ // if (CpuFeatures::IsSupported(POPCNT)) {
+ // CpuFeatureScope scope(this, POPCNT);
+ // popcnt(dst, src);
+ // return;
+ // }
+ UNREACHABLE();
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index f1a8f82fe8..4535f8f9f4 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -14,20 +14,20 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_eax_Code};
-const Register kReturnRegister1 = {kRegister_edx_Code};
-const Register kJSFunctionRegister = {kRegister_edi_Code};
-const Register kContextRegister = {kRegister_esi_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+const Register kReturnRegister0 = {Register::kCode_eax};
+const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kJSFunctionRegister = {Register::kCode_edi};
+const Register kContextRegister = {Register::kCode_esi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterContextSpillSlot = -1;
+const int kInterpreterDispatchTableSpillSlot = -1;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
@@ -256,8 +256,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
+ // argument in register esi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
@@ -810,6 +810,12 @@ class MacroAssembler: public Assembler {
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);
+ void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
+ void Tzcnt(Register dst, const Operand& src);
+
+ void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
+ void Popcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
@@ -1083,6 +1089,7 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_MACRO_ASSEMBLER_X87_H_
diff --git a/deps/v8/src/x87/simulator-x87.h b/deps/v8/src/x87/simulator-x87.h
index a780e839d2..3071842f20 100644
--- a/deps/v8/src/x87/simulator-x87.h
+++ b/deps/v8/src/x87/simulator-x87.h
@@ -43,6 +43,7 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_SIMULATOR_X87_H_
diff --git a/deps/v8/src/zone-allocator.h b/deps/v8/src/zone-allocator.h
index 30abe21804..f46151ebc3 100644
--- a/deps/v8/src/zone-allocator.h
+++ b/deps/v8/src/zone-allocator.h
@@ -66,6 +66,7 @@ class zone_allocator {
typedef zone_allocator<bool> ZoneBoolAllocator;
typedef zone_allocator<int> ZoneIntAllocator;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ZONE_ALLOCATOR_H_
diff --git a/deps/v8/src/zone-type-cache.h b/deps/v8/src/zone-type-cache.h
deleted file mode 100644
index bdc4388009..0000000000
--- a/deps/v8/src/zone-type-cache.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ZONE_TYPE_CACHE_H_
-#define V8_ZONE_TYPE_CACHE_H_
-
-
-#include "src/types.h"
-
-namespace v8 {
-namespace internal {
-
-class ZoneTypeCache final {
- private:
- // This has to be first for the initialization magic to work.
- Zone zone_;
-
- public:
- ZoneTypeCache() = default;
-
- Type* const kInt8 =
- CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
- Type* const kUint8 =
- CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
- Type* const kUint8Clamped = kUint8;
- Type* const kInt16 =
- CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
- Type* const kUint16 =
- CreateNative(CreateRange<uint16_t>(), Type::UntaggedUnsigned16());
- Type* const kInt32 = CreateNative(Type::Signed32(), Type::UntaggedSigned32());
- Type* const kUint32 =
- CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
- Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
- Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
-
- Type* const kSingletonZero = CreateRange(0.0, 0.0);
- Type* const kSingletonOne = CreateRange(1.0, 1.0);
- Type* const kZeroOrOne = CreateRange(0.0, 1.0);
- Type* const kZeroish =
- Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
- Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
- Type* const kWeakint = Type::Union(kInteger, Type::MinusZeroOrNaN(), zone());
- Type* const kWeakintFunc1 = Type::Function(kWeakint, Type::Number(), zone());
-
- Type* const kRandomFunc0 = Type::Function(Type::OrderedNumber(), zone());
- Type* const kAnyFunc0 = Type::Function(Type::Any(), zone());
- Type* const kAnyFunc1 = Type::Function(Type::Any(), Type::Any(), zone());
- Type* const kAnyFunc2 =
- Type::Function(Type::Any(), Type::Any(), Type::Any(), zone());
- Type* const kAnyFunc3 = Type::Function(Type::Any(), Type::Any(), Type::Any(),
- Type::Any(), zone());
- Type* const kNumberFunc0 = Type::Function(Type::Number(), zone());
- Type* const kNumberFunc1 =
- Type::Function(Type::Number(), Type::Number(), zone());
- Type* const kNumberFunc2 =
- Type::Function(Type::Number(), Type::Number(), Type::Number(), zone());
- Type* const kImulFunc = Type::Function(Type::Signed32(), Type::Integral32(),
- Type::Integral32(), zone());
- Type* const kClz32Func =
- Type::Function(CreateRange(0, 32), Type::Number(), zone());
-
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- Type* const k##TypeName##Array = CreateArray(k##TypeName);
- TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-
- private:
- Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
-
- Type* CreateArrayFunction(Type* array) {
- Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
- Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
- Type* arg3 = arg2;
- return Type::Function(array, arg1, arg2, arg3, zone());
- }
-
- Type* CreateNative(Type* semantic, Type* representation) {
- return Type::Intersect(semantic, representation, zone());
- }
-
- template <typename T>
- Type* CreateRange() {
- return CreateRange(std::numeric_limits<T>::min(),
- std::numeric_limits<T>::max());
- }
-
- Type* CreateRange(double min, double max) {
- return Type::Range(min, max, zone());
- }
-
- Zone* zone() { return &zone_; }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ZONE_TYPE_CACHE_H_