Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/accessors.cc | 187
-rw-r--r--  deps/v8/src/accessors.h | 5
-rw-r--r--  deps/v8/src/allocation-site-scopes.cc | 3
-rw-r--r--  deps/v8/src/allocation-tracker.cc | 3
-rw-r--r--  deps/v8/src/allocation.cc | 3
-rw-r--r--  deps/v8/src/api-natives.cc | 106
-rw-r--r--  deps/v8/src/api.cc | 1297
-rw-r--r--  deps/v8/src/api.h | 15
-rw-r--r--  deps/v8/src/arguments.cc | 80
-rw-r--r--  deps/v8/src/arguments.h | 18
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 27
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 674
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 139
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 147
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 229
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/constants-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 5
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 52
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 5
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/frames-arm.cc | 14
-rw-r--r--  deps/v8/src/arm/frames-arm.h | 54
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 911
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc | 308
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 79
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 83
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 338
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 6
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 76
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 10
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 29
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 2
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 25
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 101
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 40
-rw-r--r--  deps/v8/src/arm64/builtins-arm64.cc | 119
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 238
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.h | 6
-rw-r--r--  deps/v8/src/arm64/codegen-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/constants-arm64.h | 2
-rw-r--r--  deps/v8/src/arm64/cpu-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/debug-arm64.cc | 50
-rw-r--r--  deps/v8/src/arm64/decoder-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/delayed-masm-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 5
-rw-r--r--  deps/v8/src/arm64/disasm-arm64.cc | 24
-rw-r--r--  deps/v8/src/arm64/frames-arm64.cc | 9
-rw-r--r--  deps/v8/src/arm64/frames-arm64.h | 30
-rw-r--r--  deps/v8/src/arm64/full-codegen-arm64.cc | 887
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc | 30
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h | 14
-rw-r--r--  deps/v8/src/arm64/instrument-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc | 323
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.cc | 80
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.h | 83
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc | 335
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.h | 22
-rw-r--r--  deps/v8/src/arm64/lithium-gap-resolver-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 24
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 4
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.cc | 3
-rw-r--r--  deps/v8/src/arm64/regexp-macro-assembler-arm64.h | 1
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.cc | 175
-rw-r--r--  deps/v8/src/arm64/simulator-arm64.h | 19
-rw-r--r--  deps/v8/src/arm64/utils-arm64.cc | 5
-rw-r--r--  deps/v8/src/arm64/utils-arm64.h | 43
-rw-r--r--  deps/v8/src/array-iterator.js | 20
-rw-r--r--  deps/v8/src/array.js | 283
-rw-r--r--  deps/v8/src/arraybuffer.js | 29
-rw-r--r--  deps/v8/src/assembler.cc | 227
-rw-r--r--  deps/v8/src/assembler.h | 176
-rw-r--r--  deps/v8/src/ast-literal-reindexer.cc | 311
-rw-r--r--  deps/v8/src/ast-literal-reindexer.h | 46
-rw-r--r--  deps/v8/src/ast-numbering.cc | 70
-rw-r--r--  deps/v8/src/ast-value-factory.cc | 18
-rw-r--r--  deps/v8/src/ast-value-factory.h | 28
-rw-r--r--  deps/v8/src/ast.cc | 252
-rw-r--r--  deps/v8/src/ast.h | 509
-rw-r--r--  deps/v8/src/background-parsing-task.cc | 4
-rw-r--r--  deps/v8/src/bailout-reason.cc | 4
-rw-r--r--  deps/v8/src/bailout-reason.h | 2
-rw-r--r--  deps/v8/src/base/atomicops_internals_arm_gcc.h | 6
-rw-r--r--  deps/v8/src/base/bits.h | 24
-rw-r--r--  deps/v8/src/base/macros.h | 9
-rw-r--r--  deps/v8/src/base/safe_math_impl.h | 4
-rw-r--r--  deps/v8/src/base/sys-info.cc | 2
-rw-r--r--  deps/v8/src/basic-block-profiler.h | 3
-rw-r--r--  deps/v8/src/bignum-dtoa.cc | 3
-rw-r--r--  deps/v8/src/bignum.cc | 3
-rw-r--r--  deps/v8/src/bootstrapper.cc | 498
-rw-r--r--  deps/v8/src/bootstrapper.h | 2
-rw-r--r--  deps/v8/src/builtins.cc | 144
-rw-r--r--  deps/v8/src/builtins.h | 154
-rw-r--r--  deps/v8/src/cached-powers.cc | 3
-rw-r--r--  deps/v8/src/char-predicates.cc | 4
-rw-r--r--  deps/v8/src/code-factory.cc | 89
-rw-r--r--  deps/v8/src/code-factory.h | 20
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 246
-rw-r--r--  deps/v8/src/code-stubs.cc | 132
-rw-r--r--  deps/v8/src/code-stubs.h | 435
-rw-r--r--  deps/v8/src/codegen.cc | 17
-rw-r--r--  deps/v8/src/codegen.h | 5
-rw-r--r--  deps/v8/src/collection-iterator.js | 31
-rw-r--r--  deps/v8/src/collection.js | 146
-rw-r--r--  deps/v8/src/compilation-cache.cc | 32
-rw-r--r--  deps/v8/src/compilation-cache.h | 10
-rw-r--r--  deps/v8/src/compilation-dependencies.cc | 4
-rw-r--r--  deps/v8/src/compiler.cc | 263
-rw-r--r--  deps/v8/src/compiler.h | 71
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 80
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 44
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 92
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 1
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 61
-rw-r--r--  deps/v8/src/compiler/arm/linkage-arm.cc | 6
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 116
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 1
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 539
-rw-r--r--  deps/v8/src/compiler/arm64/linkage-arm64.cc | 12
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.cc | 1823
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.h | 133
-rw-r--r--  deps/v8/src/compiler/ast-loop-assignment-analyzer.cc | 9
-rw-r--r--  deps/v8/src/compiler/change-lowering.cc | 32
-rw-r--r--  deps/v8/src/compiler/coalesced-live-ranges.cc | 148
-rw-r--r--  deps/v8/src/compiler/coalesced-live-ranges.h | 109
-rw-r--r--  deps/v8/src/compiler/code-generator-impl.h | 8
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 148
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 4
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 308
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.h | 23
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 115
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 39
-rw-r--r--  deps/v8/src/compiler/control-builders.cc | 13
-rw-r--r--  deps/v8/src/compiler/control-builders.h | 3
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.cc | 27
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.h | 15
-rw-r--r--  deps/v8/src/compiler/control-reducer.cc | 603
-rw-r--r--  deps/v8/src/compiler/control-reducer.h | 43
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.cc | 145
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.h | 52
-rw-r--r--  deps/v8/src/compiler/frame-elider.cc | 4
-rw-r--r--  deps/v8/src/compiler/frame-states.cc | 40
-rw-r--r--  deps/v8/src/compiler/frame-states.h | 83
-rw-r--r--  deps/v8/src/compiler/gap-resolver.cc | 6
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 63
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 46
-rw-r--r--  deps/v8/src/compiler/graph-trimmer.cc | 48
-rw-r--r--  deps/v8/src/compiler/graph-trimmer.h | 57
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 12
-rw-r--r--  deps/v8/src/compiler/graph.cc | 16
-rw-r--r--  deps/v8/src/compiler/graph.h | 16
-rw-r--r--  deps/v8/src/compiler/greedy-allocator.cc | 350
-rw-r--r--  deps/v8/src/compiler/greedy-allocator.h | 111
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 64
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 1
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 104
-rw-r--r--  deps/v8/src/compiler/ia32/linkage-ia32.cc | 2
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h | 55
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 86
-rw-r--r--  deps/v8/src/compiler/instruction.cc | 31
-rw-r--r--  deps/v8/src/compiler/instruction.h | 29
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 38
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 78
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 36
-rw-r--r--  deps/v8/src/compiler/js-frame-specialization.cc | 69
-rw-r--r--  deps/v8/src/compiler/js-frame-specialization.h | 44
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 492
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.h | 3
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 15
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 7
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 276
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 18
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 260
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 24
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 262
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 231
-rw-r--r--  deps/v8/src/compiler/js-type-feedback.cc | 161
-rw-r--r--  deps/v8/src/compiler/js-type-feedback.h | 47
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 808
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 21
-rw-r--r--  deps/v8/src/compiler/linkage-impl.h | 44
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 98
-rw-r--r--  deps/v8/src/compiler/linkage.h | 25
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 4
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 4
-rw-r--r--  deps/v8/src/compiler/loop-analysis.h | 3
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 9
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 86
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 63
-rw-r--r--  deps/v8/src/compiler/machine-type.h | 38
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 115
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h | 4
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 75
-rw-r--r--  deps/v8/src/compiler/mips/linkage-mips.cc | 5
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 155
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 4
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 90
-rw-r--r--  deps/v8/src/compiler/mips64/linkage-mips64.cc | 5
-rw-r--r--  deps/v8/src/compiler/move-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/node-marker.cc | 27
-rw-r--r--  deps/v8/src/compiler/node-marker.h | 36
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 5
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 50
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 10
-rw-r--r--  deps/v8/src/compiler/node.cc | 305
-rw-r--r--  deps/v8/src/compiler/node.h | 283
-rw-r--r--  deps/v8/src/compiler/opcodes.cc | 6
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 24
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 34
-rw-r--r--  deps/v8/src/compiler/osr.cc | 101
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 253
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 76
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-codes-ppc.h | 2
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 113
-rw-r--r--  deps/v8/src/compiler/ppc/linkage-ppc.cc | 2
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 79
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 57
-rw-r--r--  deps/v8/src/compiler/register-allocator.cc | 601
-rw-r--r--  deps/v8/src/compiler/register-allocator.h | 72
-rw-r--r--  deps/v8/src/compiler/register-configuration.cc | 7
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 6
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 6
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 4
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 86
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 6
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 61
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.h | 9
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 56
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 4
-rw-r--r--  deps/v8/src/compiler/source-position.cc | 7
-rw-r--r--  deps/v8/src/compiler/source-position.h | 10
-rw-r--r--  deps/v8/src/compiler/typer.cc | 557
-rw-r--r--  deps/v8/src/compiler/typer.h | 56
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 118
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 108
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 1
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 210
-rw-r--r--  deps/v8/src/compiler/x64/linkage-x64.cc | 11
-rw-r--r--  deps/v8/src/compiler/x87/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler/x87/code-generator-x87.cc | 1852
-rw-r--r--  deps/v8/src/compiler/x87/instruction-codes-x87.h | 122
-rw-r--r--  deps/v8/src/compiler/x87/instruction-selector-x87.cc | 1355
-rw-r--r--  deps/v8/src/compiler/x87/linkage-x87.cc | 65
-rw-r--r--  deps/v8/src/contexts.cc | 87
-rw-r--r--  deps/v8/src/contexts.h | 52
-rw-r--r--  deps/v8/src/conversions.cc | 3
-rw-r--r--  deps/v8/src/counters.cc | 3
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 3
-rw-r--r--  deps/v8/src/d8-debug.cc | 4
-rw-r--r--  deps/v8/src/d8.cc | 742
-rw-r--r--  deps/v8/src/d8.gyp | 7
-rw-r--r--  deps/v8/src/d8.h | 149
-rw-r--r--  deps/v8/src/date.cc | 3
-rw-r--r--  deps/v8/src/date.js | 112
-rw-r--r--  deps/v8/src/dateparser.cc | 3
-rw-r--r--  deps/v8/src/debug.cc | 611
-rw-r--r--  deps/v8/src/debug.h | 61
-rw-r--r--  deps/v8/src/deoptimizer.cc | 3204
-rw-r--r--  deps/v8/src/deoptimizer.h | 596
-rw-r--r--  deps/v8/src/disassembler.cc | 7
-rw-r--r--  deps/v8/src/diy-fp.cc | 3
-rw-r--r--  deps/v8/src/dtoa.cc | 3
-rw-r--r--  deps/v8/src/elements-kind.cc | 31
-rw-r--r--  deps/v8/src/elements-kind.h | 24
-rw-r--r--  deps/v8/src/elements.cc | 1702
-rw-r--r--  deps/v8/src/elements.h | 152
-rw-r--r--  deps/v8/src/execution.cc | 34
-rw-r--r--  deps/v8/src/execution.h | 3
-rw-r--r--  deps/v8/src/expression-classifier.h | 255
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.cc | 49
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.h | 5
-rw-r--r--  deps/v8/src/extensions/free-buffer-extension.cc | 10
-rw-r--r--  deps/v8/src/extensions/free-buffer-extension.h | 5
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 15
-rw-r--r--  deps/v8/src/extensions/gc-extension.h | 5
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc | 31
-rw-r--r--  deps/v8/src/extensions/statistics-extension.h | 5
-rw-r--r--  deps/v8/src/extensions/trigger-failure-extension.cc | 10
-rw-r--r--  deps/v8/src/extensions/trigger-failure-extension.h | 5
-rw-r--r--  deps/v8/src/factory.cc | 313
-rw-r--r--  deps/v8/src/factory.h | 93
-rw-r--r--  deps/v8/src/fast-dtoa.cc | 3
-rw-r--r--  deps/v8/src/fixed-dtoa.cc | 3
-rw-r--r--  deps/v8/src/flag-definitions.h | 69
-rw-r--r--  deps/v8/src/flags.cc | 3
-rw-r--r--  deps/v8/src/frames.cc | 293
-rw-r--r--  deps/v8/src/frames.h | 93
-rw-r--r--  deps/v8/src/full-codegen.cc | 517
-rw-r--r--  deps/v8/src/full-codegen.h | 123
-rw-r--r--  deps/v8/src/func-name-inferrer.cc | 3
-rw-r--r--  deps/v8/src/generator.js | 31
-rw-r--r--  deps/v8/src/global-handles.cc | 10
-rw-r--r--  deps/v8/src/globals.h | 153
-rw-r--r--  deps/v8/src/handles-inl.h | 14
-rw-r--r--  deps/v8/src/handles.cc | 4
-rw-r--r--  deps/v8/src/handles.h | 30
-rw-r--r--  deps/v8/src/harmony-array-includes.js | 4
-rw-r--r--  deps/v8/src/harmony-array.js | 179
-rw-r--r--  deps/v8/src/harmony-atomics.js | 143
-rw-r--r--  deps/v8/src/harmony-concat-spreadable.js | 16
-rw-r--r--  deps/v8/src/harmony-object.js | 17
-rw-r--r--  deps/v8/src/harmony-reflect.js | 4
-rw-r--r--  deps/v8/src/harmony-regexp.js | 2
-rw-r--r--  deps/v8/src/harmony-sharedarraybuffer.js | 56
-rw-r--r--  deps/v8/src/harmony-spread.js | 8
-rw-r--r--  deps/v8/src/harmony-tostring.js | 4
-rw-r--r--  deps/v8/src/harmony-typedarray.js | 389
-rw-r--r--  deps/v8/src/hashmap.h | 8
-rw-r--r--  deps/v8/src/heap-profiler.cc | 3
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 40
-rw-r--r--  deps/v8/src/heap-snapshot-generator.h | 3
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc | 201
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h | 87
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 229
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 81
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 34
-rw-r--r--  deps/v8/src/heap/heap.cc | 1172
-rw-r--r--  deps/v8/src/heap/heap.h | 316
-rw-r--r--  deps/v8/src/heap/identity-map.cc | 6
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 55
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 25
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 969
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 101
-rw-r--r--  deps/v8/src/heap/memory-reducer.cc | 241
-rw-r--r--  deps/v8/src/heap/memory-reducer.h | 170
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 108
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 16
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 2
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 95
-rw-r--r--  deps/v8/src/heap/spaces.cc | 53
-rw-r--r--  deps/v8/src/heap/spaces.h | 104
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 63
-rw-r--r--  deps/v8/src/hydrogen-bce.cc | 11
-rw-r--r--  deps/v8/src/hydrogen-bch.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-canonicalize.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-check-elimination.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-dce.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-dehoist.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-environment-liveness.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-escape-analysis.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-infer-representation.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-infer-types.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 112
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 393
-rw-r--r--  deps/v8/src/hydrogen-load-elimination.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-mark-deoptimize.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-mark-unreachable.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-osr.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-range-analysis.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-redundant-phi.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-removable-simulates.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-sce.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-store-elimination.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-types.cc | 3
-rw-r--r--  deps/v8/src/hydrogen-uint32-analysis.cc | 3
-rw-r--r--  deps/v8/src/hydrogen.cc | 485
-rw-r--r--  deps/v8/src/hydrogen.h | 32
-rw-r--r--  deps/v8/src/i18n.cc | 5
-rw-r--r--  deps/v8/src/i18n.js | 611
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 12
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 23
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 32
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 99
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 236
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 3
-rw-r--r--  deps/v8/src/ia32/cpu-ia32.cc | 3
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 52
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 5
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/frames-ia32.cc | 9
-rw-r--r--  deps/v8/src/ia32/frames-ia32.h | 30
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 895
-rw-r--r--  deps/v8/src/ia32/interface-descriptors-ia32.cc | 294
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 268
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 3
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 77
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 81
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 6
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 3
-rw-r--r--  deps/v8/src/ic/access-compiler.cc | 4
-rw-r--r--  deps/v8/src/ic/access-compiler.h | 10
-rw-r--r--  deps/v8/src/ic/arm/access-compiler-arm.cc | 4
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc | 4
-rw-r--r--  deps/v8/src/ic/arm/ic-arm.cc | 222
-rw-r--r--  deps/v8/src/ic/arm/ic-compiler-arm.cc | 4
-rw-r--r--  deps/v8/src/ic/arm/stub-cache-arm.cc | 8
-rw-r--r--  deps/v8/src/ic/arm64/access-compiler-arm64.cc | 4
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc | 4
-rw-r--r--  deps/v8/src/ic/arm64/ic-arm64.cc | 263
-rw-r--r--  deps/v8/src/ic/arm64/ic-compiler-arm64.cc | 4
-rw-r--r--  deps/v8/src/ic/arm64/stub-cache-arm64.cc | 8
-rw-r--r--  deps/v8/src/ic/call-optimization.cc | 4
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc | 37
-rw-r--r--  deps/v8/src/ic/handler-compiler.h | 3
-rw-r--r--  deps/v8/src/ic/ia32/access-compiler-ia32.cc | 4
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc | 11
-rw-r--r--  deps/v8/src/ic/ia32/ic-compiler-ia32.cc | 4
-rw-r--r--  deps/v8/src/ic/ia32/ic-ia32.cc | 224
-rw-r--r--  deps/v8/src/ic/ia32/stub-cache-ia32.cc | 12
-rw-r--r--  deps/v8/src/ic/ic-compiler.cc | 100
-rw-r--r--  deps/v8/src/ic/ic-compiler.h | 10
-rw-r--r--  deps/v8/src/ic/ic-inl.h | 61
-rw-r--r--  deps/v8/src/ic/ic-state.cc | 30
-rw-r--r--  deps/v8/src/ic/ic-state.h | 68
-rw-r--r--  deps/v8/src/ic/ic.cc | 787
-rw-r--r--  deps/v8/src/ic/ic.h | 158
-rw-r--r--  deps/v8/src/ic/mips/access-compiler-mips.cc | 4
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc | 4
-rw-r--r--  deps/v8/src/ic/mips/ic-compiler-mips.cc | 4
-rw-r--r--  deps/v8/src/ic/mips/ic-mips.cc | 222
-rw-r--r--  deps/v8/src/ic/mips/stub-cache-mips.cc | 8
-rw-r--r--  deps/v8/src/ic/mips64/access-compiler-mips64.cc | 4
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc | 4
-rw-r--r--  deps/v8/src/ic/mips64/ic-compiler-mips64.cc | 4
-rw-r--r--  deps/v8/src/ic/mips64/ic-mips64.cc | 221
-rw-r--r--  deps/v8/src/ic/mips64/stub-cache-mips64.cc | 14
-rw-r--r--  deps/v8/src/ic/ppc/access-compiler-ppc.cc | 4
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc | 10
-rw-r--r--  deps/v8/src/ic/ppc/ic-compiler-ppc.cc | 4
-rw-r--r--  deps/v8/src/ic/ppc/ic-ppc.cc | 228
-rw-r--r--  deps/v8/src/ic/ppc/stub-cache-ppc.cc | 8
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 4
-rw-r--r--  deps/v8/src/ic/x64/access-compiler-x64.cc | 4
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc | 4
-rw-r--r--  deps/v8/src/ic/x64/ic-compiler-x64.cc | 4
-rw-r--r--  deps/v8/src/ic/x64/ic-x64.cc | 226
-rw-r--r--  deps/v8/src/ic/x64/stub-cache-x64.cc | 8
-rw-r--r--  deps/v8/src/ic/x87/access-compiler-x87.cc | 4
-rw-r--r--  deps/v8/src/ic/x87/handler-compiler-x87.cc | 11
-rw-r--r--  deps/v8/src/ic/x87/ic-compiler-x87.cc | 4
-rw-r--r--  deps/v8/src/ic/x87/ic-x87.cc | 224
-rw-r--r--  deps/v8/src/ic/x87/stub-cache-x87.cc | 13
-rw-r--r--  deps/v8/src/icu_util.cc | 3
-rw-r--r--  deps/v8/src/interface-descriptors.cc | 363
-rw-r--r--  deps/v8/src/interface-descriptors.h | 210
-rw-r--r--  deps/v8/src/interpreter-irregexp.cc | 3
-rw-r--r--  deps/v8/src/isolate.cc | 174
-rw-r--r--  deps/v8/src/isolate.h | 46
-rw-r--r--  deps/v8/src/iterator-prototype.js | 21
-rw-r--r--  deps/v8/src/json-parser.h | 35
-rw-r--r--  deps/v8/src/json-stringifier.h | 13
-rw-r--r--  deps/v8/src/json.js | 80
-rw-r--r--  deps/v8/src/jsregexp.cc | 402
-rw-r--r--  deps/v8/src/jsregexp.h | 68
-rw-r--r--  deps/v8/src/layout-descriptor-inl.h | 4
-rw-r--r--  deps/v8/src/layout-descriptor.cc | 4
-rw-r--r--  deps/v8/src/libplatform/default-platform.cc | 67
-rw-r--r--  deps/v8/src/libplatform/default-platform.h | 12
-rw-r--r--  deps/v8/src/list-inl.h | 42
-rw-r--r--  deps/v8/src/list.h | 10
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 3
-rw-r--r--  deps/v8/src/lithium-codegen.cc | 103
-rw-r--r--  deps/v8/src/lithium-codegen.h | 6
-rw-r--r--  deps/v8/src/lithium.cc | 21
-rw-r--r--  deps/v8/src/lithium.h | 22
-rw-r--r--  deps/v8/src/liveedit.cc | 45
-rw-r--r--  deps/v8/src/liveedit.h | 2
-rw-r--r--  deps/v8/src/log-utils.cc | 3
-rw-r--r--  deps/v8/src/log-utils.h | 2
-rw-r--r--  deps/v8/src/log.cc | 89
-rw-r--r--  deps/v8/src/log.h | 10
-rw-r--r--  deps/v8/src/lookup-inl.h | 62
-rw-r--r--  deps/v8/src/lookup.cc | 183
-rw-r--r--  deps/v8/src/lookup.h | 128
-rw-r--r--  deps/v8/src/macro-assembler.h | 24
-rw-r--r--  deps/v8/src/macros.py | 18
-rw-r--r--  deps/v8/src/math.js | 69
-rw-r--r--  deps/v8/src/messages.cc | 147
-rw-r--r--  deps/v8/src/messages.h | 237
-rw-r--r--  deps/v8/src/messages.js | 300
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 381
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 79
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 135
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 202
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/constants-mips.cc | 35
-rw-r--r--  deps/v8/src/mips/constants-mips.h | 202
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 49
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 5
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc | 532
-rw-r--r--  deps/v8/src/mips/frames-mips.cc | 9
-rw-r--r--  deps/v8/src/mips/frames-mips.h | 30
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 879
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc | 295
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 302
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 6
-rw-r--r--  deps/v8/src/mips/lithium-gap-resolver-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 79
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 83
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 24
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 1189
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 30
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h | 49
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc | 489
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h | 100
-rw-r--r--  deps/v8/src/mips64/builtins-mips64.cc | 136
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc | 204
-rw-r--r--  deps/v8/src/mips64/codegen-mips64.cc | 3
-rw-r--r--  deps/v8/src/mips64/constants-mips64.cc | 53
-rw-r--r--  deps/v8/src/mips64/constants-mips64.h | 243
-rw-r--r--  deps/v8/src/mips64/cpu-mips64.cc | 3
-rw-r--r--  deps/v8/src/mips64/debug-mips64.cc | 48
-rw-r--r--  deps/v8/src/mips64/deoptimizer-mips64.cc | 5
-rw-r--r--  deps/v8/src/mips64/disasm-mips64.cc | 572
-rw-r--r--  deps/v8/src/mips64/frames-mips64.cc | 9
-rw-r--r--  deps/v8/src/mips64/frames-mips64.h | 30
-rw-r--r--  deps/v8/src/mips64/full-codegen-mips64.cc | 888
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc | 295
-rw-r--r--  deps/v8/src/mips64/lithium-codegen-mips64.cc | 557
-rw-r--r--  deps/v8/src/mips64/lithium-codegen-mips64.h | 9
-rw-r--r--  deps/v8/src/mips64/lithium-gap-resolver-mips64.cc | 3
-rw-r--r--  deps/v8/src/mips64/lithium-mips64.cc | 112
-rw-r--r--  deps/v8/src/mips64/lithium-mips64.h | 147
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc | 270
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h | 20
-rw-r--r--  deps/v8/src/mips64/regexp-macro-assembler-mips64.cc | 3
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc | 1261
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.h | 94
-rw-r--r--  deps/v8/src/mirror-debugger.js | 55
-rw-r--r--  deps/v8/src/modules.cc | 4
-rw-r--r--  deps/v8/src/object-observe.js | 49
-rw-r--r--  deps/v8/src/objects-debug.cc | 43
-rw-r--r--  deps/v8/src/objects-inl.h | 1115
-rw-r--r--  deps/v8/src/objects-printer.cc | 72
-rw-r--r--  deps/v8/src/objects.cc | 4838
-rw-r--r--  deps/v8/src/objects.h | 1444
-rw-r--r--  deps/v8/src/optimizing-compile-dispatcher.cc | 4
-rw-r--r--  deps/v8/src/parser.cc | 1443
-rw-r--r--  deps/v8/src/parser.h | 267
-rw-r--r--  deps/v8/src/pattern-rewriter.cc | 423
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.cc | 29
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.h | 13
-rw-r--r--  deps/v8/src/perf-jit.cc | 148
-rw-r--r--  deps/v8/src/perf-jit.h | 113
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h | 185
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc | 179
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h | 177
-rw-r--r--  deps/v8/src/ppc/builtins-ppc.cc | 174
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc | 262
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/constants-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h | 8
-rw-r--r--  deps/v8/src/ppc/cpu-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/debug-ppc.cc | 53
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc | 8
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/frames-ppc.cc | 18
-rw-r--r--  deps/v8/src/ppc/frames-ppc.h | 37
-rw-r--r--  deps/v8/src/ppc/full-codegen-ppc.cc | 934
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc | 296
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.cc | 311
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.h | 8
-rw-r--r--  deps/v8/src/ppc/lithium-gap-resolver-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.cc | 80
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.h | 81
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc | 159
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h | 32
-rw-r--r--  deps/v8/src/ppc/regexp-macro-assembler-ppc.cc | 4
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc | 42
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h | 3
-rw-r--r--  deps/v8/src/preparse-data-format.h | 5
-rw-r--r--  deps/v8/src/preparse-data.cc | 10
-rw-r--r--  deps/v8/src/preparse-data.h | 43
-rw-r--r--  deps/v8/src/preparser.cc | 135
-rw-r--r--  deps/v8/src/preparser.h | 1227
-rw-r--r--  deps/v8/src/prettyprinter.cc | 130
-rw-r--r--  deps/v8/src/prettyprinter.h | 1
-rw-r--r--  deps/v8/src/profile-generator.cc | 3
-rw-r--r--  deps/v8/src/prologue.js | 232
-rw-r--r--  deps/v8/src/promise.js | 15
-rw-r--r--  deps/v8/src/property.cc | 3
-rw-r--r--  deps/v8/src/proxy.js | 32
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.cc | 3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 9
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h | 1
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 3
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 3
-rw-r--r--  deps/v8/src/regexp-stack.cc | 3
-rw-r--r--  deps/v8/src/regexp.js | 86
-rw-r--r--  deps/v8/src/rewriter.cc | 5
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 9
-rw-r--r--  deps/v8/src/runtime.js | 110
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 165
-rw-r--r--  deps/v8/src/runtime/runtime-atomics.cc | 824
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc | 120
-rw-r--r--  deps/v8/src/runtime/runtime-collections.cc | 100
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc | 64
-rw-r--r--  deps/v8/src/runtime/runtime-date.cc | 30
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 494
-rw-r--r--  deps/v8/src/runtime/runtime-forin.cc | 75
-rw-r--r--  deps/v8/src/runtime/runtime-function.cc | 70
-rw-r--r--  deps/v8/src/runtime/runtime-generator.cc | 6
-rw-r--r--  deps/v8/src/runtime/runtime-i18n.cc | 23
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc | 65
-rw-r--r--  deps/v8/src/runtime/runtime-json.cc | 4
-rw-r--r--  deps/v8/src/runtime/runtime-literals.cc | 62
-rw-r--r--  deps/v8/src/runtime/runtime-liveedit.cc | 4
-rw-r--r--  deps/v8/src/runtime/runtime-maths.cc | 4
-rw-r--r--  deps/v8/src/runtime/runtime-numbers.cc | 4
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 626
-rw-r--r--  deps/v8/src/runtime/runtime-observe.cc | 6
-rw-r--r--  deps/v8/src/runtime/runtime-proxy.cc | 4
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc | 17
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc | 76
-rw-r--r--  deps/v8/src/runtime/runtime-strings.cc | 20
-rw-r--r--  deps/v8/src/runtime/runtime-symbol.cc | 36
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 38
-rw-r--r--  deps/v8/src/runtime/runtime-typedarray.cc | 72
-rw-r--r--  deps/v8/src/runtime/runtime-uri.cc | 4
-rw-r--r--  deps/v8/src/runtime/runtime-utils.h | 12
-rw-r--r--  deps/v8/src/runtime/runtime.h | 211
-rw-r--r--  deps/v8/src/safepoint-table.cc | 4
-rw-r--r--  deps/v8/src/sampler.cc | 3
-rw-r--r--  deps/v8/src/scanner-character-streams.cc | 92
-rw-r--r--  deps/v8/src/scanner-character-streams.h | 24
-rw-r--r--  deps/v8/src/scanner.cc | 152
-rw-r--r--  deps/v8/src/scanner.h | 10
-rw-r--r--  deps/v8/src/scopeinfo.cc | 271
-rw-r--r--  deps/v8/src/scopeinfo.h | 36
-rw-r--r--  deps/v8/src/scopes.cc | 308
-rw-r--r--  deps/v8/src/scopes.h | 78
-rw-r--r--  deps/v8/src/snapshot/natives-external.cc | 3
-rw-r--r--  deps/v8/src/snapshot/serialize.cc | 199
-rw-r--r--  deps/v8/src/snapshot/serialize.h | 24
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc | 10
-rw-r--r--  deps/v8/src/snapshot/snapshot-empty.cc | 3
-rw-r--r--  deps/v8/src/snapshot/snapshot-external.cc | 3
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.cc | 3
-rw-r--r--  deps/v8/src/snapshot/snapshot.h | 3
-rw-r--r--  deps/v8/src/string-builder.cc | 24
-rw-r--r--  deps/v8/src/string-builder.h | 2
-rw-r--r--  deps/v8/src/string-iterator.js | 35
-rw-r--r--  deps/v8/src/string-search.cc | 3
-rw-r--r--  deps/v8/src/string-stream.cc | 21
-rw-r--r--  deps/v8/src/string-stream.h | 31
-rw-r--r--  deps/v8/src/string.js | 135
-rw-r--r--  deps/v8/src/strings-storage.cc | 4
-rw-r--r--  deps/v8/src/strtod.cc | 3
-rw-r--r--  deps/v8/src/symbol.js | 23
-rw-r--r--  deps/v8/src/templates.js | 16
-rw-r--r--  deps/v8/src/third_party/fdlibm/fdlibm.cc | 4
-rw-r--r--  deps/v8/src/third_party/fdlibm/fdlibm.js | 43
-rw-r--r--  deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h | 83
-rw-r--r--  deps/v8/src/third_party/valgrind/LICENSE | 54
-rw-r--r--  deps/v8/src/third_party/vtune/LICENSE | 59
-rw-r--r--  deps/v8/src/token.cc | 3
-rw-r--r--  deps/v8/src/transitions.cc | 42
-rw-r--r--  deps/v8/src/transitions.h | 5
-rw-r--r--  deps/v8/src/type-feedback-vector-inl.h | 8
-rw-r--r--  deps/v8/src/type-feedback-vector.cc | 267
-rw-r--r--  deps/v8/src/type-feedback-vector.h | 89
-rw-r--r--  deps/v8/src/type-info.cc | 26
-rw-r--r--  deps/v8/src/type-info.h | 8
-rw-r--r--  deps/v8/src/typedarray.js | 101
-rw-r--r--  deps/v8/src/types.cc | 6
-rw-r--r--  deps/v8/src/types.h | 22
-rw-r--r--  deps/v8/src/typing.cc | 43
-rw-r--r--  deps/v8/src/uri.js | 42
-rw-r--r--  deps/v8/src/utils.cc | 3
-rw-r--r--  deps/v8/src/utils.h | 51
-rw-r--r--  deps/v8/src/v8.cc | 3
-rw-r--r--  deps/v8/src/v8natives.js | 362
-rw-r--r--  deps/v8/src/variables.cc | 17
-rw-r--r--  deps/v8/src/variables.h | 63
-rw-r--r--  deps/v8/src/vector.h | 29
-rw-r--r--  deps/v8/src/version.cc | 3
-rw-r--r--  deps/v8/src/weak-collection.js | 38
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 6
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 23
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 31
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 111
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 217
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 50
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 5
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/frames-x64.cc | 9
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 30
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 865
-rw-r--r--  deps/v8/src/x64/interface-descriptors-x64.cc | 295
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 284
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 6
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 77
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 82
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 8
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 3
-rw-r--r--  deps/v8/src/x87/assembler-x87-inl.h | 12
-rw-r--r--  deps/v8/src/x87/assembler-x87.cc | 76
-rw-r--r--  deps/v8/src/x87/assembler-x87.h | 49
-rw-r--r--  deps/v8/src/x87/builtins-x87.cc | 99
-rw-r--r--  deps/v8/src/x87/code-stubs-x87.cc | 238
-rw-r--r--  deps/v8/src/x87/codegen-x87.cc | 3
-rw-r--r--  deps/v8/src/x87/cpu-x87.cc | 3
-rw-r--r--  deps/v8/src/x87/debug-x87.cc | 52
-rw-r--r--  deps/v8/src/x87/deoptimizer-x87.cc | 5
-rw-r--r--  deps/v8/src/x87/disasm-x87.cc | 17
-rw-r--r--  deps/v8/src/x87/frames-x87.cc | 9
-rw-r--r--  deps/v8/src/x87/frames-x87.h | 30
-rw-r--r--  deps/v8/src/x87/full-codegen-x87.cc | 895
-rw-r--r--  deps/v8/src/x87/interface-descriptors-x87.cc | 294
-rw-r--r--  deps/v8/src/x87/lithium-codegen-x87.cc | 260
-rw-r--r--  deps/v8/src/x87/lithium-codegen-x87.h | 6
-rw-r--r--  deps/v8/src/x87/lithium-gap-resolver-x87.cc | 3
-rw-r--r--  deps/v8/src/x87/lithium-x87.cc | 77
-rw-r--r--  deps/v8/src/x87/lithium-x87.h | 81
-rw-r--r--  deps/v8/src/x87/macro-assembler-x87.cc | 8
-rw-r--r--  deps/v8/src/x87/regexp-macro-assembler-x87.cc | 3
-rw-r--r--  deps/v8/src/zone-containers.h | 6
720 files changed, 54276 insertions, 35127 deletions
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 9b24ee37ec..9850cd388e 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -32,6 +32,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
+ info->set_is_special_data_property(true);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
Handle<Object> set = v8::FromCData(isolate, setter);
@@ -126,31 +127,6 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
}
-bool SetPropertyOnInstanceIfInherited(
- Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
- v8::Local<v8::Name> name, Handle<Object> value) {
- Handle<Object> holder = Utils::OpenHandle(*info.Holder());
- Handle<Object> receiver = Utils::OpenHandle(*info.This());
- if (*holder == *receiver) return false;
- if (receiver->IsJSObject()) {
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- // This behaves sloppy since we lost the actual strict-mode.
- // TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
- // properties.
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return true;
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- }
- if (!object->map()->is_extensible()) return true;
- JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
- value, NONE).Check();
- }
- return true;
-}
-
-
//
// Accessors::ArgumentsIterator
//
@@ -174,8 +150,6 @@ void Accessors::ArgumentsIteratorSetter(
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
- if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
-
LookupIterator it(object, Utils::OpenHandle(*name));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
@@ -199,21 +173,6 @@ Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
//
-// The helper function will 'flatten' Number objects.
-Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
- Handle<Object> value) {
- if (value->IsNumber() || !value->IsJSValue()) return value;
- Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
- DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
- has_initial_map());
- if (wrapper->map() == isolate->number_function()->initial_map()) {
- return handle(wrapper->value(), isolate);
- }
-
- return value;
-}
-
-
void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -226,44 +185,55 @@ void Accessors::ArrayLengthGetter(
}
+// Tries to non-observably convert |value| to a valid array length.
+// Returns false if it fails.
+static bool FastAsArrayLength(Isolate* isolate, Handle<Object> value,
+ uint32_t* length) {
+ if (value->ToArrayLength(length)) return true;
+ // We don't support AsArrayLength, so use AsArrayIndex for now. This just
+ // misses out on kMaxUInt32.
+ if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
+ return false;
+}
+
+
void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
+
Handle<JSObject> object = Utils::OpenHandle(*info.This());
- Handle<Object> value = Utils::OpenHandle(*val);
- if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
- return;
- }
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ Handle<Object> length_obj = Utils::OpenHandle(*val);
- value = FlattenNumber(isolate, value);
+ uint32_t length = 0;
+ if (!FastAsArrayLength(isolate, length_obj, &length)) {
+ Handle<Object> uint32_v;
+ if (!Execution::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
- Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
- MaybeHandle<Object> maybe;
- Handle<Object> uint32_v;
- maybe = Execution::ToUint32(isolate, value);
- if (!maybe.ToHandle(&uint32_v)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
- Handle<Object> number_v;
- maybe = Execution::ToNumber(isolate, value);
- if (!maybe.ToHandle(&number_v)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
+ Handle<Object> number_v;
+ if (!Execution::ToNumber(isolate, length_obj).ToHandle(&number_v)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
- if (uint32_v->Number() == number_v->Number()) {
- maybe = JSArray::SetElementsLength(array_handle, uint32_v);
- if (maybe.is_null()) isolate->OptionalRescheduleException(false);
- return;
+ if (uint32_v->Number() != number_v->Number()) {
+ Handle<Object> exception = isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength);
+ return isolate->ScheduleThrow(*exception);
+ }
+
+ CHECK(uint32_v->ToArrayLength(&length));
}
- Handle<Object> exception =
- isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
- isolate->ScheduleThrow(*exception);
+ if (JSArray::ObservableSetLength(array, length).is_null()) {
+ isolate->OptionalRescheduleException(false);
+ }
}
@@ -706,8 +676,9 @@ void Accessors::ScriptIsEmbedderDebugScriptGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
- bool is_embedder_debug_script =
- Script::cast(JSValue::cast(object)->value())->is_embedder_debug_script();
+ bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
+ ->origin_options()
+ .IsEmbedderDebugScript();
Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -970,9 +941,6 @@ void Accessors::FunctionPrototypeSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
- if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
- return;
- }
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionPrototype(isolate, object, value).is_null()) {
@@ -1061,8 +1029,6 @@ void Accessors::FunctionLengthSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
- if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
-
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionLength(isolate, object, value).is_null()) {
@@ -1120,8 +1086,6 @@ void Accessors::FunctionNameSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
- if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
-
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionName(isolate, object, value).is_null()) {
@@ -1151,22 +1115,41 @@ static Handle<Object> ArgumentsForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
- SlotRefValueBuilder slot_refs(
- frame, inlined_frame_index,
- inlined_function->shared()->internal_formal_parameter_count());
- int args_count = slot_refs.args_length();
+ TranslatedState translated_values(frame);
+ translated_values.Prepare(false, frame->fp());
+
+ int argument_count = 0;
+ TranslatedFrame* translated_frame =
+ translated_values.GetArgumentsInfoFromJSFrameIndex(inlined_frame_index,
+ &argument_count);
+ TranslatedFrame::iterator iter = translated_frame->begin();
+
+ // Skip the function.
+ iter++;
+
+ // Skip the receiver.
+ iter++;
+ argument_count--;
+
Handle<JSObject> arguments =
- factory->NewArgumentsObject(inlined_function, args_count);
- Handle<FixedArray> array = factory->NewFixedArray(args_count);
- slot_refs.Prepare(isolate);
- for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = slot_refs.GetNext(isolate, 0);
+ factory->NewArgumentsObject(inlined_function, argument_count);
+ Handle<FixedArray> array = factory->NewFixedArray(argument_count);
+ bool should_deoptimize = false;
+ for (int i = 0; i < argument_count; ++i) {
+ // If we materialize any object, we should deopt because we might alias
+ // an object that was eliminated by escape analysis.
+ should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
+ Handle<Object> value = iter->GetValue();
array->set(i, *value);
+ iter++;
}
- slot_refs.Finish(isolate);
arguments->set_elements(*array);
+ if (should_deoptimize) {
+ translated_values.StoreMaterializedValuesAndDeopt();
+ }
+
// Return the freshly allocated arguments object.
return arguments;
}
@@ -1437,9 +1420,19 @@ static void ModuleGetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
- int slot = info.Data()->Int32Value();
- Object* value = context->get(slot);
Isolate* isolate = instance->GetIsolate();
+ int slot = info.Data()
+ ->Int32Value(info.GetIsolate()->GetCurrentContext())
+ .FromMaybe(-1);
+ if (slot < 0 || slot >= context->length()) {
+ Handle<String> name = v8::Utils::OpenHandle(*property);
+
+ Handle<Object> exception = isolate->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, name);
+ isolate->ScheduleThrow(*exception);
+ return;
+ }
+ Object* value = context->get(slot);
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
@@ -1459,9 +1452,18 @@ static void ModuleSetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
- int slot = info.Data()->Int32Value();
+ Isolate* isolate = instance->GetIsolate();
+ int slot = info.Data()
+ ->Int32Value(info.GetIsolate()->GetCurrentContext())
+ .FromMaybe(-1);
+ if (slot < 0 || slot >= context->length()) {
+ Handle<String> name = v8::Utils::OpenHandle(*property);
+ Handle<Object> exception = isolate->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, name);
+ isolate->ScheduleThrow(*exception);
+ return;
+ }
Object* old_value = context->get(slot);
- Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
@@ -1493,4 +1495,5 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index d37b6b770c..227af745b7 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -98,11 +98,6 @@ class Accessors : public AllStatic {
static Handle<ExecutableAccessorInfo> CloneAccessor(
Isolate* isolate,
Handle<ExecutableAccessorInfo> accessor);
-
-
- private:
- // Helper functions.
- static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc
index 5b513f6fef..4c8cda52ed 100644
--- a/deps/v8/src/allocation-site-scopes.cc
+++ b/deps/v8/src/allocation-site-scopes.cc
@@ -76,4 +76,5 @@ bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
return false;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index 227d969c2e..f8617cfc1e 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -337,4 +337,5 @@ void AllocationTracker::UnresolvedLocation::HandleWeakScript(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 8a03a9cf91..851cd61ffc 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -108,4 +108,5 @@ void AlignedFree(void *ptr) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index c95f2ce255..5993859710 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/isolate.h"
#include "src/lookup.h"
+#include "src/messages.h"
namespace v8 {
namespace internal {
@@ -36,11 +37,12 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
}
-MaybeHandle<Object> DefineAccessorProperty(
- Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
- Handle<Object> getter, Handle<Object> setter, Smi* attributes) {
- DCHECK(PropertyDetails::AttributesField::is_valid(
- static_cast<PropertyAttributes>(attributes->value())));
+MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
if (!getter->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, getter,
@@ -55,10 +57,8 @@ MaybeHandle<Object> DefineAccessorProperty(
Handle<FunctionTemplateInfo>::cast(setter)),
Object);
}
- RETURN_ON_EXCEPTION(isolate,
- JSObject::DefineAccessor(
- object, name, getter, setter,
- static_cast<PropertyAttributes>(attributes->value())),
+ RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
+ setter, attributes),
Object);
return object;
}
@@ -66,46 +66,29 @@ MaybeHandle<Object> DefineAccessorProperty(
MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<JSObject> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> prop_data,
- Smi* unchecked_attributes) {
- DCHECK((unchecked_attributes->value() &
- ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- // Compute attributes.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(unchecked_attributes->value());
-
+ PropertyAttributes attributes) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
- Instantiate(isolate, prop_data, key), Object);
+ Instantiate(isolate, prop_data, name), Object);
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
#ifdef DEBUG
- bool duplicate;
- if (key->IsName()) {
- LookupIterator it(object, Handle<Name>::cast(key),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.IsJust());
- duplicate = it.IsFound();
- } else {
- uint32_t index = 0;
- key->ToArrayIndex(&index);
- Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- duplicate = maybe.FromJust();
- }
- if (duplicate) {
- Handle<Object> args[1] = {key};
- THROW_NEW_ERROR(isolate, NewTypeError("duplicate_template_property",
- HandleVector(args, 1)),
- Object);
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+ DCHECK(maybe.IsJust());
+ if (it.IsFound()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kDuplicateTemplateProperty, name),
+ Object);
}
#endif
- RETURN_ON_EXCEPTION(
- isolate, Runtime::DefineObjectProperty(object, key, value, attributes),
- Object);
- return object;
+ return Object::AddDataProperty(&it, value, attributes, STRICT,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -160,27 +143,28 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
HandleScope scope(isolate);
// Disable access checks while instantiating the object.
AccessCheckDisableScope access_check_scope(isolate, obj);
- for (int i = 0; i < properties.length();) {
- int length = Smi::cast(properties.get(i))->value();
- if (length == 3) {
- auto name = handle(Name::cast(properties.get(i + 1)), isolate);
- auto prop_data = handle(properties.get(i + 2), isolate);
- auto attributes = Smi::cast(properties.get(i + 3));
+
+ int i = 0;
+ for (int c = 0; c < data->number_of_properties(); c++) {
+ auto name = handle(Name::cast(properties.get(i++)), isolate);
+ PropertyDetails details(Smi::cast(properties.get(i++)));
+ PropertyAttributes attributes = details.attributes();
+ PropertyKind kind = details.kind();
+
+ if (kind == kData) {
+ auto prop_data = handle(properties.get(i++), isolate);
+
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
} else {
- DCHECK(length == 4);
- auto name = handle(Name::cast(properties.get(i + 1)), isolate);
- auto getter = handle(properties.get(i + 2), isolate);
- auto setter = handle(properties.get(i + 3), isolate);
- auto attributes = Smi::cast(properties.get(i + 4));
+ auto getter = handle(properties.get(i++), isolate);
+ auto setter = handle(properties.get(i++), isolate);
RETURN_ON_EXCEPTION(isolate,
DefineAccessorProperty(isolate, obj, name, getter,
setter, attributes),
JSObject);
}
- i += length + 1;
}
return obj;
}
@@ -321,8 +305,8 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
list = NeanderArray(isolate).value();
templ->set_property_list(*list);
}
+ templ->set_number_of_properties(templ->number_of_properties() + 1);
NeanderArray array(list);
- array.add(isolate, isolate->factory()->NewNumberFromInt(length));
for (int i = 0; i < length; i++) {
Handle<Object> value =
data[i].is_null()
@@ -371,10 +355,9 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
const int kSize = 3;
- DCHECK(Smi::IsValid(static_cast<int>(attributes)));
- auto attribute_handle =
- handle(Smi::FromInt(static_cast<int>(attributes)), isolate);
- Handle<Object> data[kSize] = {name, value, attribute_handle};
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ auto details_handle = handle(details.AsSmi(), isolate);
+ Handle<Object> data[kSize] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, kSize, data);
}
@@ -386,10 +369,9 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
const int kSize = 4;
- DCHECK(Smi::IsValid(static_cast<int>(attributes)));
- auto attribute_handle =
- handle(Smi::FromInt(static_cast<int>(attributes)), isolate);
- Handle<Object> data[kSize] = {name, getter, setter, attribute_handle};
+ PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
+ auto details_handle = handle(details.AsSmi(), isolate);
+ Handle<Object> data[kSize] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, kSize, data);
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 4f06873036..5ff8ccbae5 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -188,14 +188,16 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Object> source_map_url(script->source_mapping_url(), isolate);
v8::Isolate* v8_isolate =
reinterpret_cast<v8::Isolate*>(script->GetIsolate());
+ ScriptOriginOptions options(script->origin_options());
v8::ScriptOrigin origin(
Utils::ToLocal(scriptName),
v8::Integer::New(v8_isolate, script->line_offset()->value()),
v8::Integer::New(v8_isolate, script->column_offset()->value()),
- v8::Boolean::New(v8_isolate, script->is_shared_cross_origin()),
+ v8::Boolean::New(v8_isolate, options.IsSharedCrossOrigin()),
v8::Integer::New(v8_isolate, script->id()->value()),
- v8::Boolean::New(v8_isolate, script->is_embedder_debug_script()),
- Utils::ToLocal(source_map_url));
+ v8::Boolean::New(v8_isolate, options.IsEmbedderDebugScript()),
+ Utils::ToLocal(source_map_url),
+ v8::Boolean::New(v8_isolate, options.IsOpaque()));
return origin;
}
@@ -211,6 +213,12 @@ void i::FatalProcessOutOfMemory(const char* location) {
// When V8 cannot allocated memory FatalProcessOutOfMemory is called.
// The default fatal error handler is called and execution is stopped.
void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+ i::Isolate* isolate = i::Isolate::Current();
+ char last_few_messages[Heap::kTraceRingBufferSize + 1];
+ char js_stacktrace[Heap::kStacktraceBufferSize + 1];
+ memset(last_few_messages, 0, Heap::kTraceRingBufferSize + 1);
+ memset(js_stacktrace, 0, Heap::kStacktraceBufferSize + 1);
+
i::HeapStats heap_stats;
int start_marker;
heap_stats.start_marker = &start_marker;
@@ -252,13 +260,19 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.size_per_type = size_per_type;
int os_error;
heap_stats.os_error = &os_error;
+ heap_stats.last_few_messages = last_few_messages;
+ heap_stats.js_stacktrace = js_stacktrace;
int end_marker;
heap_stats.end_marker = &end_marker;
- i::Isolate* isolate = i::Isolate::Current();
if (isolate->heap()->HasBeenSetUp()) {
// BUG(1718): Don't use the take_snapshot since we don't support
// HeapIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
+ char* first_newline = strchr(last_few_messages, '\n');
+ if (first_newline == NULL || first_newline[1] == '\0')
+ first_newline = last_few_messages;
+ PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
+ PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
}
Utils::ApiCheck(false, location, "Allocation failed - process out of memory");
// If the fatal error handler returns, we stop execution.
@@ -299,24 +313,32 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
}
-bool RunExtraCode(Isolate* isolate, const char* utf8_source) {
+bool RunExtraCode(Isolate* isolate, Local<Context> context,
+ const char* utf8_source) {
// Run custom script if provided.
base::ElapsedTimer timer;
timer.Start();
- TryCatch try_catch;
- Local<String> source_string = String::NewFromUtf8(isolate, utf8_source);
- if (try_catch.HasCaught()) return false;
- ScriptOrigin origin(String::NewFromUtf8(isolate, "<embedded script>"));
+ TryCatch try_catch(isolate);
+ Local<String> source_string;
+ if (!String::NewFromUtf8(isolate, utf8_source, NewStringType::kNormal)
+ .ToLocal(&source_string)) {
+ return false;
+ }
+ Local<String> resource_name =
+ String::NewFromUtf8(isolate, "<embedded script>", NewStringType::kNormal)
+ .ToLocalChecked();
+ ScriptOrigin origin(resource_name);
ScriptCompiler::Source source(source_string, origin);
- Local<Script> script = ScriptCompiler::Compile(isolate, &source);
- if (try_catch.HasCaught()) return false;
- script->Run();
+ Local<Script> script;
+ if (!ScriptCompiler::Compile(context, &source).ToLocal(&script)) return false;
+ if (script->Run(context).IsEmpty()) return false;
if (i::FLAG_profile_deserialization) {
i::PrintF("Executing custom snapshot script took %0.3f ms\n",
timer.Elapsed().InMillisecondsF());
}
timer.Stop();
- return !try_catch.HasCaught();
+ CHECK(!try_catch.HasCaught());
+ return true;
}
@@ -351,13 +373,13 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
i::Snapshot::Metadata metadata;
{
HandleScope handle_scope(isolate);
- Handle<Context> new_context = Context::New(isolate);
+ Local<Context> new_context = Context::New(isolate);
internal_isolate->set_creating_default_snapshot(false);
context.Reset(isolate, new_context);
if (custom_source != NULL) {
metadata.set_embeds_script(true);
Context::Scope context_scope(new_context);
- if (!RunExtraCode(isolate, custom_source)) context.Reset();
+ if (!RunExtraCode(isolate, new_context, custom_source)) context.Reset();
}
}
if (!context.IsEmpty()) {
@@ -787,7 +809,7 @@ v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
}
-void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
+void Context::SetEmbedderData(int index, v8::Local<Value> value) {
const char* location = "v8::Context::SetEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
if (data.is_null()) return;
@@ -882,12 +904,12 @@ void NeanderArray::set(int index, i::Object* value) {
static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
+ that->set_number_of_properties(0);
that->set_tag(i::Smi::FromInt(type));
}
-void Template::Set(v8::Handle<Name> name,
- v8::Handle<Data> value,
+void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
v8::PropertyAttribute attribute) {
auto templ = Utils::OpenHandle(this);
i::Isolate* isolate = templ->GetIsolate();
@@ -924,7 +946,7 @@ void Template::SetAccessorProperty(
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
- info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
+ InitializeTemplate(info, Consts::FUNCTION_TEMPLATE);
info->set_flag(0);
}
@@ -950,7 +972,7 @@ static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
}
-void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
+void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
i::Isolate* isolate = info->GetIsolate();
@@ -960,12 +982,8 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
static Local<FunctionTemplate> FunctionTemplateNew(
- i::Isolate* isolate,
- FunctionCallback callback,
- v8::Handle<Value> data,
- v8::Handle<Signature> signature,
- int length,
- bool do_not_cache) {
+ i::Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
+ v8::Local<Signature> signature, int length, bool do_not_cache) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
@@ -993,12 +1011,11 @@ static Local<FunctionTemplate> FunctionTemplateNew(
return Utils::ToLocal(obj);
}
-Local<FunctionTemplate> FunctionTemplate::New(
- Isolate* isolate,
- FunctionCallback callback,
- v8::Handle<Value> data,
- v8::Handle<Signature> signature,
- int length) {
+Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
+ FunctionCallback callback,
+ v8::Local<Value> data,
+ v8::Local<Signature> signature,
+ int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
@@ -1011,25 +1028,24 @@ Local<FunctionTemplate> FunctionTemplate::New(
Local<Signature> Signature::New(Isolate* isolate,
- Handle<FunctionTemplate> receiver) {
+ Local<FunctionTemplate> receiver) {
return Utils::SignatureToLocal(Utils::OpenHandle(*receiver));
}
Local<AccessorSignature> AccessorSignature::New(
- Isolate* isolate,
- Handle<FunctionTemplate> receiver) {
+ Isolate* isolate, Local<FunctionTemplate> receiver) {
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
}
-Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
- Handle<FunctionTemplate> types[1] = { type };
+Local<TypeSwitch> TypeSwitch::New(Local<FunctionTemplate> type) {
+ Local<FunctionTemplate> types[1] = {type};
return TypeSwitch::New(1, types);
}
-Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
+Local<TypeSwitch> TypeSwitch::New(int argc, Local<FunctionTemplate> types[]) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "TypeSwitch::New");
ENTER_V8(isolate);
@@ -1045,7 +1061,7 @@ Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
}
-int TypeSwitch::match(v8::Handle<Value> value) {
+int TypeSwitch::match(v8::Local<Value> value) {
i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
LOG_API(info->GetIsolate(), "TypeSwitch::match");
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
@@ -1065,7 +1081,7 @@ int TypeSwitch::match(v8::Handle<Value> value) {
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
- v8::Handle<Value> data) {
+ v8::Local<Value> data) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
@@ -1085,11 +1101,9 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
- i::Handle<i::AccessorInfo> obj,
- v8::Handle<Name> name,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
+ i::Handle<i::AccessorInfo> obj, v8::Local<Name> name,
+ v8::AccessControl settings, v8::PropertyAttribute attributes,
+ v8::Local<AccessorSignature> signature) {
obj->set_name(*Utils::OpenHandle(*name));
if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
@@ -1101,15 +1115,11 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
}
-template<typename Getter, typename Setter>
+template <typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<Name> name,
- Getter getter,
- Setter setter,
- v8::Handle<Value> data,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
+ v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
+ v8::AccessControl settings, v8::PropertyAttribute attributes,
+ v8::Local<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
@@ -1152,7 +1162,7 @@ void FunctionTemplate::SetLength(int length) {
}
-void FunctionTemplate::SetClassName(Handle<String> name) {
+void FunctionTemplate::SetClassName(Local<String> name) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetClassName");
auto isolate = info->GetIsolate();
@@ -1201,7 +1211,7 @@ void FunctionTemplate::RemovePrototype() {
Local<ObjectTemplate> ObjectTemplate::New(
- Isolate* isolate, v8::Handle<FunctionTemplate> constructor) {
+ Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
return New(reinterpret_cast<i::Isolate*>(isolate), constructor);
}
@@ -1212,8 +1222,7 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
- i::Isolate* isolate,
- v8::Handle<FunctionTemplate> constructor) {
+ i::Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
// Changes to the environment cannot be captured in the snapshot. Expect no
// object templates when the isolate is created for serialization.
DCHECK(!isolate->serializer_enabled());
@@ -1291,7 +1300,7 @@ static bool TemplateSetAccessor(
void Template::SetNativeDataProperty(v8::Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
- v8::Handle<Value> data,
+ v8::Local<Value> data,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
AccessControl settings) {
@@ -1303,7 +1312,7 @@ void Template::SetNativeDataProperty(v8::Local<String> name,
void Template::SetNativeDataProperty(v8::Local<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter,
- v8::Handle<Value> data,
+ v8::Local<Value> data,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
AccessControl settings) {
@@ -1312,25 +1321,23 @@ void Template::SetNativeDataProperty(v8::Local<Name> name,
}
-void ObjectTemplate::SetAccessor(v8::Handle<String> name,
+void ObjectTemplate::SetAccessor(v8::Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
- v8::Handle<Value> data,
- AccessControl settings,
+ v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
+ v8::Local<AccessorSignature> signature) {
TemplateSetAccessor(
this, name, getter, setter, data, settings, attribute, signature);
}
-void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
+void ObjectTemplate::SetAccessor(v8::Local<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter,
- v8::Handle<Value> data,
- AccessControl settings,
+ v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
+ v8::Local<AccessorSignature> signature) {
TemplateSetAccessor(
this, name, getter, setter, data, settings, attribute, signature);
}
@@ -1342,7 +1349,7 @@ static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
Getter getter, Setter setter,
Query query, Deleter remover,
Enumerator enumerator,
- Handle<Value> data,
+ Local<Value> data,
PropertyHandlerFlags flags) {
i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
ENTER_V8(isolate);
@@ -1377,7 +1384,7 @@ static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
- NamedPropertyEnumeratorCallback enumerator, Handle<Value> data) {
+ NamedPropertyEnumeratorCallback enumerator, Local<Value> data) {
ObjectTemplateSetNamedPropertyHandler(
this, getter, setter, query, remover, enumerator, data,
PropertyHandlerFlags::kOnlyInterceptStrings);
@@ -1404,9 +1411,7 @@ void ObjectTemplate::MarkAsUndetectable() {
void ObjectTemplate::SetAccessCheckCallbacks(
NamedSecurityCallback named_callback,
- IndexedSecurityCallback indexed_callback,
- Handle<Value> data,
- bool turned_on_by_default) {
+ IndexedSecurityCallback indexed_callback, Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -1427,7 +1432,7 @@ void ObjectTemplate::SetAccessCheckCallbacks(
info->set_data(*Utils::OpenHandle(*data));
cons->set_access_check_info(*info);
- cons->set_needs_access_check(turned_on_by_default);
+ cons->set_needs_access_check(true);
}
@@ -1462,7 +1467,7 @@ void ObjectTemplate::SetHandler(
void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
- Handle<Value> data) {
+ Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -1525,6 +1530,12 @@ ScriptCompiler::CachedData::~CachedData() {
}
+bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
+
+
+void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
+
+
ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
Encoding encoding)
: impl_(new i::StreamedSource(stream, encoding)) {}
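
These defaults mean streams without rewind support need no changes; a stream
that can rewind overrides both. A hypothetical sketch (RewindableStream and
its position bookkeeping are illustrative; GetMoreData is the one pure
virtual on ExternalSourceStream):

  class RewindableStream : public v8::ScriptCompiler::ExternalSourceStream {
   public:
    size_t GetMoreData(const uint8_t** src) override {
      *src = nullptr;  // a real stream hands out its next source chunk here
      return 0;        // returning 0 signals end of stream
    }
    bool SetBookmark() override {
      bookmark_ = pos_;  // remember the current position
      return true;       // advertise bookmark support
    }
    void ResetToBookmark() override { pos_ = bookmark_; }

   private:
    size_t pos_ = 0;
    size_t bookmark_ = 0;
  };
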
@@ -1562,7 +1573,8 @@ Local<Script> UnboundScript::BindToCurrentContext() {
pending_error_handler_.ReportMessageAt(
scope_info->StrongModeFreeVariableStartPosition(i),
scope_info->StrongModeFreeVariableEndPosition(i),
- "strong_unbound_global", name_string, i::kReferenceError);
+ i::MessageTemplate::kStrongUnboundGlobal, name_string,
+ i::kReferenceError);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
pending_error_handler_.ThrowPendingError(isolate, script);
isolate->ReportPendingMessages();
@@ -1605,7 +1617,7 @@ int UnboundScript::GetLineNumber(int code_pos) {
}
-Handle<Value> UnboundScript::GetScriptName() {
+Local<Value> UnboundScript::GetScriptName() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
@@ -1614,12 +1626,12 @@ Handle<Value> UnboundScript::GetScriptName() {
i::Object* name = i::Script::cast(obj->script())->name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
} else {
- return Handle<String>();
+ return Local<String>();
}
}
-Handle<Value> UnboundScript::GetSourceURL() {
+Local<Value> UnboundScript::GetSourceURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
@@ -1628,12 +1640,12 @@ Handle<Value> UnboundScript::GetSourceURL() {
i::Object* url = i::Script::cast(obj->script())->source_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
} else {
- return Handle<String>();
+ return Local<String>();
}
}
-Handle<Value> UnboundScript::GetSourceMappingURL() {
+Local<Value> UnboundScript::GetSourceMappingURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
@@ -1642,7 +1654,7 @@ Handle<Value> UnboundScript::GetSourceMappingURL() {
i::Object* url = i::Script::cast(obj->script())->source_mapping_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
} else {
- return Handle<String>();
+ return Local<String>();
}
}
@@ -1686,15 +1698,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
PREPARE_FOR_EXECUTION_WITH_ISOLATE(
isolate, "v8::ScriptCompiler::CompileUnbound()", UnboundScript);
- // Support the old API for a transition period:
- // - kProduceToCache -> kProduceParserCache
- // - kNoCompileOptions + cached_data != NULL -> kConsumeParserCache
- if (options == kProduceDataToCache) {
- options = kProduceParserCache;
- } else if (options == kNoCompileOptions && source->cached_data) {
- options = kConsumeParserCache;
- }
-
// Don't try to produce any kind of cache when the debugger is loaded.
if (isolate->debug()->is_loaded() &&
(options == kProduceParserCache || options == kProduceCodeCache)) {
@@ -1717,8 +1720,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::Object> source_map_url;
int line_offset = 0;
int column_offset = 0;
- bool is_embedder_debug_script = false;
- bool is_shared_cross_origin = false;
if (!source->resource_name.IsEmpty()) {
name_obj = Utils::OpenHandle(*(source->resource_name));
}
@@ -1729,21 +1730,13 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
column_offset =
static_cast<int>(source->resource_column_offset->Value());
}
- if (!source->resource_is_shared_cross_origin.IsEmpty()) {
- is_shared_cross_origin =
- source->resource_is_shared_cross_origin->IsTrue();
- }
- if (!source->resource_is_embedder_debug_script.IsEmpty()) {
- is_embedder_debug_script =
- source->resource_is_embedder_debug_script->IsTrue();
- }
if (!source->source_map_url.IsEmpty()) {
source_map_url = Utils::OpenHandle(*(source->source_map_url));
}
result = i::Compiler::CompileScript(
- str, name_obj, line_offset, column_offset, is_embedder_debug_script,
- is_shared_cross_origin, source_map_url, isolate->native_context(), NULL,
- &script_data, options, i::NOT_NATIVES_CODE, is_module);
+ str, name_obj, line_offset, column_offset, source->resource_options,
+ source_map_url, isolate->native_context(), NULL, &script_data, options,
+ i::NOT_NATIVES_CODE, is_module);
has_pending_exception = result.is_null();
if (has_pending_exception && script_data != NULL) {
// This case won't happen during normal operation; we have compiled
@@ -1960,7 +1953,7 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
StreamedSource* v8_source,
- Handle<String> full_source_string,
+ Local<String> full_source_string,
const ScriptOrigin& origin) {
PREPARE_FOR_EXECUTION(context, "v8::ScriptCompiler::Compile()", Script);
i::StreamedSource* source = v8_source->impl();
@@ -1977,14 +1970,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
script->set_column_offset(i::Smi::FromInt(
static_cast<int>(origin.ResourceColumnOffset()->Value())));
}
- if (!origin.ResourceIsSharedCrossOrigin().IsEmpty()) {
- script->set_is_shared_cross_origin(
- origin.ResourceIsSharedCrossOrigin()->IsTrue());
- }
- if (!origin.ResourceIsEmbedderDebugScript().IsEmpty()) {
- script->set_is_embedder_debug_script(
- origin.ResourceIsEmbedderDebugScript()->IsTrue());
- }
+ script->set_origin_options(origin.Options());
if (!origin.SourceMapUrl().IsEmpty()) {
script->set_source_mapping_url(
*Utils::OpenHandle(*(origin.SourceMapUrl())));
@@ -2021,7 +2007,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
StreamedSource* v8_source,
- Handle<String> full_source_string,
+ Local<String> full_source_string,
const ScriptOrigin& origin) {
auto context = v8_isolate->GetCurrentContext();
RETURN_TO_LOCAL_UNCHECKED(
@@ -2036,8 +2022,7 @@ uint32_t ScriptCompiler::CachedDataVersionTag() {
}
-MaybeLocal<Script> Script::Compile(Local<Context> context,
- Handle<String> source,
+MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
ScriptOrigin* origin) {
if (origin) {
ScriptCompiler::Source script_source(source, *origin);
@@ -2048,7 +2033,7 @@ MaybeLocal<Script> Script::Compile(Local<Context> context,
}
-Local<Script> Script::Compile(v8::Handle<String> source,
+Local<Script> Script::Compile(v8::Local<String> source,
v8::ScriptOrigin* origin) {
auto str = Utils::OpenHandle(*source);
auto context = ContextFromHeapObject(str);
@@ -2056,10 +2041,12 @@ Local<Script> Script::Compile(v8::Handle<String> source,
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<String> file_name) {
+Local<Script> Script::Compile(v8::Local<String> source,
+ v8::Local<String> file_name) {
+ auto str = Utils::OpenHandle(*source);
+ auto context = ContextFromHeapObject(str);
ScriptOrigin origin(file_name);
- return Compile(source, &origin);
+ return Compile(context, source, &origin).FromMaybe(Local<Script>());
}
@@ -2145,7 +2132,7 @@ bool v8::TryCatch::HasTerminated() const {
}
-v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
+v8::Local<v8::Value> v8::TryCatch::ReThrow() {
if (!HasCaught()) return v8::Local<v8::Value>();
rethrow_ = true;
return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_));
@@ -2251,18 +2238,18 @@ ScriptOrigin Message::GetScriptOrigin() const {
}
-v8::Handle<Value> Message::GetScriptResourceName() const {
+v8::Local<Value> Message::GetScriptResourceName() const {
return GetScriptOrigin().ResourceName();
}
-v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
+v8::Local<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
- if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
+ if (!stackFramesObj->IsJSArray()) return v8::Local<v8::StackTrace>();
auto stackTrace = i::Handle<i::JSArray>::cast(stackFramesObj);
return scope.Escape(Utils::StackTraceToLocal(stackTrace));
}
@@ -2363,7 +2350,18 @@ bool Message::IsSharedCrossOrigin() const {
auto self = Utils::OpenHandle(this);
auto script = i::Handle<i::JSValue>::cast(
i::Handle<i::Object>(self->script(), isolate));
- return i::Script::cast(script->value())->is_shared_cross_origin();
+ return i::Script::cast(script->value())
+ ->origin_options()
+ .IsSharedCrossOrigin();
+}
+
+bool Message::IsOpaque() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ auto self = Utils::OpenHandle(this);
+ auto script = i::Handle<i::JSValue>::cast(
+ i::Handle<i::Object>(self->script(), isolate));
+ return i::Script::cast(script->value())->origin_options().IsOpaque();
}
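
Both predicates now read the script's merged origin options. A sketch of a
message listener honoring them (registered via Isolate::AddMessageListener
as usual):

  void OnMessage(v8::Local<v8::Message> message, v8::Local<v8::Value> data) {
    if (message->IsOpaque()) return;  // the origin opted out of reporting
    bool full_details = message->IsSharedCrossOrigin();
    // ... report the error, sanitized unless full_details ...
  }
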
@@ -2520,7 +2518,7 @@ Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
}
-void NativeWeakMap::Set(Handle<Value> v8_key, Handle<Value> v8_value) {
+void NativeWeakMap::Set(Local<Value> v8_key, Local<Value> v8_value) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
ENTER_V8(isolate);
@@ -2537,11 +2535,12 @@ void NativeWeakMap::Set(Handle<Value> v8_key, Handle<Value> v8_value) {
DCHECK(false);
return;
}
- i::Runtime::WeakCollectionSet(weak_collection, key, value);
+ int32_t hash = i::Object::GetOrCreateHash(isolate, key)->value();
+ i::Runtime::WeakCollectionSet(weak_collection, key, value, hash);
}
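
The identity hash is now computed up front and threaded through to the
runtime call; from the embedder's side the collection behaves as before.
A brief sketch, assuming isolate is live and key is a JS object (non-object
keys hit the DCHECK path above):

  v8::Local<v8::NativeWeakMap> map = v8::NativeWeakMap::New(isolate);
  map->Set(key, value);  // value stays reachable only while key is alive
  if (map->Has(key)) {
    v8::Local<v8::Value> stored = map->Get(key);
    map->Delete(key);
    (void)stored;
  }
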
-Local<Value> NativeWeakMap::Get(Handle<Value> v8_key) {
+Local<Value> NativeWeakMap::Get(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
ENTER_V8(isolate);
@@ -2563,7 +2562,7 @@ Local<Value> NativeWeakMap::Get(Handle<Value> v8_key) {
}
-bool NativeWeakMap::Has(Handle<Value> v8_key) {
+bool NativeWeakMap::Has(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
ENTER_V8(isolate);
@@ -2584,7 +2583,7 @@ bool NativeWeakMap::Has(Handle<Value> v8_key) {
}
-bool NativeWeakMap::Delete(Handle<Value> v8_key) {
+bool NativeWeakMap::Delete(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
ENTER_V8(isolate);
@@ -2682,7 +2681,8 @@ bool Value::IsArray() const {
bool Value::IsArrayBuffer() const {
- return Utils::OpenHandle(this)->IsJSArrayBuffer();
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj)->is_shared();
}
@@ -2703,6 +2703,7 @@ bool Value::IsTypedArray() const {
i::JSTypedArray::cast(*obj)->type() == i::kExternal##Type##Array; \
}
+
TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
@@ -2713,6 +2714,12 @@ bool Value::IsDataView() const {
}
+bool Value::IsSharedArrayBuffer() const {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj)->is_shared();
+}
+
+
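
With is_shared() consulted in both predicates, a given buffer answers true
to exactly one of the two; a small dispatch sketch:

  void Describe(v8::Local<v8::Value> v) {
    if (v->IsArrayBuffer()) {
      // plain buffer; v8::ArrayBuffer::Cast now checks !is_shared() too
    } else if (v->IsSharedArrayBuffer()) {
      v8::Local<v8::SharedArrayBuffer> sab = v.As<v8::SharedArrayBuffer>();
      (void)sab;  // backing store may be reachable from other isolates
    }
  }
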
bool Value::IsObject() const {
return Utils::OpenHandle(this)->IsJSObject();
}
@@ -3073,6 +3080,20 @@ void v8::Array::CheckCast(Value* that) {
}
+void v8::Map::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsJSMap(), "v8::Map::Cast()",
+ "Could not convert to Map");
+}
+
+
+void v8::Set::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsJSSet(), "v8::Set::Cast()",
+ "Could not convert to Set");
+}
+
+
void v8::Promise::CheckCast(Value* that) {
Utils::ApiCheck(that->IsPromise(),
"v8::Promise::Cast()",
@@ -3089,9 +3110,9 @@ void v8::Promise::Resolver::CheckCast(Value* that) {
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSArrayBuffer(),
- "v8::ArrayBuffer::Cast()",
- "Could not convert to ArrayBuffer");
+ Utils::ApiCheck(
+ obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj)->is_shared(),
+ "v8::ArrayBuffer::Cast()", "Could not convert to ArrayBuffer");
}
@@ -3134,6 +3155,15 @@ void v8::DataView::CheckCast(Value* that) {
}
+void v8::SharedArrayBuffer::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(
+ obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj)->is_shared(),
+ "v8::SharedArrayBuffer::Cast()",
+ "Could not convert to SharedArrayBuffer");
+}
+
+
void v8::Date::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
i::Isolate* isolate = NULL;
@@ -3330,7 +3360,7 @@ Local<Uint32> Value::ToArrayIndex() const {
}
-Maybe<bool> Value::Equals(Local<Context> context, Handle<Value> that) const {
+Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
if (self->IsSmi() && other->IsSmi()) {
@@ -3350,7 +3380,7 @@ Maybe<bool> Value::Equals(Local<Context> context, Handle<Value> that) const {
}
-bool Value::Equals(Handle<Value> that) const {
+bool Value::Equals(Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
if (self->IsSmi() && other->IsSmi()) {
@@ -3365,7 +3395,7 @@ bool Value::Equals(Handle<Value> that) const {
}
-bool Value::StrictEquals(Handle<Value> that) const {
+bool Value::StrictEquals(Local<Value> that) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
if (obj->IsSmi()) {
@@ -3396,7 +3426,7 @@ bool Value::StrictEquals(Handle<Value> that) const {
}
-bool Value::SameValue(Handle<Value> that) const {
+bool Value::SameValue(Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
return self->SameValue(*other);
@@ -3417,7 +3447,7 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
}
-bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value) {
+bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return Set(context, key, value).FromMaybe(false);
}
@@ -3428,89 +3458,137 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
- has_pending_exception = i::JSObject::SetElement(
- self, index, value_obj, NONE, i::SLOPPY).is_null();
+ has_pending_exception =
+ i::JSReceiver::SetElement(self, index, value_obj, i::SLOPPY).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
-bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
+bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return Set(context, index, value).FromMaybe(false);
}
-Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
- v8::Local<Value> key, v8::Local<Value> value,
- v8::PropertyAttribute attribs) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
- auto value_obj = Utils::OpenHandle(*value);
- has_pending_exception = i::Runtime::DefineObjectProperty(
- self,
- key_obj,
- value_obj,
- static_cast<PropertyAttributes>(attribs)).is_null();
+Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
+ v8::Local<Name> key,
+ v8::Local<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
+ bool);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, self, key_obj, i::LookupIterator::OWN);
+ Maybe<bool> result = i::JSObject::CreateDataProperty(&it, value_obj);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(true);
+ return result;
}
-bool v8::Object::ForceSet(v8::Handle<Value> key, v8::Handle<Value> value,
- v8::PropertyAttribute attribs) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- return ForceSet(context, key, value, attribs).FromMaybe(false);
+Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
+ uint32_t index,
+ v8::Local<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
+ bool);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+
+ i::LookupIterator it(isolate, self, index, i::LookupIterator::OWN);
+ Maybe<bool> result = i::JSObject::CreateDataProperty(&it, value_obj);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
}
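
Both overloads follow the ES6 CreateDataProperty semantics: define an own
data property without running setters, reporting the outcome through
Maybe<bool>. A usage sketch, assuming obj, context, key (a Local<Name>) and
value are in scope:

  v8::Maybe<bool> created = obj->CreateDataProperty(context, key, value);
  if (created.IsNothing()) {
    // an exception is pending
  } else if (!created.FromJust()) {
    // the property could not be created (e.g. the object is frozen)
  }
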
-bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) {
- return ForceSet(v8::Handle<Value>(reinterpret_cast<Value*>(*key)),
- value, DontEnum);
-}
+Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
+ v8::Local<Name> key,
+ v8::Local<Value> value,
+ v8::PropertyAttribute attributes) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DefineOwnProperty()",
+ bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ auto value_obj = Utils::OpenHandle(*value);
+ if (self->IsAccessCheckNeeded() && !isolate->MayAccess(self)) {
+ isolate->ReportFailedAccessCheck(self);
+ return Nothing<bool>();
+ }
+
+ i::Handle<i::FixedArray> desc = isolate->factory()->NewFixedArray(3);
+ desc->set(0, isolate->heap()->ToBoolean(!(attributes & v8::ReadOnly)));
+ desc->set(1, isolate->heap()->ToBoolean(!(attributes & v8::DontEnum)));
+ desc->set(2, isolate->heap()->ToBoolean(!(attributes & v8::DontDelete)));
+ i::Handle<i::JSArray> desc_array =
+ isolate->factory()->NewJSArrayWithElements(desc, i::FAST_ELEMENTS, 3);
+ i::Handle<i::Object> args[] = {self, key_obj, value_obj, desc_array};
+ i::Handle<i::Object> result;
+ has_pending_exception =
+ !CallV8HeapFunction(isolate, "$objectDefineOwnProperty",
+ isolate->factory()->undefined_value(),
+ arraysize(args), args).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(result->BooleanValue());
+}
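
Unlike CreateDataProperty, this overload takes explicit attributes and
funnels through the self-hosted $objectDefineOwnProperty path. A sketch
defining a read-only, non-enumerable property (same scope assumptions as
above):

  v8::Maybe<bool> defined = obj->DefineOwnProperty(
      context, key, value,
      static_cast<v8::PropertyAttribute>(v8::ReadOnly | v8::DontEnum));
  if (defined.FromMaybe(false)) {
    // key is now a non-writable, non-enumerable own data property of obj
  }
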
-namespace {
-i::MaybeHandle<i::Object> DeleteObjectProperty(
- i::Isolate* isolate, i::Handle<i::JSReceiver> receiver,
- i::Handle<i::Object> key, i::LanguageMode language_mode) {
+MUST_USE_RESULT
+static i::MaybeHandle<i::Object> DefineObjectProperty(
+ i::Handle<i::JSObject> js_object, i::Handle<i::Object> key,
+ i::Handle<i::Object> value, PropertyAttributes attrs) {
+ i::Isolate* isolate = js_object->GetIsolate();
// Check if the given key is an array index.
- uint32_t index;
+ uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the
- // characters of a string using [] notation. In the case of a
- // String object we just need to redirect the deletion to the
- // underlying string if the index is in range. Since the
- // underlying string does nothing with the deletion, we can ignore
- // such deletions.
- if (receiver->IsStringObjectWithCharacterAt(index)) {
- return isolate->factory()->true_value();
- }
-
- return i::JSReceiver::DeleteElement(receiver, index, language_mode);
+ return i::JSObject::SetOwnElementIgnoreAttributes(js_object, index, value,
+ attrs);
}
i::Handle<i::Name> name;
- if (key->IsName()) {
- name = i::Handle<i::Name>::cast(key);
- } else {
- // Call-back into JavaScript to convert the key to a string.
- i::Handle<i::Object> converted;
- if (!i::Execution::ToString(isolate, key).ToHandle(&converted)) {
- return i::MaybeHandle<i::Object>();
- }
- name = i::Handle<i::String>::cast(converted);
- }
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
+ i::Runtime::ToName(isolate, key),
+ i::MaybeHandle<i::Object>());
- if (name->IsString()) {
- name = i::String::Flatten(i::Handle<i::String>::cast(name));
- }
- return i::JSReceiver::DeleteProperty(receiver, name, language_mode);
+ return i::JSObject::DefinePropertyOrElementIgnoreAttributes(js_object, name,
+ value, attrs);
}
-} // namespace
+
+Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
+ v8::Local<Value> key, v8::Local<Value> value,
+ v8::PropertyAttribute attribs) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ auto value_obj = Utils::OpenHandle(*value);
+ has_pending_exception =
+ DefineObjectProperty(self, key_obj, value_obj,
+ static_cast<PropertyAttributes>(attribs)).is_null();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
+}
+
+
+bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
+ v8::PropertyAttribute attribs) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(),
+ "v8::Object::ForceSet", false, i::HandleScope,
+ false);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+ has_pending_exception =
+ DefineObjectProperty(self, key_obj, value_obj,
+ static_cast<PropertyAttributes>(attribs)).is_null();
+ EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false);
+ return true;
+}
MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
@@ -3526,7 +3604,7 @@ MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
}
-Local<Value> v8::Object::Get(v8::Handle<Value> key) {
+Local<Value> v8::Object::Get(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value);
}
@@ -3549,11 +3627,6 @@ Local<Value> v8::Object::Get(uint32_t index) {
}
-Local<Value> v8::Object::GetPrivate(v8::Handle<Private> key) {
- return Get(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
-}
-
-
Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(
@@ -3576,7 +3649,7 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
}
-PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
+PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return GetPropertyAttributes(context, key)
.FromMaybe(static_cast<PropertyAttribute>(NONE));
@@ -3629,14 +3702,14 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
}
-bool v8::Object::SetPrototype(Handle<Value> value) {
+bool v8::Object::SetPrototype(Local<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return SetPrototype(context, value).FromMaybe(false);
}
Local<Object> v8::Object::FindInstanceInPrototypeChain(
- v8::Handle<FunctionTemplate> tmpl) {
+ v8::Local<FunctionTemplate> tmpl) {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
i::PrototypeIterator::START_AT_RECEIVER);
@@ -3709,11 +3782,13 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
// return "[object " + c + "]";
if (!name->IsString()) {
- return v8::String::NewFromUtf8(v8_isolate, "[object ]");
+ return v8::String::NewFromUtf8(v8_isolate, "[object ]",
+ NewStringType::kNormal);
}
auto class_name = i::Handle<i::String>::cast(name);
if (i::String::Equals(class_name, isolate->factory()->Arguments_string())) {
- return v8::String::NewFromUtf8(v8_isolate, "[object Object]");
+ return v8::String::NewFromUtf8(v8_isolate, "[object Object]",
+ NewStringType::kNormal);
}
if (internal::FLAG_harmony_tostring) {
PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString()", String);
@@ -3749,8 +3824,8 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
- return v8::String::NewFromUtf8(v8_isolate, buf.start(), String::kNormalString,
- buf_len);
+ return v8::String::NewFromUtf8(v8_isolate, buf.start(),
+ NewStringType::kNormal, buf_len);
}
@@ -3773,30 +3848,26 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
auto key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> obj;
has_pending_exception =
- !DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY).ToHandle(&obj);
+ !i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY)
+ .ToHandle(&obj);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(obj->IsTrue());
}
-bool v8::Object::Delete(v8::Handle<Value> key) {
+bool v8::Object::Delete(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return Delete(context, key).FromMaybe(false);
}
-bool v8::Object::DeletePrivate(v8::Handle<Private> key) {
- return Delete(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
-}
-
-
Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
Maybe<bool> maybe = Nothing<bool>();
// Check if the given key is an array index.
- uint32_t index;
+ uint32_t index = 0;
if (key_obj->ToArrayIndex(&index)) {
maybe = i::JSReceiver::HasElement(self, index);
} else {
@@ -3812,19 +3883,12 @@ Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
}
-bool v8::Object::Has(v8::Handle<Value> key) {
+bool v8::Object::Has(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return Has(context, key).FromMaybe(false);
}
-bool v8::Object::HasPrivate(v8::Handle<Private> key) {
- // TODO(rossberg): this should use HasOwnProperty, but we'd need to
-  // generalise that to a (not yet existent) Name argument first.
- return Has(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
-}
-
-
Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
bool);
@@ -3866,7 +3930,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* obj,
AccessControl settings,
PropertyAttribute attributes) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetAccessor()", bool);
- v8::Handle<AccessorSignature> signature;
+ v8::Local<AccessorSignature> signature;
auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
signature);
if (info.is_null()) return Nothing<bool>();
@@ -3893,23 +3957,18 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
}
-bool Object::SetAccessor(Handle<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes) {
+bool Object::SetAccessor(Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter, v8::Local<Value> data,
+ AccessControl settings, PropertyAttribute attributes) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
attributes).FromMaybe(false);
}
-bool Object::SetAccessor(Handle<Name> name,
- AccessorNameGetterCallback getter,
+bool Object::SetAccessor(Local<Name> name, AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter,
- v8::Handle<Value> data,
- AccessControl settings,
+ v8::Local<Value> data, AccessControl settings,
PropertyAttribute attributes) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
@@ -3917,9 +3976,8 @@ bool Object::SetAccessor(Handle<Name> name,
}
-void Object::SetAccessorProperty(Local<Name> name,
- Local<Function> getter,
- Handle<Function> setter,
+void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
+ Local<Function> setter,
PropertyAttribute attribute,
AccessControl settings) {
// TODO(verwaest): Remove |settings|.
@@ -3951,7 +4009,7 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
}
-bool v8::Object::HasOwnProperty(Handle<String> key) {
+bool v8::Object::HasOwnProperty(Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return HasOwnProperty(context, key).FromMaybe(false);
}
@@ -3970,7 +4028,7 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
}
-bool v8::Object::HasRealNamedProperty(Handle<String> key) {
+bool v8::Object::HasRealNamedProperty(Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return HasRealNamedProperty(context, key).FromMaybe(false);
}
@@ -4007,7 +4065,7 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
}
-bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
+bool v8::Object::HasRealNamedCallbackProperty(Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return HasRealNamedCallbackProperty(context, key).FromMaybe(false);
}
@@ -4034,8 +4092,9 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return MaybeLocal<Value>();
auto proto = i::PrototypeIterator::GetCurrent(iter);
- i::LookupIterator it(self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
- i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
RETURN_ON_FAILED_EXECUTION(Value);
@@ -4045,7 +4104,7 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
- Handle<String> key) {
+ Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetRealNamedPropertyInPrototypeChain(context, key),
Value);
@@ -4063,8 +4122,9 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return Nothing<PropertyAttribute>();
auto proto = i::PrototypeIterator::GetCurrent(iter);
- i::LookupIterator it(self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
- i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
auto result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
@@ -4077,7 +4137,7 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
Maybe<PropertyAttribute>
-v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(Handle<String> key) {
+v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return GetRealNamedPropertyAttributesInPrototypeChain(context, key);
}
@@ -4088,8 +4148,9 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
PREPARE_FOR_EXECUTION(context, "v8::Object::GetRealNamedProperty()", Value);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
- i::LookupIterator it(self, key_obj,
- i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, self, key_obj,
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
RETURN_ON_FAILED_EXECUTION(Value);
@@ -4098,7 +4159,7 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
}
-Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
+Local<Value> v8::Object::GetRealNamedProperty(Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetRealNamedProperty(context, key), Value);
}
@@ -4111,8 +4172,9 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
PropertyAttribute);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
- i::LookupIterator it(self, key_obj,
- i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, self, key_obj,
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
auto result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
@@ -4125,32 +4187,12 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
- Handle<String> key) {
+ Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return GetRealNamedPropertyAttributes(context, key);
}
-// Turns on access checks by copying the map and setting the check flag.
-// Because the object gets a new map, existing inline cache caching
-// the old map of this object will fail.
-void v8::Object::TurnOnAccessCheck() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-
- // When turning on access checks for a global object deoptimize all functions
- // as optimized code does not always handle access checks.
- i::Deoptimizer::DeoptimizeGlobalObject(*obj);
-
- i::Handle<i::Map> new_map =
- i::Map::Copy(i::Handle<i::Map>(obj->map()), "APITurnOnAccessCheck");
- new_map->set_is_access_check_needed(true);
- i::JSObject::MigrateToMap(obj, new_map);
-}
-
-
Local<v8::Object> v8::Object::Clone() {
auto self = Utils::OpenHandle(this);
auto isolate = self->GetIsolate();
@@ -4176,8 +4218,8 @@ int v8::Object::GetIdentityHash() {
}
-bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
- v8::Handle<v8::Value> value) {
+bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
+ v8::Local<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
@@ -4193,7 +4235,7 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
}
-v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
+v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Local<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -4206,7 +4248,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
}
-bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
+bool v8::Object::DeleteHiddenValue(v8::Local<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -4226,14 +4268,14 @@ bool v8::Object::IsCallable() {
MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
- Handle<Value> recv, int argc,
- Handle<Value> argv[]) {
+ Local<Value> recv, int argc,
+ Local<Value> argv[]) {
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Object::CallAsFunction()",
Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Handle<i::JSFunction> fun;
if (self->IsJSFunction()) {
@@ -4256,8 +4298,8 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
+Local<v8::Value> Object::CallAsFunction(v8::Local<v8::Value> recv, int argc,
+ v8::Local<v8::Value> argv[]) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
RETURN_TO_LOCAL_UNCHECKED(CallAsFunction(context, recv, argc, argv_cast),
@@ -4271,7 +4313,7 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
"v8::Object::CallAsConstructor()", Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
if (self->IsJSFunction()) {
auto fun = i::Handle<i::JSFunction>::cast(self);
@@ -4300,38 +4342,44 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<v8::Value> Object::CallAsConstructor(int argc,
- v8::Handle<v8::Value> argv[]) {
+ v8::Local<v8::Value> argv[]) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
RETURN_TO_LOCAL_UNCHECKED(CallAsConstructor(context, argc, argv_cast), Value);
}
-Local<Function> Function::New(Isolate* v8_isolate,
- FunctionCallback callback,
- Local<Value> data,
- int length) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+MaybeLocal<Function> Function::New(Local<Context> context,
+ FunctionCallback callback, Local<Value> data,
+ int length) {
+ i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
LOG_API(isolate, "Function::New");
ENTER_V8(isolate);
- return FunctionTemplateNew(
- isolate, callback, data, Local<Signature>(), length, true)->
- GetFunction();
+ return FunctionTemplateNew(isolate, callback, data, Local<Signature>(),
+ length, true)->GetFunction(context);
+}
+
+
+Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
+ Local<Value> data, int length) {
+ return Function::New(v8_isolate->GetCurrentContext(), callback, data, length)
+ .FromMaybe(Local<Function>());
}
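
The context overload returns MaybeLocal, and the isolate overload remains as
an unchecked wrapper over it. A sketch of the checked path (Answer is an
illustrative FunctionCallback; isolate and context are assumed live, with
data and length left at their defaults):

  void Answer(const v8::FunctionCallbackInfo<v8::Value>& info) {
    info.GetReturnValue().Set(42);
  }

  // ... inside embedder code, with the context entered:
  v8::Local<v8::Function> fn;
  if (v8::Function::New(context, Answer).ToLocal(&fn)) {
    v8::Local<v8::Value> result;
    if (fn->Call(context, context->Global(), 0, nullptr).ToLocal(&result)) {
      // result now holds the number 42
    }
  }
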
Local<v8::Object> Function::NewInstance() const {
- return NewInstance(0, NULL);
+ return NewInstance(Isolate::GetCurrent()->GetCurrentContext(), 0, NULL)
+ .FromMaybe(Local<Object>());
}
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
- v8::Handle<v8::Value> argv[]) const {
+ v8::Local<v8::Value> argv[]) const {
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::NewInstance()",
Object);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Object> result;
has_pending_exception =
@@ -4342,20 +4390,20 @@ MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
Local<v8::Object> Function::NewInstance(int argc,
- v8::Handle<v8::Value> argv[]) const {
+ v8::Local<v8::Value> argv[]) const {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(NewInstance(context, argc, argv), Object);
}
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
- v8::Handle<v8::Value> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
+ v8::Local<v8::Value> recv, int argc,
+ v8::Local<v8::Value> argv[]) {
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::Call()", Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
- STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+ STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Value> result;
has_pending_exception =
@@ -4367,41 +4415,41 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
}
-Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
+Local<v8::Value> Function::Call(v8::Local<v8::Value> recv, int argc,
+ v8::Local<v8::Value> argv[]) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Call(context, recv, argc, argv), Value);
}
-void Function::SetName(v8::Handle<v8::String> name) {
+void Function::SetName(v8::Local<v8::String> name) {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
func->shared()->set_name(*Utils::OpenHandle(*name));
}
-Handle<Value> Function::GetName() const {
+Local<Value> Function::GetName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
func->GetIsolate()));
}
-Handle<Value> Function::GetInferredName() const {
+Local<Value> Function::GetInferredName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
func->GetIsolate()));
}
-Handle<Value> Function::GetDisplayName() const {
+Local<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
i::Handle<i::String> property_name =
isolate->factory()->NewStringFromStaticChars("displayName");
i::Handle<i::Object> value =
- i::JSObject::GetDataProperty(func, property_name);
+ i::JSReceiver::GetDataProperty(func, property_name);
if (value->IsString()) {
i::Handle<i::String> name = i::Handle<i::String>::cast(value);
if (name->length() > 0) return Utils::ToLocal(name);
@@ -4416,7 +4464,7 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return GetScriptOriginForScript(func->GetIsolate(), script);
}
- return v8::ScriptOrigin(Handle<Value>());
+ return v8::ScriptOrigin(Local<Value>());
}
@@ -5179,11 +5227,6 @@ Local<Value> Symbol::Name() const {
}
-Local<Value> Private::Name() const {
- return reinterpret_cast<const Symbol*>(this)->Name();
-}
-
-
double Number::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
@@ -5250,13 +5293,12 @@ Local<Value> v8::Object::SlowGetInternalField(int index) {
}
-void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
+void v8::Object::SetInternalField(int index, v8::Local<Value> value) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
- DCHECK(value->Equals(GetInternalField(index)));
}
@@ -5350,6 +5392,12 @@ HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
physical_space_size_(0) { }
+HeapObjectStatistics::HeapObjectStatistics()
+ : object_type_(nullptr),
+ object_sub_type_(nullptr),
+ object_count_(0),
+ object_size_(0) {}
+
bool v8::V8::InitializeICU(const char* icu_data_file) {
return i::InitializeICU(icu_data_file);
}
@@ -5361,16 +5409,15 @@ const char* v8::V8::GetVersion() {
static i::Handle<i::Context> CreateEnvironment(
- i::Isolate* isolate,
- v8::ExtensionConfiguration* extensions,
- v8::Handle<ObjectTemplate> global_template,
- v8::Handle<Value> maybe_global_proxy) {
+ i::Isolate* isolate, v8::ExtensionConfiguration* extensions,
+ v8::Local<ObjectTemplate> global_template,
+ v8::Local<Value> maybe_global_proxy) {
i::Handle<i::Context> env;
// Enter V8 via an ENTER_V8 scope.
{
ENTER_V8(isolate);
- v8::Handle<ObjectTemplate> proxy_template = global_template;
+ v8::Local<ObjectTemplate> proxy_template = global_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
@@ -5426,11 +5473,10 @@ static i::Handle<i::Context> CreateEnvironment(
return env;
}
-Local<Context> v8::Context::New(
- v8::Isolate* external_isolate,
- v8::ExtensionConfiguration* extensions,
- v8::Handle<ObjectTemplate> global_template,
- v8::Handle<Value> global_object) {
+Local<Context> v8::Context::New(v8::Isolate* external_isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::Local<ObjectTemplate> global_template,
+ v8::Local<Value> global_object) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
LOG_API(isolate, "Context::New");
i::HandleScope scope(isolate);
@@ -5438,12 +5484,17 @@ Local<Context> v8::Context::New(
if (extensions == NULL) extensions = &no_extensions;
i::Handle<i::Context> env =
CreateEnvironment(isolate, extensions, global_template, global_object);
- if (env.is_null()) return Local<Context>();
+ if (env.is_null()) {
+ if (isolate->has_pending_exception()) {
+ isolate->OptionalRescheduleException(true);
+ }
+ return Local<Context>();
+ }
return Utils::ToLocal(scope.CloseAndEscape(env));
}
-void v8::Context::SetSecurityToken(Handle<Value> token) {
+void v8::Context::SetSecurityToken(Local<Value> token) {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
env->set_security_token(*token_handle);
@@ -5456,7 +5507,7 @@ void v8::Context::UseDefaultSecurityToken() {
}
-Handle<Value> v8::Context::GetSecurityToken() {
+Local<Value> v8::Context::GetSecurityToken() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
i::Object* security_token = env->security_token();
@@ -5493,6 +5544,14 @@ void Context::DetachGlobal() {
}
+Local<v8::Object> Context::GetExtrasExportsObject() {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ i::Handle<i::JSObject> exports(context->extras_exports_object(), isolate);
+ return Utils::ToLocal(exports);
+}
+
+
void Context::AllowCodeGenerationFromStrings(bool allow) {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -5508,8 +5567,7 @@ bool Context::IsCodeGenerationFromStringsAllowed() {
}
-void Context::SetErrorMessageForCodeGenerationFromStrings(
- Handle<String> error) {
+void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
context->set_error_message_for_code_gen_from_strings(*error_handle);
@@ -5551,7 +5609,7 @@ Local<v8::Function> FunctionTemplate::GetFunction() {
}
-bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
+bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
auto self = Utils::OpenHandle(this);
auto obj = Utils::OpenHandle(*value);
return self->IsTemplateFor(*obj);
@@ -5707,7 +5765,7 @@ MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
}
-Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
+Local<String> v8::String::Concat(Local<String> left, Local<String> right) {
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
i::Isolate* isolate = left_string->GetIsolate();
ENTER_V8(isolate);
@@ -5896,7 +5954,7 @@ bool v8::BooleanObject::ValueOf() const {
}
-Local<v8::Value> v8::StringObject::New(Handle<String> value) {
+Local<v8::Value> v8::StringObject::New(Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
i::Isolate* isolate = string->GetIsolate();
LOG_API(isolate, "StringObject::New");
@@ -5917,7 +5975,7 @@ Local<v8::String> v8::StringObject::ValueOf() const {
}
-Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
+Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "SymbolObject::New");
ENTER_V8(i_isolate);
@@ -6000,7 +6058,7 @@ static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
- Handle<String> pattern, Flags flags) {
+ Local<String> pattern, Flags flags) {
PREPARE_FOR_EXECUTION(context, "RegExp::New", RegExp);
Local<v8::RegExp> result;
has_pending_exception =
@@ -6012,7 +6070,7 @@ MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
}
-Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern, Flags flags) {
+Local<v8::RegExp> v8::RegExp::New(Local<String> pattern, Flags flags) {
auto isolate =
reinterpret_cast<Isolate*>(Utils::OpenHandle(*pattern)->GetIsolate());
auto context = isolate->GetCurrentContext();
@@ -6076,8 +6134,9 @@ MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
if (!paragon->IsJSObject()) return Local<Object>();
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
Local<Object> result;
- has_pending_exception = ToLocal<Object>(
- isolate->factory()->CopyJSObject(paragon_handle), &result);
+ has_pending_exception =
+ !ToLocal<Object>(isolate->factory()->CopyJSObject(paragon_handle),
+ &result);
RETURN_ON_FAILED_EXECUTION(Object);
RETURN_ESCAPED(result);
}
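
The added `!` is the substantive fix in this hunk: ToLocal returns true on success, so has_pending_exception was previously set on every successful clone and RETURN_ON_FAILED_EXECUTION fired spuriously. Caller-side pattern for reference (a sketch; handles assumed in scope):

#include <v8.h>

// Sketch: with the negation fixed, a successful clone converts as expected.
void UseClone(v8::Local<v8::Context> context, v8::Local<v8::Array> array) {
  v8::Local<v8::Object> clone;
  if (array->CloneElementAt(context, 0).ToLocal(&clone)) {
    // clone is a shallow copy of the object at index 0.
  }
}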
@@ -6089,18 +6148,222 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
}
+Local<v8::Map> v8::Map::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "Map::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::JSMap> obj = i_isolate->factory()->NewJSMap();
+ return Utils::ToLocal(obj);
+}
+
+
+size_t v8::Map::Size() const {
+ i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
+ return i::OrderedHashMap::cast(obj->table())->NumberOfElements();
+}
+
+
+void Map::Clear() {
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ LOG_API(isolate, "Map::Clear");
+ ENTER_V8(isolate);
+ i::Runtime::JSMapClear(isolate, self);
+}
+
+
+MaybeLocal<Value> Map::Get(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION(context, "Map::Get", Value);
+ auto self = Utils::OpenHandle(this);
+ Local<Value> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::Call(isolate, isolate->map_get(), self,
+ arraysize(argv), argv, false),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
+MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
+ Local<Value> value) {
+ PREPARE_FOR_EXECUTION(context, "Map::Set", Map);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
+ Utils::OpenHandle(*value)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->map_set(), self, arraysize(argv),
+ argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Map);
+ RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
+}
+
+
+Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Map::Has", bool);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->map_has(), self, arraysize(argv),
+ argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(result->IsTrue());
+}
+
+
+Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Map::Delete", bool);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->map_delete(), self, arraysize(argv),
+ argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(result->IsTrue());
+}
+
+
+Local<Array> Map::AsArray() const {
+ i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ i::Factory* factory = isolate->factory();
+ LOG_API(isolate, "Map::AsArray");
+ ENTER_V8(isolate);
+ i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(obj->table()));
+ int size = table->NumberOfElements();
+ int length = size * 2;
+ i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
+ for (int i = 0; i < size; ++i) {
+ if (table->KeyAt(i)->IsTheHole()) continue;
+ result->set(i * 2, table->KeyAt(i));
+ result->set(i * 2 + 1, table->ValueAt(i));
+ }
+ i::Handle<i::JSArray> result_array =
+ factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+ return Utils::ToLocal(result_array);
+}
+
+
+MaybeLocal<Map> Map::FromArray(Local<Context> context, Local<Array> array) {
+ PREPARE_FOR_EXECUTION(context, "Map::FromArray", Map);
+ if (array->Length() % 2 != 0) {
+ return MaybeLocal<Map>();
+ }
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*array)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->map_from_array(),
+ isolate->factory()->undefined_value(),
+ arraysize(argv), argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Map);
+ RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
+}
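
The new v8::Map surface mirrors JS Map semantics and reports exceptions through Maybe/MaybeLocal returns rather than empty handles alone. A usage sketch (illustrative; `isolate`, `context`, `key`, and `value` are assumed to be in scope):

#include <v8.h>

// Sketch of the embedder-facing Map API added in this hunk.
void MapExample(v8::Isolate* isolate, v8::Local<v8::Context> context,
                v8::Local<v8::Value> key, v8::Local<v8::Value> value) {
  v8::Local<v8::Map> map = v8::Map::New(isolate);
  v8::Local<v8::Map> with_entry;
  if (!map->Set(context, key, value).ToLocal(&with_entry)) return;  // threw
  if (with_entry->Has(context, key).FromMaybe(false)) {
    v8::Local<v8::Value> stored;
    if (with_entry->Get(context, key).ToLocal(&stored)) {
      // stored is the value just inserted; AsArray() yields [key, value].
    }
  }
}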
+
+
+Local<v8::Set> v8::Set::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "Set::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::JSSet> obj = i_isolate->factory()->NewJSSet();
+ return Utils::ToLocal(obj);
+}
+
+
+size_t v8::Set::Size() const {
+ i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
+ return i::OrderedHashSet::cast(obj->table())->NumberOfElements();
+}
+
+
+void Set::Clear() {
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ LOG_API(isolate, "Set::Clear");
+ ENTER_V8(isolate);
+ i::Runtime::JSSetClear(isolate, self);
+}
+
+
+MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION(context, "Set::Add", Set);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->set_add(), self, arraysize(argv),
+ argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Set);
+ RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
+}
+
+
+Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Set::Has", bool);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->set_has(), self, arraysize(argv),
+ argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(result->IsTrue());
+}
+
+
+Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Set::Delete", bool);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->set_delete(), self, arraysize(argv),
+ argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(result->IsTrue());
+}
+
+
+Local<Array> Set::AsArray() const {
+ i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ i::Factory* factory = isolate->factory();
+ LOG_API(isolate, "Set::AsArray");
+ ENTER_V8(isolate);
+ i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(obj->table()));
+ int length = table->NumberOfElements();
+ i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
+ for (int i = 0; i < length; ++i) {
+ i::Object* key = table->KeyAt(i);
+ if (!key->IsTheHole()) {
+ result->set(i, key);
+ }
+ }
+ i::Handle<i::JSArray> result_array =
+ factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+ return Utils::ToLocal(result_array);
+}
+
+
+MaybeLocal<Set> Set::FromArray(Local<Context> context, Local<Array> array) {
+ PREPARE_FOR_EXECUTION(context, "Set::FromArray", Set);
+ i::Handle<i::Object> result;
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*array)};
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->set_from_array(),
+ isolate->factory()->undefined_value(),
+ arraysize(argv), argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Set);
+ RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
+}
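
v8::Set follows the same pattern; a companion sketch under the same assumptions:

#include <v8.h>

// Sketch of the embedder-facing Set API added in this hunk.
void SetExample(v8::Isolate* isolate, v8::Local<v8::Context> context,
                v8::Local<v8::Value> key) {
  v8::Local<v8::Set> set = v8::Set::New(isolate);
  v8::Local<v8::Set> with_key;
  if (!set->Add(context, key).ToLocal(&with_key)) return;  // threw
  bool present = with_key->Has(context, key).FromMaybe(false);
  v8::Local<v8::Array> keys = with_key->AsArray();  // contains key
  (void)present;
  (void)keys;
}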
+
+
bool Value::IsPromise() const {
auto self = Utils::OpenHandle(this);
- if (!self->IsJSObject()) return false;
- auto js_object = i::Handle<i::JSObject>::cast(self);
- // Promises can't have access checks.
- if (js_object->map()->is_access_check_needed()) return false;
- auto isolate = js_object->GetIsolate();
- // TODO(dcarney): this should just be read from the symbol registry so as not
- // to be context dependent.
- auto key = isolate->promise_status();
- // Shouldn't be possible to throw here.
- return i::JSObject::HasRealNamedProperty(js_object, key).FromJust();
+ return i::Object::IsPromise(self);
}
@@ -6131,7 +6394,7 @@ Local<Promise> Promise::Resolver::GetPromise() {
Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
- Handle<Value> value) {
+ Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
@@ -6146,14 +6409,14 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
}
-void Promise::Resolver::Resolve(Handle<Value> value) {
+void Promise::Resolver::Resolve(Local<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
USE(Resolve(context, value));
}
Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
- Handle<Value> value) {
+ Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Reject", bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
@@ -6168,14 +6431,14 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
}
-void Promise::Resolver::Reject(Handle<Value> value) {
+void Promise::Resolver::Reject(Local<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
USE(Reject(context, value));
}
MaybeLocal<Promise> Promise::Chain(Local<Context> context,
- Handle<Function> handler) {
+ Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, "Promise::Chain", Promise);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
@@ -6188,14 +6451,14 @@ MaybeLocal<Promise> Promise::Chain(Local<Context> context,
}
-Local<Promise> Promise::Chain(Handle<Function> handler) {
+Local<Promise> Promise::Chain(Local<Function> handler) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Chain(context, handler), Promise);
}
MaybeLocal<Promise> Promise::Catch(Local<Context> context,
- Handle<Function> handler) {
+ Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, "Promise::Catch", Promise);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
@@ -6208,14 +6471,14 @@ MaybeLocal<Promise> Promise::Catch(Local<Context> context,
}
-Local<Promise> Promise::Catch(Handle<Function> handler) {
+Local<Promise> Promise::Catch(Local<Function> handler) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Catch(context, handler), Promise);
}
MaybeLocal<Promise> Promise::Then(Local<Context> context,
- Handle<Function> handler) {
+ Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, "Promise::Then", Promise);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
@@ -6228,7 +6491,7 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
}
-Local<Promise> Promise::Then(Handle<Function> handler) {
+Local<Promise> Promise::Then(Local<Function> handler) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Then(context, handler), Promise);
}
@@ -6240,7 +6503,7 @@ bool Promise::HasHandler() {
LOG_API(isolate, "Promise::HasRejectHandler");
ENTER_V8(isolate);
i::Handle<i::Symbol> key = isolate->factory()->promise_has_handler_symbol();
- return i::JSObject::GetDataProperty(promise, key)->IsTrue();
+ return i::JSReceiver::GetDataProperty(promise, key)->IsTrue();
}
@@ -6302,7 +6565,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer();
+ i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
i::Runtime::SetupArrayBufferAllocatingData(i_isolate, obj, byte_length);
return Utils::ToLocal(obj);
}
@@ -6315,7 +6578,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
- i_isolate->factory()->NewJSArrayBuffer();
+ i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
i::Runtime::SetupArrayBuffer(i_isolate, obj,
mode == ArrayBufferCreationMode::kExternalized,
data, byte_length);
@@ -6382,38 +6645,74 @@ size_t v8::ArrayBufferView::ByteLength() {
size_t v8::TypedArray::Length() {
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->length()->Number());
+ return static_cast<size_t>(obj->length_value());
}
-#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
- Local<Type##Array> Type##Array::New(Handle<ArrayBuffer> array_buffer, \
- size_t byte_offset, size_t length) { \
- i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
- LOG_API(isolate, \
- "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)"); \
- ENTER_V8(isolate); \
- if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
- "v8::" #Type \
- "Array::New(Handle<ArrayBuffer>, size_t, size_t)", \
- "length exceeds max allowed value")) { \
- return Local<Type##Array>(); \
- } \
- i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); \
- i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
- i::kExternal##Type##Array, buffer, byte_offset, length); \
- return Utils::ToLocal##Type##Array(obj); \
+#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
+ Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer, \
+ size_t byte_offset, size_t length) { \
+ i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
+ LOG_API(isolate, \
+ "v8::" #Type "Array::New(Local<ArrayBuffer>, size_t, size_t)"); \
+ ENTER_V8(isolate); \
+ if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
+ "v8::" #Type \
+ "Array::New(Local<ArrayBuffer>, size_t, size_t)", \
+ "length exceeds max allowed value")) { \
+ return Local<Type##Array>(); \
+ } \
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); \
+ i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
+ i::kExternal##Type##Array, buffer, byte_offset, length); \
+ return Utils::ToLocal##Type##Array(obj); \
+ } \
+ Local<Type##Array> Type##Array::New( \
+ Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset, \
+ size_t length) { \
+ CHECK(i::FLAG_harmony_sharedarraybuffer); \
+ i::Isolate* isolate = \
+ Utils::OpenHandle(*shared_array_buffer)->GetIsolate(); \
+ LOG_API(isolate, "v8::" #Type \
+ "Array::New(Local<SharedArrayBuffer>, size_t, size_t)"); \
+ ENTER_V8(isolate); \
+ if (!Utils::ApiCheck( \
+ length <= static_cast<size_t>(i::Smi::kMaxValue), \
+ "v8::" #Type \
+ "Array::New(Local<SharedArrayBuffer>, size_t, size_t)", \
+ "length exceeds max allowed value")) { \
+ return Local<Type##Array>(); \
+ } \
+ i::Handle<i::JSArrayBuffer> buffer = \
+ Utils::OpenHandle(*shared_array_buffer); \
+ i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
+ i::kExternal##Type##Array, buffer, byte_offset, length); \
+ return Utils::ToLocal##Type##Array(obj); \
}
TYPED_ARRAYS(TYPED_ARRAY_NEW)
#undef TYPED_ARRAY_NEW
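
Each typed-array type now gains a second New overload over a Local<SharedArrayBuffer>, gated on the harmony flag. A sketch (requires V8 started with --harmony-sharedarraybuffer):

#include <v8.h>

// Sketch: a Uint8Array view over shared memory.
void SharedViewExample(v8::Isolate* isolate) {
  v8::Local<v8::SharedArrayBuffer> sab =
      v8::SharedArrayBuffer::New(isolate, 1024);
  v8::Local<v8::Uint8Array> bytes = v8::Uint8Array::New(sab, 0, 1024);
  (void)bytes;
}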
-Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
+Local<DataView> DataView::New(Local<ArrayBuffer> array_buffer,
size_t byte_offset, size_t byte_length) {
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
- LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)");
+ LOG_API(isolate, "v8::DataView::New(Local<ArrayBuffer>, size_t, size_t)");
+ ENTER_V8(isolate);
+ i::Handle<i::JSDataView> obj =
+ isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
+ return Utils::ToLocal(obj);
+}
+
+
+Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t byte_length) {
+ CHECK(i::FLAG_harmony_sharedarraybuffer);
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*shared_array_buffer);
+ i::Isolate* isolate = buffer->GetIsolate();
+ LOG_API(isolate,
+ "v8::DataView::New(Local<SharedArrayBuffer>, size_t, size_t)");
ENTER_V8(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -6421,6 +6720,69 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
}
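
DataView gets the matching overload, with the same CHECK on the flag. Sketch:

#include <v8.h>

// Sketch: a DataView over the same kind of shared backing store.
void SharedDataViewExample(v8::Isolate* isolate) {
  v8::Local<v8::SharedArrayBuffer> sab =
      v8::SharedArrayBuffer::New(isolate, 16);
  v8::Local<v8::DataView> view = v8::DataView::New(sab, 0, 16);
  (void)view;
}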
+bool v8::SharedArrayBuffer::IsExternal() const {
+ return Utils::OpenHandle(this)->is_external();
+}
+
+
+v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ Utils::ApiCheck(!self->is_external(), "v8::SharedArrayBuffer::Externalize",
+ "SharedArrayBuffer already externalized");
+ self->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(isolate->heap()->InNewSpace(*self),
+ self->backing_store());
+ return GetContents();
+}
+
+
+v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
+ Contents contents;
+ contents.data_ = self->backing_store();
+ contents.byte_length_ = byte_length;
+ return contents;
+}
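
Externalize() hands ownership of the backing store to the embedder and unregisters it from the heap's accounting; GetContents() exposes the pointer and length without transferring ownership. A sketch (Contents accessor names assumed from the public header):

#include <v8.h>

// Sketch: take over an internally allocated shared backing store.
void ExternalizeExample(v8::Local<v8::SharedArrayBuffer> sab) {
  if (!sab->IsExternal()) {
    v8::SharedArrayBuffer::Contents contents = sab->Externalize();
    // The embedder now owns contents.Data() for contents.ByteLength()
    // bytes and must keep it alive while any view references it.
    (void)contents;
  }
}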
+
+
+size_t v8::SharedArrayBuffer::ByteLength() const {
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
+ size_t byte_length) {
+ CHECK(i::FLAG_harmony_sharedarraybuffer);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "v8::SharedArrayBuffer::New(size_t)");
+ ENTER_V8(i_isolate);
+ i::Handle<i::JSArrayBuffer> obj =
+ i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
+ i::Runtime::SetupArrayBufferAllocatingData(i_isolate, obj, byte_length, true,
+ i::SharedFlag::kShared);
+ return Utils::ToLocalShared(obj);
+}
+
+
+Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
+ Isolate* isolate, void* data, size_t byte_length,
+ ArrayBufferCreationMode mode) {
+ CHECK(i::FLAG_harmony_sharedarraybuffer);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "v8::SharedArrayBuffer::New(void*, size_t)");
+ ENTER_V8(i_isolate);
+ i::Handle<i::JSArrayBuffer> obj =
+ i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
+ i::Runtime::SetupArrayBuffer(i_isolate, obj,
+ mode == ArrayBufferCreationMode::kExternalized,
+ data, byte_length, i::SharedFlag::kShared);
+ return Utils::ToLocalShared(obj);
+}
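
With kExternalized, V8 never frees the supplied memory, so the embedder must keep it alive past the last view. Sketch:

#include <v8.h>
#include <cstdlib>

// Sketch: wrap embedder-owned memory in a SharedArrayBuffer.
void WrapExample(v8::Isolate* isolate) {
  const size_t kLen = 4096;
  void* data = std::calloc(1, kLen);
  v8::Local<v8::SharedArrayBuffer> sab = v8::SharedArrayBuffer::New(
      isolate, data, kLen, v8::ArrayBufferCreationMode::kExternalized);
  (void)sab;  // std::free(data) only once the buffer is unreachable.
}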
+
+
Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "Symbol::New()");
@@ -6484,38 +6846,6 @@ Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
}
-Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Private::New()");
- ENTER_V8(i_isolate);
- i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
- if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
- Local<Symbol> result = Utils::ToLocal(symbol);
- return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
-}
-
-
-Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
- i::Handle<i::String> part = i_isolate->factory()->private_api_string();
- i::Handle<i::JSObject> privates =
- i::Handle<i::JSObject>::cast(
- i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
- i::Handle<i::Object> symbol =
- i::Object::GetPropertyOrElement(privates, i_name).ToHandleChecked();
- if (!symbol->IsSymbol()) {
- DCHECK(symbol->IsUndefined());
- symbol = i_isolate->factory()->NewPrivateSymbol();
- i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
- i::JSObject::SetProperty(privates, i_name, symbol, i::STRICT).Assert();
- }
- Local<Symbol> result = Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
- return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
-}
-
-
Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (std::isnan(value)) {
@@ -6906,9 +7236,8 @@ size_t Isolate::NumberOfHeapSpaces() {
bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
size_t index) {
- if (!space_statistics)
- return false;
- if (index > i::LAST_SPACE || index < i::FIRST_SPACE)
+ if (!space_statistics) return false;
+ if (!i::Heap::IsValidAllocationSpace(static_cast<i::AllocationSpace>(index)))
return false;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -6924,6 +7253,38 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
}
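
The range check now defers to Heap::IsValidAllocationSpace, so API validation cannot drift from the heap's own space enumeration. Typical enumeration from the embedder side (a sketch; accessor names assumed from the public header):

#include <v8.h>
#include <cstdio>

// Sketch: walk heap spaces through the public API.
void DumpHeapSpaces(v8::Isolate* isolate) {
  v8::HeapSpaceStatistics stats;
  for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
    if (!isolate->GetHeapSpaceStatistics(&stats, i)) continue;
    std::printf("%s: %zu of %zu bytes used\n", stats.space_name(),
                stats.space_used_size(), stats.space_size());
  }
}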
+size_t Isolate::NumberOfTrackedHeapObjectTypes() {
+ return i::Heap::OBJECT_STATS_COUNT;
+}
+
+
+bool Isolate::GetHeapObjectStatisticsAtLastGC(
+ HeapObjectStatistics* object_statistics, size_t type_index) {
+ if (!object_statistics) return false;
+ if (type_index >= i::Heap::OBJECT_STATS_COUNT) return false;
+ if (!i::FLAG_track_gc_object_stats) return false;
+
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Heap* heap = isolate->heap();
+ const char* object_type;
+ const char* object_sub_type;
+ size_t object_count = heap->object_count_last_gc(type_index);
+ size_t object_size = heap->object_size_last_gc(type_index);
+ if (!heap->GetObjectTypeName(type_index, &object_type, &object_sub_type)) {
+ // There should be no objects counted when the type is unknown.
+ DCHECK_EQ(object_count, 0U);
+ DCHECK_EQ(object_size, 0U);
+ return false;
+ }
+
+ object_statistics->object_type_ = object_type;
+ object_statistics->object_sub_type_ = object_sub_type;
+ object_statistics->object_count_ = object_count;
+ object_statistics->object_size_ = object_size;
+ return true;
+}
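
A sketch of how the new per-type statistics are meant to be consumed (only populated when V8 runs with --track-gc-object-stats; accessor names mirror the fields initialized above):

#include <v8.h>
#include <cstdio>

// Sketch: dump the object counts recorded at the last GC.
void DumpObjectStats(v8::Isolate* isolate) {
  v8::HeapObjectStatistics stats;
  for (size_t i = 0; i < isolate->NumberOfTrackedHeapObjectTypes(); ++i) {
    if (!isolate->GetHeapObjectStatisticsAtLastGC(&stats, i)) continue;
    const char* sub = stats.object_sub_type() ? stats.object_sub_type() : "";
    std::printf("%s %s: %zu objects, %zu bytes\n", stats.object_type(), sub,
                stats.object_count(), stats.object_size());
  }
}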
+
+
void Isolate::GetStackSample(const RegisterState& state, void** frames,
size_t frames_limit, SampleInfo* sample_info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -6965,7 +7326,7 @@ void Isolate::RunMicrotasks() {
}
-void Isolate::EnqueueMicrotask(Handle<Function> microtask) {
+void Isolate::EnqueueMicrotask(Local<Function> microtask) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->EnqueueMicrotask(Utils::OpenHandle(*microtask));
}
@@ -7103,7 +7464,7 @@ bool Isolate::IsDead() {
}
-bool Isolate::AddMessageListener(MessageCallback that, Handle<Value> data) {
+bool Isolate::AddMessageListener(MessageCallback that, Local<Value> data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -7191,15 +7552,17 @@ void Isolate::VisitHandlesForPartialDependence(
}
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
+String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
if (obj.IsEmpty()) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString(reinterpret_cast<v8::Isolate*>(isolate));
- if (str.IsEmpty()) return;
+ Local<Context> context = v8_isolate->GetCurrentContext();
+ TryCatch try_catch(v8_isolate);
+ Local<String> str;
+ if (!obj->ToString(context).ToLocal(&str)) return;
i::Handle<i::String> i_str = Utils::OpenHandle(*str);
length_ = v8::Utf8Length(*i_str, isolate);
str_ = i::NewArray<char>(length_ + 1);
@@ -7212,15 +7575,16 @@ String::Utf8Value::~Utf8Value() {
}
-String::Value::Value(v8::Handle<v8::Value> obj)
- : str_(NULL), length_(0) {
- i::Isolate* isolate = i::Isolate::Current();
+String::Value::Value(v8::Local<v8::Value> obj) : str_(NULL), length_(0) {
if (obj.IsEmpty()) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- TryCatch try_catch;
- Handle<String> str = obj->ToString(reinterpret_cast<v8::Isolate*>(isolate));
- if (str.IsEmpty()) return;
+ Local<Context> context = v8_isolate->GetCurrentContext();
+ TryCatch try_catch(v8_isolate);
+ Local<String> str;
+ if (!obj->ToString(context).ToLocal(&str)) return;
length_ = str->Length();
str_ = i::NewArray<uint16_t>(length_ + 1);
str->Write(str_);
@@ -7232,19 +7596,19 @@ String::Value::~Value() {
}
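
Both converters now route through the maybe-based ToString(context) under an isolate-scoped TryCatch, so a throwing toString leaves the wrapper empty instead of going through the deprecated isolate overload. Embedder usage is unchanged (sketch):

#include <v8.h>
#include <cstdio>

// Sketch: a failed conversion leaves *utf8 == NULL.
void PrintValue(v8::Local<v8::Value> value) {
  v8::String::Utf8Value utf8(value);
  if (*utf8 != NULL) std::printf("%s\n", *utf8);
}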
-#define DEFINE_ERROR(NAME) \
- Local<Value> Exception::NAME(v8::Handle<v8::String> raw_message) { \
- i::Isolate* isolate = i::Isolate::Current(); \
- LOG_API(isolate, #NAME); \
- ENTER_V8(isolate); \
- i::Object* error; \
- { \
- i::HandleScope scope(isolate); \
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
- error = *isolate->factory()->New##NAME(message); \
- } \
- i::Handle<i::Object> result(error, isolate); \
- return Utils::ToLocal(result); \
+#define DEFINE_ERROR(NAME) \
+ Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
+ i::Isolate* isolate = i::Isolate::Current(); \
+ LOG_API(isolate, #NAME); \
+ ENTER_V8(isolate); \
+ i::Object* error; \
+ { \
+ i::HandleScope scope(isolate); \
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
+ error = *isolate->factory()->NewError("$" #NAME, message); \
+ } \
+ i::Handle<i::Object> result(error, isolate); \
+ return Utils::ToLocal(result); \
}
DEFINE_ERROR(RangeError)
@@ -7256,7 +7620,7 @@ DEFINE_ERROR(Error)
#undef DEFINE_ERROR
-Local<Message> Exception::CreateMessage(Handle<Value> exception) {
+Local<Message> Exception::CreateMessage(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsHeapObject()) return Local<Message>();
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
@@ -7267,7 +7631,7 @@ Local<Message> Exception::CreateMessage(Handle<Value> exception) {
}
-Local<StackTrace> Exception::GetStackTrace(Handle<Value> exception) {
+Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsJSObject()) return Local<StackTrace>();
i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
@@ -7279,7 +7643,7 @@ Local<StackTrace> Exception::GetStackTrace(Handle<Value> exception) {
// --- D e b u g S u p p o r t ---
-bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
+bool Debug::SetDebugEventListener(EventCallback that, Local<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -7328,8 +7692,8 @@ void Debug::SendCommand(Isolate* isolate,
MaybeLocal<Value> Debug::Call(Local<Context> context,
- v8::Handle<v8::Function> fun,
- v8::Handle<v8::Value> data) {
+ v8::Local<v8::Function> fun,
+ v8::Local<v8::Value> data) {
PREPARE_FOR_EXECUTION(context, "v8::Debug::Call()", Value);
i::Handle<i::Object> data_obj;
if (data.IsEmpty()) {
@@ -7346,15 +7710,15 @@ MaybeLocal<Value> Debug::Call(Local<Context> context,
}
-Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
- v8::Handle<v8::Value> data) {
+Local<Value> Debug::Call(v8::Local<v8::Function> fun,
+ v8::Local<v8::Value> data) {
auto context = ContextFromHeapObject(Utils::OpenHandle(*fun));
RETURN_TO_LOCAL_UNCHECKED(Call(context, fun, data), Value);
}
MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
- v8::Handle<v8::Value> obj) {
+ v8::Local<v8::Value> obj) {
PREPARE_FOR_EXECUTION(context, "v8::Debug::GetMirror()", Value);
i::Debug* isolate_debug = isolate->debug();
has_pending_exception = !isolate_debug->Load();
@@ -7364,7 +7728,7 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
auto fun_obj = i::Object::GetProperty(debug, name).ToHandleChecked();
auto v8_fun = Utils::ToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = {obj};
+ v8::Local<v8::Value> argv[kArgc] = {obj};
Local<Value> result;
has_pending_exception = !v8_fun->Call(context, Utils::ToLocal(debug), kArgc,
argv).ToLocal(&result);
@@ -7373,7 +7737,7 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
}
-Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
+Local<Value> Debug::GetMirror(v8::Local<v8::Value> obj) {
RETURN_TO_LOCAL_UNCHECKED(GetMirror(Local<Context>(), obj), Value);
}
@@ -7386,7 +7750,7 @@ void Debug::ProcessDebugMessages() {
Local<Context> Debug::GetDebugContext() {
i::Isolate* isolate = i::Isolate::Current();
ENTER_V8(isolate);
- return Utils::ToLocal(i::Isolate::Current()->debug()->GetDebugContext());
+ return Utils::ToLocal(isolate->debug()->GetDebugContext());
}
@@ -7396,7 +7760,19 @@ void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
}
-Handle<String> CpuProfileNode::GetFunctionName() const {
+MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
+ Local<Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ i::Handle<i::JSArray> result;
+ if (!i::Runtime::GetInternalProperties(isolate, val).ToHandle(&result))
+ return MaybeLocal<Array>();
+ return Utils::ToLocal(result);
+}
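
A sketch of the new debugger hook; judging from Runtime::GetInternalProperties, the returned array appears to interleave synthetic property names with their values (an assumption, not documented in this diff):

#include <v8.h>
#include <v8-debug.h>

// Sketch: inspect engine-internal slots of a value (e.g. a promise's state).
void DumpInternals(v8::Isolate* isolate, v8::Local<v8::Value> value) {
  v8::Local<v8::Array> props;
  if (v8::Debug::GetInternalProperties(isolate, value).ToLocal(&props)) {
    // props holds name/value pairs describing internal fields.
  }
}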
+
+
+Local<String> CpuProfileNode::GetFunctionName() const {
i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
@@ -7421,7 +7797,7 @@ int CpuProfileNode::GetScriptId() const {
}
-Handle<String> CpuProfileNode::GetScriptResourceName() const {
+Local<String> CpuProfileNode::GetScriptResourceName() const {
i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
@@ -7500,7 +7876,7 @@ void CpuProfile::Delete() {
}
-Handle<String> CpuProfile::GetTitle() const {
+Local<String> CpuProfile::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
@@ -7551,13 +7927,13 @@ void CpuProfiler::SetSamplingInterval(int us) {
}
-void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
+void CpuProfiler::StartProfiling(Local<String> title, bool record_samples) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), record_samples);
}
-CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
return reinterpret_cast<CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
*Utils::OpenHandle(*title)));
@@ -7588,7 +7964,7 @@ HeapGraphEdge::Type HeapGraphEdge::GetType() const {
}
-Handle<Value> HeapGraphEdge::GetName() const {
+Local<Value> HeapGraphEdge::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
@@ -7632,7 +8008,7 @@ HeapGraphNode::Type HeapGraphNode::GetType() const {
}
-Handle<String> HeapGraphNode::GetName() const {
+Local<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
@@ -7733,13 +8109,13 @@ const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
}
-SnapshotObjectId HeapProfiler::GetObjectId(Handle<Value> value) {
+SnapshotObjectId HeapProfiler::GetObjectId(Local<Value> value) {
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
}
-Handle<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
+Local<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
i::Handle<i::Object> obj =
reinterpret_cast<i::HeapProfiler*>(this)->FindHeapObjectById(id);
if (obj.is_null()) return Local<Value>();
@@ -8040,4 +8416,5 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 38bb920036..b20ef5cf66 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -95,6 +95,7 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
return reinterpret_cast<T>(
reinterpret_cast<intptr_t>(
v8::internal::Foreign::cast(obj)->foreign_address()));
@@ -105,6 +106,7 @@ template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(
v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ if (obj == nullptr) return handle(v8::internal::Smi::FromInt(0), isolate);
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -146,6 +148,8 @@ class RegisteredExtension {
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Array, JSArray) \
+ V(Map, JSMap) \
+ V(Set, JSSet) \
V(ArrayBuffer, JSArrayBuffer) \
V(ArrayBufferView, JSArrayBufferView) \
V(TypedArray, JSTypedArray) \
@@ -159,6 +163,7 @@ class RegisteredExtension {
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
+ V(SharedArrayBuffer, JSArrayBuffer) \
V(Name, Name) \
V(String, String) \
V(Symbol, Symbol) \
@@ -202,6 +207,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
+ static inline Local<Map> ToLocal(
+ v8::internal::Handle<v8::internal::JSMap> obj);
+ static inline Local<Set> ToLocal(
+ v8::internal::Handle<v8::internal::JSSet> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
@@ -230,6 +239,9 @@ class Utils {
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<SharedArrayBuffer> ToLocalShared(
+ v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
+
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
@@ -356,10 +368,13 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, JSMap, Map)
+MAKE_TO_LOCAL(ToLocal, JSSet, Set)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
+MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index d31c479fc1..e7e51fed1f 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -11,18 +11,18 @@ namespace v8 {
namespace internal {
-template<typename T>
-template<typename V>
-v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+template <typename T>
+template <typename V>
+v8::Local<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set, return empty handle as per previous behaviour.
- if ((*handle)->IsTheHole()) return v8::Handle<V>();
+ if ((*handle)->IsTheHole()) return v8::Local<V>();
return Utils::Convert<Object, V>(Handle<Object>(handle));
}
-v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
+v8::Local<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -35,40 +35,39 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
}
-#define WRITE_CALL_0(Function, ReturnValue) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(info); \
- return GetReturnValue<ReturnValue>(isolate); \
-}
-
-
-#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
- Arg1 arg1) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(arg1, info); \
- return GetReturnValue<ReturnValue>(isolate); \
-}
-
-
-#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
- Arg1 arg1, \
- Arg2 arg2) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(arg1, arg2, info); \
- return GetReturnValue<ReturnValue>(isolate); \
-}
+#define WRITE_CALL_0(Function, ReturnValue) \
+ v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(info); \
+ return GetReturnValue<ReturnValue>(isolate); \
+ }
+
+
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+ v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f, \
+ Arg1 arg1) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(arg1, info); \
+ return GetReturnValue<ReturnValue>(isolate); \
+ }
+
+
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+ v8::Local<ReturnValue> PropertyCallbackArguments::Call( \
+ Function f, Arg1 arg1, Arg2 arg2) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(arg1, arg2, info); \
+ return GetReturnValue<ReturnValue>(isolate); \
+ }
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
@@ -102,4 +101,5 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 049a34f4ff..c94014505a 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -128,8 +128,8 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
protected:
explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
- template<typename V>
- v8::Handle<V> GetReturnValue(Isolate* isolate);
+ template <typename V>
+ v8::Local<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
@@ -177,14 +177,14 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
-#define WRITE_CALL_0(Function, ReturnValue) \
- v8::Handle<ReturnValue> Call(Function f); \
+#define WRITE_CALL_0(Function, ReturnValue) \
+ v8::Local<ReturnValue> Call(Function f);
-#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
- v8::Handle<ReturnValue> Call(Function f, Arg1 arg1); \
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+ v8::Local<ReturnValue> Call(Function f, Arg1 arg1);
-#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
- v8::Handle<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2); \
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+ v8::Local<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2);
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
void Call(Function f, Arg1 arg1, Arg2 arg2); \
@@ -250,7 +250,7 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- v8::Handle<v8::Value> Call(FunctionCallback f);
+ v8::Local<v8::Value> Call(FunctionCallback f);
private:
internal::Object** argv_;
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 1227156eda..4b4e1d3208 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -118,10 +118,11 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- if (FLAG_enable_ool_constant_pool ||
+ if (FLAG_enable_embedded_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
- // We return the PC for ool constant pool since this function is used by the
- // serializer and expects the address to reside within the code object.
+ // We return the PC for embedded constant pool since this function is used
+ // by the serializer and expects the address to reside within the code
+ // object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@@ -543,7 +544,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
set_target_address_at(constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
@@ -560,21 +561,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
- (FLAG_enable_ool_constant_pool &&
+ (FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
- (FLAG_enable_ool_constant_pool &&
+ (FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
}
}
-Address Assembler::constant_pool_entry_address(
- Address pc, ConstantPoolArray* constant_pool) {
- if (FLAG_enable_ool_constant_pool) {
+Address Assembler::constant_pool_entry_address(Address pc,
+ Address constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
@@ -602,7 +603,7 @@ Address Assembler::constant_pool_entry_address(
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
}
- return reinterpret_cast<Address>(constant_pool) + cp_offset;
+ return constant_pool + cp_offset;
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
@@ -611,8 +612,7 @@ Address Assembler::constant_pool_entry_address(
}
-Address Assembler::target_address_at(Address pc,
- ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
@@ -643,8 +643,7 @@ Address Assembler::target_address_at(Address pc,
}
-void Assembler::set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index a396d0fe6c..96bdf79fac 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -234,9 +234,9 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded.  Being
// specially coded on ARM means that it is a movw/movt instruction, or is an
- // out of line constant pool entry.  These only occur if
- // FLAG_enable_ool_constant_pool is true.
- return FLAG_enable_ool_constant_pool;
+ // embedded constant pool entry.  These only occur if
+ // FLAG_enable_embedded_constant_pool is true.
+ return FLAG_enable_embedded_constant_pool;
}
@@ -449,11 +449,11 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- constant_pool_builder_(),
+ constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- num_pending_32_bit_reloc_info_ = 0;
- num_pending_64_bit_reloc_info_ = 0;
+ num_pending_32_bit_constants_ = 0;
+ num_pending_64_bit_constants_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@@ -471,23 +471,30 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.Finish();
- if (!FLAG_enable_ool_constant_pool) {
- // Emit constant pool if necessary.
+
+ // Emit constant pool if necessary.
+ int constant_pool_offset = 0;
+ if (FLAG_enable_embedded_constant_pool) {
+ constant_pool_offset = EmitEmbeddedConstantPool();
+ } else {
CheckConstPool(true, false);
- DCHECK(num_pending_32_bit_reloc_info_ == 0);
- DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ DCHECK(num_pending_32_bit_constants_ == 0);
+ DCHECK(num_pending_64_bit_constants_ == 0);
}
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->constant_pool_size =
+ (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
}
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -623,7 +630,7 @@ Register Assembler::GetRm(Instr instr) {
Instr Assembler::GetConsantPoolLoadPattern() {
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
return kLdrPpImmedPattern;
} else {
return kLdrPCImmedPattern;
@@ -632,7 +639,7 @@ Instr Assembler::GetConsantPoolLoadPattern() {
Instr Assembler::GetConsantPoolLoadMask() {
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
return kLdrPpImmedMask;
} else {
return kLdrPCImmedMask;
@@ -1044,8 +1051,8 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
- if (FLAG_enable_ool_constant_pool && assembler != NULL &&
- !assembler->is_ool_constant_pool_available()) {
+ if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
+ !assembler->is_constant_pool_available()) {
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@@ -1074,8 +1081,9 @@ int Operand::instructions_required(const Assembler* assembler,
if (use_mov_immediate_load(*this, assembler)) {
// A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
- } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
- // An extended constant pool load.
+ } else if (assembler != NULL &&
+ assembler->ConstantPoolAccessIsInOverflow()) {
+ // An overflowed constant pool load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
// A small constant pool load.
@@ -1100,23 +1108,23 @@ int Operand::instructions_required(const Assembler* assembler,
void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
- RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(rinfo);
+ RecordRelocInfo(x.rmode_);
}
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) {
- if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ if (!FLAG_enable_embedded_constant_pool &&
+ x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
movw(target, imm32 & 0xffff, cond);
movt(target, imm32 >> 16, cond);
} else {
- DCHECK(FLAG_enable_ool_constant_pool);
+ DCHECK(FLAG_enable_embedded_constant_pool);
mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
@@ -1126,10 +1134,11 @@ void Assembler::move_32_bit_immediate(Register rd,
mov(rd, target, LeaveCC, cond);
}
} else {
- DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
- ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
- if (section == ConstantPoolArray::EXTENDED_SECTION) {
- DCHECK(FLAG_enable_ool_constant_pool);
+ DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
+ ConstantPoolEntry::Access access =
+ ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
+ if (access == ConstantPoolEntry::OVERFLOWED) {
+ DCHECK(FLAG_enable_embedded_constant_pool);
Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset.
if (CpuFeatures::IsSupported(ARMv7)) {
@@ -1144,8 +1153,9 @@ void Assembler::move_32_bit_immediate(Register rd,
// Load from constant pool at offset.
ldr(rd, MemOperand(pp, target), cond);
} else {
- DCHECK(section == ConstantPoolArray::SMALL_SECTION);
- ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ DCHECK(access == ConstantPoolEntry::REGULAR);
+ ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
+ cond);
}
}
}
@@ -1315,8 +1325,7 @@ int Assembler::branch_offset(Label* L) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
- if (!is_const_pool_blocked()) BlockConstPoolFor(1);
-
+ BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -2573,7 +2582,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
+ } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2589,18 +2598,17 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
- RelocInfo rinfo(pc_, imm);
- ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
- if (section == ConstantPoolArray::EXTENDED_SECTION) {
- DCHECK(FLAG_enable_ool_constant_pool);
+ ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
+ if (access == ConstantPoolEntry::OVERFLOWED) {
+ DCHECK(FLAG_enable_embedded_constant_pool);
// Emit instructions to load constant pool offset.
movw(ip, 0);
movt(ip, 0);
// Load from constant pool at offset.
vldr(dst, MemOperand(pp, ip));
} else {
- DCHECK(section == ConstantPoolArray::SMALL_SECTION);
- vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+ DCHECK(access == ConstantPoolEntry::REGULAR);
+ vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
}
} else {
// Synthesise the double from ARM immediates.
@@ -2615,7 +2623,8 @@ void Assembler::vmov(const DwVfpRegister dst,
} else if (scratch.is(no_reg)) {
mov(ip, Operand(lo));
vmov(dst, VmovIndexLo, ip);
- if ((lo & 0xffff) == (hi & 0xffff)) {
+ if (((lo & 0xffff) == (hi & 0xffff)) &&
+ CpuFeatures::IsSupported(ARMv7)) {
movt(ip, hi >> 16);
} else {
mov(ip, Operand(hi));
@@ -3574,22 +3583,6 @@ void Assembler::GrowBuffer() {
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
-
- // Relocate pending relocation entries.
- for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
- RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
- DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
- for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
- RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
- DCHECK(rinfo.rmode() == RelocInfo::NONE64);
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- constant_pool_builder_.Relocate(pc_delta);
}
@@ -3597,8 +3590,8 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- DCHECK(num_pending_32_bit_reloc_info_ == 0);
- DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ DCHECK(num_pending_32_bit_constants_ == 0);
+ DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -3609,14 +3602,26 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- DCHECK(num_pending_32_bit_reloc_info_ == 0);
- DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ DCHECK(num_pending_32_bit_constants_ == 0);
+ DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
}
+void Assembler::dq(uint64_t value) {
+ // No relocation info should be pending while using dq. dq is used
+ // to write pure data with no pointers and the constant pool should
+  // be emitted before using dq.
+ DCHECK(num_pending_32_bit_constants_ == 0);
+ DCHECK(num_pending_64_bit_constants_ == 0);
+ CheckBuffer();
+ *reinterpret_cast<uint64_t*>(pc_) = value;
+ pc_ += sizeof(uint64_t);
+}
+
+
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =
@@ -3626,64 +3631,73 @@ void Assembler::emit_code_stub_address(Code* stub) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (RelocInfo::IsNone(rmode) ||
+ // Don't record external references unless the heap will be serialized.
+ (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
+ !emit_debug_code())) {
+ return;
+ }
+ DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ data = RecordedAstId().ToInt();
+ ClearRecordedAstId();
+ }
RelocInfo rinfo(pc_, rmode, data, NULL);
- RecordRelocInfo(rinfo);
+ reloc_info_writer.Write(&rinfo);
}
-void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- // Don't record external references unless the heap will be serialized.
- if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
- return;
- }
- DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(rinfo.pc(),
- rinfo.rmode(),
- RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
+ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
+ RelocInfo::Mode rmode,
+ intptr_t value) {
+ DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
+ bool sharing_ok = RelocInfo::IsNone(rmode) ||
+ !(serializer_enabled() || rmode < RelocInfo::CELL);
+ if (FLAG_enable_embedded_constant_pool) {
+ return constant_pool_builder_.AddEntry(position, value, sharing_ok);
+ } else {
+ DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
+ if (num_pending_32_bit_constants_ == 0) {
+ first_const_pool_32_use_ = position;
}
+ ConstantPoolEntry entry(position, value, sharing_ok);
+ pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
+
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ return ConstantPoolEntry::REGULAR;
}
}
-ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
- const RelocInfo& rinfo) {
- if (FLAG_enable_ool_constant_pool) {
- return constant_pool_builder_.AddEntry(this, rinfo);
+ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
+ double value) {
+ if (FLAG_enable_embedded_constant_pool) {
+ return constant_pool_builder_.AddEntry(position, value);
} else {
- if (rinfo.rmode() == RelocInfo::NONE64) {
- DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
- if (num_pending_64_bit_reloc_info_ == 0) {
- first_const_pool_64_use_ = pc_offset();
- }
- pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
- } else {
- DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
- if (num_pending_32_bit_reloc_info_ == 0) {
- first_const_pool_32_use_ = pc_offset();
- }
- pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
+ if (num_pending_64_bit_constants_ == 0) {
+ first_const_pool_64_use_ = position;
}
+ ConstantPoolEntry entry(position, value);
+ pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
+
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
- return ConstantPoolArray::SMALL_SECTION;
+ return ConstantPoolEntry::REGULAR;
}
}
void Assembler::BlockConstPoolFor(int instructions) {
- if (FLAG_enable_ool_constant_pool) {
- // Should be a no-op if using an out-of-line constant pool.
- DCHECK(num_pending_32_bit_reloc_info_ == 0);
- DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ if (FLAG_enable_embedded_constant_pool) {
+ // Should be a no-op if using an embedded constant pool.
+ DCHECK(num_pending_32_bit_constants_ == 0);
+ DCHECK(num_pending_64_bit_constants_ == 0);
return;
}
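As a reference for the sharing_ok computation in ConstantPoolAddEntry above, a hedged stand-alone sketch; the enum and helper are illustrative stand-ins for RelocInfo::Mode, and only the ordering the code relies on (patchable modes sort below CELL, NONE carries no relocation) is modeled:

```cpp
#include <cassert>

// Illustrative stand-in for RelocInfo::Mode: only the relative order matters
// here. Modes below CELL may be patched after emission and must keep their
// own pool slot; NONE means the constant carries no relocation at all.
enum Mode { CODE_TARGET, EMBEDDED_OBJECT, CELL, EXTERNAL_REFERENCE, NONE };

// Mirrors: RelocInfo::IsNone(rmode) ||
//          !(serializer_enabled() || rmode < RelocInfo::CELL)
bool SharingOk(Mode rmode, bool serializer_enabled) {
  return rmode == NONE || (!serializer_enabled && rmode >= CELL);
}

int main() {
  assert(SharingOk(NONE, true));           // untracked constants always merge
  assert(SharingOk(CELL, false));          // stable modes merge when not serializing
  assert(!SharingOk(CODE_TARGET, false));  // patchable targets never merge
  assert(!SharingOk(CELL, true));          // the serializer needs one slot per use
  return 0;
}
```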
@@ -3692,10 +3706,11 @@ void Assembler::BlockConstPoolFor(int instructions) {
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
- DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
+ DCHECK((num_pending_32_bit_constants_ == 0) ||
(start - first_const_pool_32_use_ +
- num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
- DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
+ num_pending_64_bit_constants_ * kDoubleSize <
+ kMaxDistToIntPool));
+ DCHECK((num_pending_64_bit_constants_ == 0) ||
(start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
no_const_pool_before_ = pc_limit;
@@ -3708,10 +3723,10 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- if (FLAG_enable_ool_constant_pool) {
- // Should be a no-op if using an out-of-line constant pool.
- DCHECK(num_pending_32_bit_reloc_info_ == 0);
- DCHECK(num_pending_64_bit_reloc_info_ == 0);
+ if (FLAG_enable_embedded_constant_pool) {
+ // Should be a no-op if using an embedded constant pool.
+ DCHECK(num_pending_32_bit_constants_ == 0);
+ DCHECK(num_pending_64_bit_constants_ == 0);
return;
}
@@ -3725,8 +3740,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
- if ((num_pending_32_bit_reloc_info_ == 0) &&
- (num_pending_64_bit_reloc_info_ == 0)) {
+ if ((num_pending_32_bit_constants_ == 0) &&
+ (num_pending_64_bit_constants_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -3737,18 +3752,19 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
- int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
- bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
+ int estimated_size_after_marker =
+ num_pending_32_bit_constants_ * kPointerSize;
+ bool has_fp_values = (num_pending_64_bit_constants_ > 0);
bool require_64_bit_align = false;
if (has_fp_values) {
- require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
+    require_64_bit_align = !IsAligned(
+ reinterpret_cast<intptr_t>(pc_ + size_up_to_marker), kDoubleAlignment);
if (require_64_bit_align) {
- size_after_marker += kInstrSize;
+ estimated_size_after_marker += kInstrSize;
}
- size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
+ estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
}
-
- int size = size_up_to_marker + size_after_marker;
+ int estimated_size = size_up_to_marker + estimated_size_after_marker;
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
@@ -3762,17 +3778,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
bool need_emit = false;
if (has_fp_values) {
- int dist64 = pc_offset() +
- size -
- num_pending_32_bit_reloc_info_ * kPointerSize -
+ int dist64 = pc_offset() + estimated_size -
+ num_pending_32_bit_constants_ * kPointerSize -
first_const_pool_64_use_;
if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
(!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
need_emit = true;
}
}
- int dist32 =
- pc_offset() + size - first_const_pool_32_use_;
+ int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
(!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
need_emit = true;
@@ -3780,6 +3794,37 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (!need_emit) return;
}
+ // Deduplicate constants.
+ int size_after_marker = estimated_size_after_marker;
+ for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+ ConstantPoolEntry& entry = pending_64_bit_constants_[i];
+ DCHECK(!entry.is_merged());
+ for (int j = 0; j < i; j++) {
+ if (entry.value64() == pending_64_bit_constants_[j].value64()) {
+ DCHECK(!pending_64_bit_constants_[j].is_merged());
+ entry.set_merged_index(j);
+ size_after_marker -= kDoubleSize;
+ break;
+ }
+ }
+ }
+
+ for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+ ConstantPoolEntry& entry = pending_32_bit_constants_[i];
+ DCHECK(!entry.is_merged());
+ if (!entry.sharing_ok()) continue;
+ for (int j = 0; j < i; j++) {
+ if (entry.value() == pending_32_bit_constants_[j].value()) {
+ DCHECK(!pending_32_bit_constants_[j].is_merged());
+ entry.set_merged_index(j);
+ size_after_marker -= kPointerSize;
+ break;
+ }
+ }
+ }
+
+ int size = size_up_to_marker + size_after_marker;
+
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
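The deduplication pass above reduces to a first-fit merge over the pending buffers; a toy model (`PendingEntry` and `Dedup` are hypothetical names) showing the merged_index bookkeeping and the size savings:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Toy model of the first-fit deduplication above: each pending entry either
// keeps its own pool slot or records the index of an earlier identical one.
struct PendingEntry {
  uint64_t value;
  int merged_index = -1;  // -1 means "not merged"
  bool is_merged() const { return merged_index != -1; }
};

int Dedup(std::vector<PendingEntry>& entries, int entry_size) {
  int saved = 0;
  for (size_t i = 0; i < entries.size(); i++) {
    for (size_t j = 0; j < i; j++) {
      if (entries[i].value == entries[j].value) {
        entries[i].merged_index = static_cast<int>(j);
        saved += entry_size;  // one fewer slot emitted in the pool
        break;
      }
    }
  }
  return saved;
}

int main() {
  std::vector<PendingEntry> pool = {{42}, {7}, {42}, {42}};
  int saved = Dedup(pool, 8);  // 8 == kDoubleSize
  assert(saved == 16);         // two duplicates of 42 merged away
  assert(pool[2].merged_index == 0 && pool[3].merged_index == 0);
  return 0;
}
```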
@@ -3789,11 +3834,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RecordComment("[ Constant Pool");
RecordConstPool(size);
+ Label size_check;
+ bind(&size_check);
+
// Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) {
- b(&after_pool);
- }
+ if (require_jump) b(size - kPcLoadDelta);
// Put down constant pool marker "Undefined instruction".
// The data size helps disassembly know what to print.
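The emission loops in the following hunks patch each pending load with a pc-relative delta. A worked example of that arithmetic, assuming ARM's kPcLoadDelta of 8 and illustrative offsets:

```cpp
#include <cassert>

// kPcLoadDelta is 8 on ARM: a pc-relative load sees the pc two instructions
// (8 bytes) past itself, so the encoded immediate is the distance from pc+8
// to the pool slot holding the constant.
constexpr int kPcLoadDelta = 8;

int LdrDelta(int pool_slot_offset, int load_position) {
  return pool_slot_offset - load_position - kPcLoadDelta;
}

int main() {
  // A load emitted at pc offset 100 whose constant lands at offset 140:
  assert(LdrDelta(140, 100) == 32);
  // A merged entry reuses the first copy's delta, adjusted by the distance
  // between the two load sites (first load at 96, duplicate at 100), exactly
  // as 'delta += merged.position() - entry.position()' does below:
  int first_delta = LdrDelta(140, 96);
  assert(first_delta + (96 - 100) == LdrDelta(140, 100));
  return 0;
}
```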
@@ -3806,104 +3851,77 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
- for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
- RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+ for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+ ConstantPoolEntry& entry = pending_64_bit_constants_[i];
- DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
-
- Instr instr = instr_at(rinfo.pc());
+ Instr instr = instr_at(entry.position());
// Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
DCHECK((IsVldrDPcImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ int delta = pc_offset() - entry.position() - kPcLoadDelta;
DCHECK(is_uint10(delta));
- bool found = false;
- uint64_t value = rinfo.raw_data64();
- for (int j = 0; j < i; j++) {
- RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
- if (value == rinfo2.raw_data64()) {
- found = true;
- DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
- Instr instr2 = instr_at(rinfo2.pc());
- DCHECK(IsVldrDPcImmediateOffset(instr2));
- delta = GetVldrDRegisterImmediateOffset(instr2);
- delta += rinfo2.pc() - rinfo.pc();
- break;
- }
+ if (entry.is_merged()) {
+ ConstantPoolEntry& merged =
+ pending_64_bit_constants_[entry.merged_index()];
+ DCHECK(entry.value64() == merged.value64());
+ Instr merged_instr = instr_at(merged.position());
+ DCHECK(IsVldrDPcImmediateOffset(merged_instr));
+ delta = GetVldrDRegisterImmediateOffset(merged_instr);
+ delta += merged.position() - entry.position();
}
-
- instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
-
- if (!found) {
- uint64_t uint_data = rinfo.raw_data64();
- emit(uint_data & 0xFFFFFFFF);
- emit(uint_data >> 32);
+ instr_at_put(entry.position(),
+ SetVldrDRegisterImmediateOffset(instr, delta));
+ if (!entry.is_merged()) {
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(pc_), kDoubleAlignment));
+ dq(entry.value64());
}
}
// Emit 32-bit constant pool entries.
- for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
- RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
- DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL &&
- rinfo.rmode() != RelocInfo::NONE64);
-
- Instr instr = instr_at(rinfo.pc());
+ for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+ ConstantPoolEntry& entry = pending_32_bit_constants_[i];
+ Instr instr = instr_at(entry.position());
// 64-bit loads shouldn't get here.
DCHECK(!IsVldrDPcImmediateOffset(instr));
-
- if (IsLdrPcImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0) {
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- DCHECK(is_uint12(delta));
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
-
- bool found = false;
- if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
- for (int j = 0; j < i; j++) {
- RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
-
- if ((rinfo2.data() == rinfo.data()) &&
- (rinfo2.rmode() == rinfo.rmode())) {
- Instr instr2 = instr_at(rinfo2.pc());
- if (IsLdrPcImmediateOffset(instr2)) {
- delta = GetLdrRegisterImmediateOffset(instr2);
- delta += rinfo2.pc() - rinfo.pc();
- found = true;
- break;
- }
- }
- }
- }
-
- instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
-
- if (!found) {
- emit(rinfo.data());
- }
- } else {
- DCHECK(IsMovW(instr));
+ DCHECK(!IsMovW(instr));
+ DCHECK(IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0);
+
+ int delta = pc_offset() - entry.position() - kPcLoadDelta;
+ DCHECK(is_uint12(delta));
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+
+ if (entry.is_merged()) {
+ DCHECK(entry.sharing_ok());
+ ConstantPoolEntry& merged =
+ pending_32_bit_constants_[entry.merged_index()];
+ DCHECK(entry.value() == merged.value());
+ Instr merged_instr = instr_at(merged.position());
+ DCHECK(IsLdrPcImmediateOffset(merged_instr));
+ delta = GetLdrRegisterImmediateOffset(merged_instr);
+ delta += merged.position() - entry.position();
+ }
+ instr_at_put(entry.position(),
+ SetLdrRegisterImmediateOffset(instr, delta));
+ if (!entry.is_merged()) {
+ emit(entry.value());
}
}
- num_pending_32_bit_reloc_info_ = 0;
- num_pending_64_bit_reloc_info_ = 0;
+ num_pending_32_bit_constants_ = 0;
+ num_pending_64_bit_constants_ = 0;
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
RecordComment("]");
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
+ DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
}
// Since a constant pool was just emitted, move the check offset forward by
@@ -3912,229 +3930,61 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- if (!FLAG_enable_ool_constant_pool) {
- return isolate->factory()->empty_constant_pool_array();
- }
- return constant_pool_builder_.New(isolate);
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- constant_pool_builder_.Populate(this, constant_pool);
-}
-
+void Assembler::PatchConstantPoolAccessInstruction(
+ int pc_offset, int offset, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ Address pc = buffer_ + pc_offset;
-ConstantPoolBuilder::ConstantPoolBuilder()
- : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
-
-
-bool ConstantPoolBuilder::IsEmpty() {
- return entries_.size() == 0;
-}
-
-
-ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
- RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::NONE64) {
- return ConstantPoolArray::INT64;
- } else if (!RelocInfo::IsGCRelocMode(rmode)) {
- return ConstantPoolArray::INT32;
- } else if (RelocInfo::IsCodeTarget(rmode)) {
- return ConstantPoolArray::CODE_PTR;
- } else {
- DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
- return ConstantPoolArray::HEAP_PTR;
- }
-}
-
-
-ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
- Assembler* assm, const RelocInfo& rinfo) {
- RelocInfo::Mode rmode = rinfo.rmode();
- DCHECK(rmode != RelocInfo::COMMENT &&
- rmode != RelocInfo::POSITION &&
- rmode != RelocInfo::STATEMENT_POSITION &&
- rmode != RelocInfo::CONST_POOL);
-
- // Try to merge entries which won't be patched.
- int merged_index = -1;
- ConstantPoolArray::LayoutSection entry_section = current_section_;
- if (RelocInfo::IsNone(rmode) ||
- (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
- size_t i;
- std::vector<ConstantPoolEntry>::const_iterator it;
- for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
- if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
- // Merge with found entry.
- merged_index = i;
- entry_section = entries_[i].section_;
- break;
- }
- }
- }
- DCHECK(entry_section <= current_section_);
- entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
-
- if (merged_index == -1) {
- // Not merged, so update the appropriate count.
- number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
- }
-
- // Check if we still have room for another entry in the small section
- // given Arm's ldr and vldr immediate offset range.
- if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
- !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
- is_uint10(ConstantPoolArray::MaxInt64Offset(
- small_entries()->count_of(ConstantPoolArray::INT64))))) {
- current_section_ = ConstantPoolArray::EXTENDED_SECTION;
- }
- return entry_section;
-}
-
-
-void ConstantPoolBuilder::Relocate(int pc_delta) {
- for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
- entry != entries_.end(); entry++) {
- DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
- entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
- }
-}
-
-
-Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
- if (IsEmpty()) {
- return isolate->factory()->empty_constant_pool_array();
- } else if (extended_entries()->is_empty()) {
- return isolate->factory()->NewConstantPoolArray(*small_entries());
- } else {
- DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
- return isolate->factory()->NewExtendedConstantPoolArray(
- *small_entries(), *extended_entries());
- }
-}
-
-
-void ConstantPoolBuilder::Populate(Assembler* assm,
- ConstantPoolArray* constant_pool) {
- DCHECK_EQ(extended_entries()->is_empty(),
- !constant_pool->is_extended_layout());
- DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
- constant_pool, ConstantPoolArray::SMALL_SECTION)));
- if (constant_pool->is_extended_layout()) {
- DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
- constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
- }
-
- // Set up initial offsets.
- int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
- [ConstantPoolArray::NUMBER_OF_TYPES];
- for (int section = 0; section <= constant_pool->final_section(); section++) {
- int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
- ? small_entries()->total_count()
- : 0;
- for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
- ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
- if (number_of_entries_[section].count_of(type) != 0) {
- offsets[section][type] = constant_pool->OffsetOfElementAt(
- number_of_entries_[section].base_of(type) + section_start);
- }
- }
- }
-
- for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
- entry != entries_.end(); entry++) {
- RelocInfo rinfo = entry->rinfo_;
- RelocInfo::Mode rmode = entry->rinfo_.rmode();
- ConstantPoolArray::Type type = GetConstantPoolType(rmode);
-
- // Update constant pool if necessary and get the entry's offset.
- int offset;
- if (entry->merged_index_ == -1) {
- offset = offsets[entry->section_][type];
- offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
- if (type == ConstantPoolArray::INT64) {
- constant_pool->set_at_offset(offset, rinfo.data64());
- } else if (type == ConstantPoolArray::INT32) {
- constant_pool->set_at_offset(offset,
- static_cast<int32_t>(rinfo.data()));
- } else if (type == ConstantPoolArray::CODE_PTR) {
- constant_pool->set_at_offset(offset,
- reinterpret_cast<Address>(rinfo.data()));
- } else {
- DCHECK(type == ConstantPoolArray::HEAP_PTR);
- constant_pool->set_at_offset(offset,
- reinterpret_cast<Object*>(rinfo.data()));
- }
- offset -= kHeapObjectTag;
- entry->merged_index_ = offset; // Stash offset for merged entries.
- } else {
- DCHECK(entry->merged_index_ < (entry - entries_.begin()));
- offset = entries_[entry->merged_index_].merged_index_;
- }
-
- // Patch vldr/ldr instruction with correct offset.
- Instr instr = assm->instr_at(rinfo.pc());
- if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
- Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
- DCHECK((Assembler::IsMovW(instr) &&
- Instruction::ImmedMovwMovtValue(instr) == 0));
- DCHECK((Assembler::IsMovT(next_instr) &&
- Instruction::ImmedMovwMovtValue(next_instr) == 0));
- assm->instr_at_put(
- rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
- assm->instr_at_put(
- rinfo.pc() + Assembler::kInstrSize,
- Assembler::PatchMovwImmediate(next_instr, offset >> 16));
- } else {
- // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
- Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
- Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
- Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
- DCHECK((Assembler::IsMovImmed(instr) &&
- Instruction::Immed8Value(instr) == 0));
- DCHECK((Assembler::IsOrrImmed(instr_2) &&
- Instruction::Immed8Value(instr_2) == 0) &&
- Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
- DCHECK((Assembler::IsOrrImmed(instr_3) &&
- Instruction::Immed8Value(instr_3) == 0) &&
- Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
- DCHECK((Assembler::IsOrrImmed(instr_4) &&
- Instruction::Immed8Value(instr_4) == 0) &&
- Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
- assm->instr_at_put(
- rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
- assm->instr_at_put(
- rinfo.pc() + Assembler::kInstrSize,
- Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
- assm->instr_at_put(
- rinfo.pc() + 2 * Assembler::kInstrSize,
- Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
- assm->instr_at_put(
- rinfo.pc() + 3 * Assembler::kInstrSize,
- Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
- }
- } else if (type == ConstantPoolArray::INT64) {
- // Instruction to patch must be 'vldr rd, [pp, #0]'.
- DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
- Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
- DCHECK(is_uint10(offset));
- assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
- instr, offset));
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = instr_at(pc);
+ if (access == ConstantPoolEntry::OVERFLOWED) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
+ Instr next_instr = instr_at(pc + kInstrSize);
+ DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
+ DCHECK((IsMovT(next_instr) &&
+ Instruction::ImmedMovwMovtValue(next_instr) == 0));
+ instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
+ instr_at_put(pc + kInstrSize,
+ PatchMovwImmediate(next_instr, offset >> 16));
} else {
- // Instruction to patch must be 'ldr rd, [pp, #0]'.
- DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
- Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
- DCHECK(is_uint12(offset));
- assm->instr_at_put(
- rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+      // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
+ Instr instr_2 = instr_at(pc + kInstrSize);
+ Instr instr_3 = instr_at(pc + 2 * kInstrSize);
+ Instr instr_4 = instr_at(pc + 3 * kInstrSize);
+ DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
+ DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
+ GetRn(instr_2).is(GetRd(instr_2)));
+ DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
+ GetRn(instr_3).is(GetRd(instr_3)));
+ DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
+ GetRn(instr_4).is(GetRd(instr_4)));
+ instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
+ instr_at_put(pc + kInstrSize,
+ PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
+ instr_at_put(pc + 2 * kInstrSize,
+ PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
+ instr_at_put(pc + 3 * kInstrSize,
+ PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
+ } else if (type == ConstantPoolEntry::DOUBLE) {
+ // Instruction to patch must be 'vldr rd, [pp, #0]'.
+ DCHECK((IsVldrDPpImmediateOffset(instr) &&
+ GetVldrDRegisterImmediateOffset(instr) == 0));
+ DCHECK(is_uint10(offset));
+ instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ DCHECK((IsLdrPpImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0));
+ DCHECK(is_uint12(offset));
+ instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
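Before the header diff, a sketch of the two offset encodings PatchConstantPoolAccessInstruction produces above for OVERFLOWED entries — a movw/movt halfword pair on ARMv7, otherwise four byte lanes rebuilt with mov/orr; the helper names are illustrative:

```cpp
#include <cassert>
#include <cstdint>

// Models how a 32-bit pool offset is split across the patched instruction
// sequences above. On ARMv7 it becomes movw (low 16) plus movt (high 16);
// pre-ARMv7 it is ORed together from four 8-bit immediates, one byte lane
// per mov/orr instruction.
constexpr uint32_t kImm8Mask = 0xff;

void SplitArmv7(uint32_t offset, uint32_t* movw_imm, uint32_t* movt_imm) {
  *movw_imm = offset & 0xffff;
  *movt_imm = offset >> 16;
}

uint32_t RebuildPreArmv7(uint32_t offset) {
  // mov rd, #byte0; orr rd, rd, #byte1<<8; orr ...<<16; orr ...<<24
  uint32_t rd = offset & kImm8Mask;
  rd |= offset & (kImm8Mask << 8);
  rd |= offset & (kImm8Mask << 16);
  rd |= offset & (kImm8Mask << 24);
  return rd;
}

int main() {
  uint32_t lo, hi;
  SplitArmv7(0x12345678, &lo, &hi);
  assert(lo == 0x5678 && hi == 0x1234);
  assert(RebuildPreArmv7(0x12345678) == 0x12345678);
  return 0;
}
```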
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 5422b3f20e..5d66c39a77 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -94,7 +94,7 @@ const int kRegister_pc_Code = 15;
struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters =
- FLAG_enable_ool_constant_pool ? 8 : 9;
+ FLAG_enable_embedded_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
@@ -122,7 +122,7 @@ struct Register {
"r7",
"r8",
};
- if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ if (FLAG_enable_embedded_constant_pool && (index >= 7)) {
return names[index + 1];
}
return names[index];
@@ -164,7 +164,7 @@ const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
// Used as context register.
const Register r7 = {kRegister_r7_Code};
-// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
+// Used as constant pool pointer register if FLAG_enable_embedded_constant_pool.
const Register r8 = { kRegister_r8_Code };
// Used as lithium codegen scratch register.
const Register r9 = { kRegister_r9_Code };
@@ -651,52 +651,6 @@ class NeonListOperand BASE_EMBEDDED {
};
-// Class used to build a constant pool.
-class ConstantPoolBuilder BASE_EMBEDDED {
- public:
- ConstantPoolBuilder();
- ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
- const RelocInfo& rinfo);
- void Relocate(int pc_delta);
- bool IsEmpty();
- Handle<ConstantPoolArray> New(Isolate* isolate);
- void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
-
- inline ConstantPoolArray::LayoutSection current_section() const {
- return current_section_;
- }
-
- inline ConstantPoolArray::NumberOfEntries* number_of_entries(
- ConstantPoolArray::LayoutSection section) {
- return &number_of_entries_[section];
- }
-
- inline ConstantPoolArray::NumberOfEntries* small_entries() {
- return number_of_entries(ConstantPoolArray::SMALL_SECTION);
- }
-
- inline ConstantPoolArray::NumberOfEntries* extended_entries() {
- return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
- }
-
- private:
- struct ConstantPoolEntry {
- ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
- int merged_index)
- : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
-
- RelocInfo rinfo_;
- ConstantPoolArray::LayoutSection section_;
- int merged_index_;
- };
-
- ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
-
- std::vector<ConstantPoolEntry> entries_;
- ConstantPoolArray::LayoutSection current_section_;
- ConstantPoolArray::NumberOfEntries number_of_entries_[2];
-};
-
struct VmovIndex {
unsigned char index;
};
@@ -754,19 +708,16 @@ class Assembler : public AssemblerBase {
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
- INLINE(static Address constant_pool_entry_address(
- Address pc, ConstantPoolArray* constant_pool));
+ INLINE(static Address constant_pool_entry_address(Address pc,
+ Address constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool));
- INLINE(static void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
+ INLINE(static Address target_address_at(Address pc, Address constant_pool));
+ INLINE(static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
@@ -774,7 +725,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -841,6 +792,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -1448,11 +1402,13 @@ class Assembler : public AssemblerBase {
void RecordConstPool(int size);
// Writes a single byte or word of data in the code stream. Used
- // for inline tables, e.g., jump-tables. The constant pool should be
- // emitted before any use of db and dd to ensure that constant pools
+ // for inline tables, e.g., jump-tables. CheckConstantPool() should be
+ // called before any use of db/dd/dq/dp to ensure that constant pools
// are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data) { dd(data); }
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
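A minimal model of the db/dd/dq/dp family declared above, assuming a little-endian target as on ARM; `DataEmitter` stands in for the assembler's code buffer and is not V8 API:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Each emitter writes raw little-endian data at the current emission point
// and advances it, recording no relocation info — which is why the constant
// pool must be flushed first. dp is dd on a 32-bit target, as declared above.
struct DataEmitter {
  std::vector<uint8_t> buf;
  void db(uint8_t v) { buf.push_back(v); }
  void dd(uint32_t v) {
    size_t pc = buf.size();
    buf.resize(pc + sizeof(v));
    std::memcpy(&buf[pc], &v, sizeof(v));
  }
  void dq(uint64_t v) {
    size_t pc = buf.size();
    buf.resize(pc + sizeof(v));
    std::memcpy(&buf[pc], &v, sizeof(v));
  }
};

int main() {
  DataEmitter e;
  e.db(0xAA);
  e.dd(0x11223344);
  e.dq(0x0102030405060708ULL);
  assert(e.buf.size() == 1 + 4 + 8);
  assert(e.buf[1] == 0x44);  // low byte first on a little-endian host
  return 0;
}
```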
@@ -1524,8 +1480,8 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer, it therefore acts as the limit.
- static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
- static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
+ static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
+ static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
// Postpone the generation of the constant pool for the specified number of
// instructions.
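The pending-buffer bounds a few lines up follow from the worst case of one pooled constant per instruction within each pool's reach; the arithmetic, with the constants copied from this header:

```cpp
#include <cassert>

// Every instruction in range of the pool could add one entry, so the bound
// on pending entries is the pool's reach divided by the instruction size.
constexpr int KB = 1024;
constexpr int kInstrSize = 4;
constexpr int kMaxDistToIntPool = 4 * KB;
constexpr int kMaxDistToFPPool = 1 * KB;
constexpr int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
constexpr int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;

static_assert(kMaxNumPending32Constants == 1024, "4KB of ldr-reachable code");
static_assert(kMaxNumPending64Constants == 256, "1KB of vldr-reachable code");

int main() { return 0; }
```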
@@ -1540,17 +1496,19 @@ class Assembler : public AssemblerBase {
}
}
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ int EmitEmbeddedConstantPool() {
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ return constant_pool_builder_.Emit(this);
+ }
- bool use_extended_constant_pool() const {
- return constant_pool_builder_.current_section() ==
- ConstantPoolArray::EXTENDED_SECTION;
+ bool ConstantPoolAccessIsInOverflow() const {
+ return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
+ ConstantPoolEntry::OVERFLOWED;
}
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type);
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@@ -1585,10 +1543,10 @@ class Assembler : public AssemblerBase {
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
- DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
- (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
+ DCHECK((num_pending_32_bit_constants_ == 0) ||
+ (start + num_pending_64_bit_constants_ * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
- DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
+ DCHECK((num_pending_64_bit_constants_ == 0) ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
@@ -1647,20 +1605,20 @@ class Assembler : public AssemblerBase {
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
- // Relocation info records are also used during code generation as temporary
+ // ConstantPoolEntry records are used during code generation as temporary
// containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
+ // to the constant pool. These records are temporarily stored in a separate
+ // buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // The buffers of pending relocation info.
- RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
- RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
- // Number of pending reloc info entries in the 32 bits buffer.
- int num_pending_32_bit_reloc_info_;
- // Number of pending reloc info entries in the 64 bits buffer.
- int num_pending_64_bit_reloc_info_;
+ // The buffers of pending constant pool entries.
+ ConstantPoolEntry pending_32_bit_constants_[kMaxNumPending32Constants];
+ ConstantPoolEntry pending_64_bit_constants_[kMaxNumPending64Constants];
+ // Number of pending constant pool entries in the 32 bits buffer.
+ int num_pending_32_bit_constants_;
+ // Number of pending constant pool entries in the 64 bits buffer.
+ int num_pending_64_bit_constants_;
ConstantPoolBuilder constant_pool_builder_;
@@ -1689,15 +1647,12 @@ class Assembler : public AssemblerBase {
void bind_to(Label* L, int pos);
void next(Label* L);
- enum UseConstantPoolMode {
- USE_CONSTANT_POOL,
- DONT_USE_CONSTANT_POOL
- };
-
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
- void RecordRelocInfo(const RelocInfo& rinfo);
- ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
+ ConstantPoolEntry::Access ConstantPoolAddEntry(int position,
+ RelocInfo::Mode rmode,
+ intptr_t value);
+ ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value);
friend class RelocInfo;
friend class CodePatcher;
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 3dd61b00a4..2859f97dc4 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -343,6 +343,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -367,10 +368,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r2);
}
- // Preserve the two incoming parameters on the stack.
+ // Preserve the incoming parameters on the stack.
__ SmiTag(r0);
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
+ __ push(r0);
+ __ push(r1);
+ if (use_new_target) {
+ __ push(r3);
+ }
Label rt_call, allocated, normal_new, count_incremented;
__ cmp(r1, r3);
@@ -446,7 +450,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size (not including memento if create_memento)
+ // r3: object size (including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@@ -520,7 +524,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r4, r4, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
+ // allocated object if not; allocate and initialize a FixedArray if yes.
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
@@ -575,15 +579,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(&entry);
- __ bind(&loop);
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(r2, r6, r0);
// Store the initialized FixedArray into the properties field of
// the JSObject
@@ -617,7 +614,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ ldr(r2, MemOperand(sp, offset));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
@@ -631,23 +629,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
+ // Restore the parameters.
+ if (use_new_target) {
+ __ pop(r3);
+ }
+ __ pop(r1);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ ldr(r0, MemOperand(sp));
+ __ SmiUntag(r0);
+
+ // Push new.target onto the construct frame. This is stored just below the
+ // receiver on the stack.
+ if (use_new_target) {
+ __ push(r3);
+ }
__ push(r4);
__ push(r4);
- // Reload the number of arguments and the constructor from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
-
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Set up number of arguments for function call below
- __ SmiUntag(r0, r3);
-
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
// r1: constructor function
@@ -655,9 +657,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: new.target (if used)
+ // sp[2/3]: number of arguments (smi-tagged)
Label loop, entry;
+ __ SmiTag(r3, r0);
__ b(&entry);
__ bind(&loop);
__ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
@@ -680,15 +683,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -699,8 +704,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -718,9 +723,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ ldr(r1, MemOperand(sp, offset));
// Leave construct frame.
}
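The exit-path offset above generalizes to a one-line rule: pushing new.target shifts every deeper slot down one word, hence the `(use_new_target ? 2 : 1)` scaling. An illustrative helper (kPointerSize is 4 on ARM):

```cpp
#include <cassert>

// Mirrors 'int offset = (use_new_target ? 2 : 1) * kPointerSize' above:
// with new.target pushed below the receiver, the smi-tagged argument count
// sits one slot deeper on the stack.
constexpr int kPointerSize = 4;

int ArgcSlotOffset(bool use_new_target) {
  // sp[0]: receiver, sp[1]: new.target (if used), sp[1/2]: argc (smi-tagged)
  return (use_new_target ? 2 : 1) * kPointerSize;
}

int main() {
  assert(ArgcSlotOffset(false) == 1 * kPointerSize);  // sp[1]
  assert(ArgcSlotOffset(true) == 2 * kPointerSize);   // sp[2]
  return 0;
}
```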
@@ -733,12 +739,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@@ -789,8 +800,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ sub(r4, r4, Operand(2), SetCC);
__ b(ge, &loop);
- __ add(r0, r0, Operand(1));
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -819,7 +828,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// r0: result
// sp[0]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ ldr(r1, MemOperand(sp, 0));
+ // Get arguments count, skipping over new.target.
+ __ ldr(r1, MemOperand(sp, kPointerSize));
// Leave construct frame.
}
@@ -874,7 +884,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
+ // r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -922,7 +932,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- if (!FLAG_enable_ool_constant_pool) {
+ if (!FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand(r4));
}
if (kR9Available == 1) {
@@ -1166,8 +1176,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- if (FLAG_enable_ool_constant_pool) {
- __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
}
// Load the OSR entrypoint offset from the deoptimization data.
@@ -1175,10 +1187,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex)));
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Compute the target address = code start + osr_offset
+ __ add(lr, r0, Operand::SmiUntag(r1));
// And "return" to the OSR entry point of the function.
__ Ret();
@@ -1392,6 +1402,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
__ ldr(key, MemOperand(fp, indexOffset));
__ b(&entry);
@@ -1401,7 +1413,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ mov(slot, Operand(Smi::FromInt(index)));
+ __ Move(vector, feedback_vector);
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@@ -1649,8 +1668,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
- (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
- fp.bit() | lr.bit());
+ (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1722,6 +1741,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
+
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
+ kSmiTagSize)));
+ __ b(eq, &no_strong_error);
+
+ // What we really care about is the required number of arguments.
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
+ __ cmp(r0, Operand::SmiUntag(r4));
+ __ b(ge, &no_strong_error);
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Calculate copy start address into r0 and copy end address is fp.
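The strong-mode check added above boils down to a simple predicate — throw before adapting the frame when a strong function receives fewer arguments than its declared length. A hedged sketch with illustrative names:

```cpp
#include <cassert>

// Models the decision made by the tst/cmp sequence above: the strong-mode
// bit comes from SharedFunctionInfo's compiler hints, and the declared
// length is what the adaptor compares the actual count against.
bool ThrowsStrongModeTooFewArguments(bool is_strong, int actual,
                                     int declared_length) {
  return is_strong && actual < declared_length;
}

int main() {
  assert(ThrowsStrongModeTooFewArguments(true, 1, 2));    // throws
  assert(!ThrowsStrongModeTooFewArguments(true, 2, 2));   // enough arguments
  assert(!ThrowsStrongModeTooFewArguments(false, 1, 2));  // sloppy: pads
  return 0;
}
```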
@@ -1792,6 +1832,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 6133281a63..005fb97513 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -93,9 +93,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond);
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cond, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -113,15 +112,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ r0.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@@ -238,9 +237,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond) {
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cond, Strength strength) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
@@ -251,10 +249,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
+ // Call runtime on identical JSObjects.
__ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics, since
+ // we need to throw a TypeError. Smis have already been ruled out.
+ __ cmp(r4, Operand(HEAP_NUMBER_TYPE));
+ __ b(eq, &return_equal);
+ __ tst(r4, Operand(kIsNotStringMask));
+ __ b(ne, slow);
+ }
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@@ -262,8 +270,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cond != eq) {
__ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ tst(r4, Operand(kIsNotStringMask));
+ __ b(ne, slow);
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
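Taken together with the previous hunk, the strong-mode fast path admits only numbers and strings for identical operands; everything else defers to the runtime, which throws a TypeError. A model of that rule (the enum stands in for V8's instance-type and string-mask checks):

```cpp
#include <cassert>

// Identical operands under strong comparison semantics: smis and heap
// numbers compare directly, strings fall through to the string path, and
// anything that would be converted goes to the runtime to raise a TypeError.
enum class Kind { kSmi, kHeapNumber, kString, kOther };

bool StrongCompareNeedsRuntime(Kind k) {
  return k != Kind::kSmi && k != Kind::kHeapNumber && k != Kind::kString;
}

int main() {
  assert(!StrongCompareNeedsRuntime(Kind::kHeapNumber));  // 1 < 1 is fine
  assert(!StrongCompareNeedsRuntime(Kind::kString));      // "a" < "a" is fine
  assert(StrongCompareNeedsRuntime(Kind::kOther));        // {} < {} throws
  return 0;
}
```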
@@ -561,7 +577,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc, strength());
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -663,7 +679,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- native = Builtins::COMPARE;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
@@ -1084,10 +1101,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(r1));
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
- if (FLAG_enable_ool_constant_pool) {
- __ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
}
- __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(pc, r1, r2);
}
@@ -1132,8 +1149,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r3: argc
// r4: argv
int marker = type();
- if (FLAG_enable_ool_constant_pool) {
- __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
+ if (FLAG_enable_embedded_constant_pool) {
+ __ mov(r8, Operand::Zero());
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
@@ -1142,8 +1159,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
- (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
- ip.bit());
+ (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
+ ip.bit());
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -1331,11 +1348,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ ldr(map_load_offset, MemOperand(map_load_offset));
__ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
- __ mov(r8, map);
+ __ mov(scratch, map);
// |map_load_offset| points at the beginning of the cell. Calculate the
// field containing the map.
__ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(map_load_offset, Cell::kValueOffset, r8, function,
+ __ RecordWriteField(map_load_offset, Cell::kValueOffset, scratch, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@@ -1473,9 +1490,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
r5, &miss);
@@ -1494,9 +1510,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -1520,7 +1535,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1578,8 +1592,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[4] : receiver displacement
// sp[8] : function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1608,8 +1620,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r6 : allocated object (tagged)
// r9 : mapped parameter count (tagged)
- CHECK(!has_new_target());
-
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
// r1 = parameter count (tagged)
@@ -1666,7 +1676,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
@@ -1850,14 +1860,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- __ cmp(r1, Operand(Smi::FromInt(0)));
- Label skip_decrement;
- __ b(eq, &skip_decrement);
- // Subtract 1 from smi-tagged arguments count.
- __ sub(r1, r1, Operand(2));
- __ bind(&skip_decrement);
- }
__ str(r1, MemOperand(sp, 0));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -1939,9 +1941,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
- // sp[0] : index of rest parameter
- // sp[4] : number of parameters
- // sp[8] : receiver displacement
+ // sp[0] : language mode
+ // sp[4] : index of rest parameter
+ // sp[8] : number of parameters
+ // sp[12] : receiver displacement
Label runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1951,13 +1954,13 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ str(r1, MemOperand(sp, 2 * kPointerSize));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 2 * kPointerSize));
+ __ str(r3, MemOperand(sp, 3 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
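With the language mode pushed as a fourth argument, the layout the stub patches before calling Runtime::kNewRestParam is the one given in the comment above; as a sketch (slot offsets assume kPointerSize == 4):

    // Slot indices from the stack pointer; sp[0] was pushed last.
    enum RestParamSlot {
      kLanguageMode = 0,          // sp[0]
      kRestIndex = 1,             // sp[4]
      kParameterCount = 2,        // sp[8]  - patched from the adaptor frame
      kReceiverDisplacement = 3,  // sp[12] - patched from the adaptor frame
    };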
@@ -2418,7 +2421,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r5;
- Register weak_value = r8;
+ Register weak_value = r6;
__ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
@@ -2703,6 +2706,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
+ // Increment the call count for monomorphic function calls.
+ __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(r3, FieldMemOperand(r2, 0));
+ __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ str(r3, FieldMemOperand(r2, 0));
+
__ mov(r2, r4);
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
@@ -2762,6 +2772,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r1, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(r3, FieldMemOperand(r2, 0));
+ __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ str(r3, FieldMemOperand(r2, 0));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2837,6 +2854,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
+ // Initialize the call counter.
+ __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
+
// Store the function. Use a stub since we need a frame for allocation.
// r2 - vector
// r3 - slot
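The call count lives one pointer past the slot's feedback element, and PointerOffsetFromSmiKey converts the smi-tagged slot index straight into a byte offset (on 32-bit ARM a single left shift, since a smi is already value * 2). Both the increment and the initialization above manipulate tagged words directly; a sketch of the 32-bit smi arithmetic, assuming kCallCountIncrement is 1:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;          // 32-bit smi: value << 1
    constexpr int kCallCountIncrement = 1;  // assumed value

    int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }

    // The sum of two smis is the smi of the sum, so a tagged counter can be
    // bumped by adding a tagged increment, with no untagging step.
    int32_t BumpCallCount(int32_t tagged_count) {
      return tagged_count + SmiFromInt(kCallCountIncrement);
    }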
@@ -2937,9 +2959,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Push(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
@@ -2954,9 +2976,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r0);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
@@ -3567,7 +3589,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4348,15 +4370,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4375,12 +4397,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4474,14 +4494,14 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
- Register name = VectorLoadICDescriptor::NameRegister(); // r2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
+ Register name = LoadWithVectorDescriptor::NameRegister(); // r2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
- Register scratch1 = r8;
+ Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@@ -4521,24 +4541,24 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
- Register key = VectorLoadICDescriptor::NameRegister(); // r2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
+ Register key = LoadWithVectorDescriptor::NameRegister(); // r2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
- Register scratch1 = r8;
+ Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@@ -4568,7 +4588,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4592,6 +4612,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -5297,6 +5369,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index fd1b0efc8d..a456996a27 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -946,6 +946,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 3f3c5ed773..0749356909 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -126,6 +126,7 @@ int Registers::Number(const char* name) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index af68fb24e2..7b8529c4bb 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -42,6 +42,11 @@ const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
const int kPCRegister = 15;
const int kNoRegister = -1;
+// Used by the embedded constant pool builder: the maximum reach, in bits,
+// of the unsigned immediate offsets of the various load instructions.
+const int kLdrMaxReachBits = 12;
+const int kVldrMaxReachBits = 10;
+
// -----------------------------------------------------------------------------
// Conditions.
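These constants bound how far a constant pool entry may sit from the load referencing it: ldr encodes a 12-bit unsigned byte offset, while vldr encodes an 8-bit offset scaled by 4, hence 10 bits of byte reach with a word-alignment restriction. A sketch of the implied ranges:

    constexpr int kLdrMaxReachBits = 12;
    constexpr int kVldrMaxReachBits = 10;

    // Upper bound on the byte displacement implied by a reach in bits.
    constexpr int MaxReachBytes(int bits) { return (1 << bits) - 1; }

    static_assert(MaxReachBytes(kLdrMaxReachBits) == 4095, "ldr reach");
    // vldr's true maximum is 1020 (255 * 4) since offsets must be word
    // aligned; 1023 is just the bound implied by the bit count.
    static_assert(MaxReachBytes(kVldrMaxReachBits) == 1023, "vldr bound");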
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 4bbfd375a8..dd2d13c686 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -77,6 +77,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index d9c25c6588..7d9313200b 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -140,53 +140,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-arm.cc).
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-arm.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for keyed IC load (from ic-arm.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC keyed store call (from ic-arm.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- r0 : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -267,7 +220,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
// Pop return address, frame and constant pool pointer (if
- // FLAG_enable_ool_constant_pool).
+ // FLAG_enable_embedded_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -289,6 +242,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9359768e07..a9bcea9726 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -353,11 +353,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- DCHECK(FLAG_enable_ool_constant_pool);
+ DCHECK(FLAG_enable_embedded_constant_pool);
SetFrameSlot(offset, value);
}
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 7da2e6010a..1c1516d168 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1904,8 +1904,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
-} } // namespace v8::internal
-
+} // namespace internal
+} // namespace v8
//------------------------------------------------------------------------------
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index fde4a17749..3f3c4f04c2 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -21,7 +21,7 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
- DCHECK(FLAG_enable_ool_constant_pool);
+ DCHECK(FLAG_enable_embedded_constant_pool);
return pp;
}
@@ -29,18 +29,12 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- DCHECK(FLAG_enable_ool_constant_pool);
+ DCHECK(FLAG_enable_embedded_constant_pool);
return pp;
}
-Object*& ExitFrame::constant_pool_slot() const {
- DCHECK(FLAG_enable_ool_constant_pool);
- const int offset = ExitFrameConstants::kConstantPoolOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 3720a2bde0..db6a9e52e0 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -66,11 +66,23 @@ const int kNumDoubleCalleeSaved = 8;
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
+// The embedded constant pool pointer (r8/pp) is not included in the safepoint
+// since it is not tagged. This register is preserved in the stack frame where
+// its value will be updated if GC code movement occurs. Including it in the
+// safepoint (where it will not be relocated) would cause a stale value to be
+// restored.
+const RegList kConstantPointerRegMask =
+ FLAG_enable_embedded_constant_pool ? (1 << 8) : 0;
+const int kNumConstantPoolPointerReg =
+ FLAG_enable_embedded_constant_pool ? 1 : 0;
+
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters =
+ kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);
+const int kNumSafepointSavedRegisters =
+ kNumJSCallerSaved + kNumCalleeSaved - kNumConstantPoolPointerReg;
// ----------------------------------------------------
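The net effect of the new masks is to subtract r8 from the callee-saved set whenever the flag is on. A sketch with illustrative mask values (the real kJSCallerSaved/kCalleeSaved definitions live elsewhere in this header):

    #include <cstdint>

    using RegList = uint32_t;

    constexpr RegList kJSCallerSaved = 0x0000000f;  // r0-r3, illustrative
    constexpr RegList kCalleeSaved = 0x00000ff0;    // r4-r11, illustrative
    constexpr bool kEmbeddedConstantPool = true;    // stands in for the flag

    constexpr RegList kConstantPointerRegMask =
        kEmbeddedConstantPool ? (1u << 8) : 0;      // bit 8 == r8 == pp

    constexpr RegList kSafepointSavedRegisters =
        kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);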
@@ -84,11 +96,11 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kFrameSize = FLAG_enable_ool_constant_pool ?
- 3 * kPointerSize : 2 * kPointerSize;
+ static const int kFrameSize =
+ FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
- static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
- -3 * kPointerSize : 0;
+ static const int kConstantPoolOffset =
+ FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@@ -116,36 +128,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -6 * kPointerSize;
- static const int kConstructorOffset = -5 * kPointerSize;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 6f43d17b5c..e4b7cf34ee 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -96,7 +96,7 @@ class JumpPatchSite BASE_EMBEDDED {
// The live registers are:
// o r1: the JS function object being called (i.e., ourselves)
// o cp: our context
-// o pp: our caller's constant pool pointer (if FLAG_enable_ool_constant_pool)
+// o pp: our caller's constant pool pointer (if enabled)
// o fp: our caller's frame pointer
// o sp: stack pointer
// o lr: return address
@@ -105,10 +105,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-arm.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -127,7 +123,7 @@ void FullCodeGenerator::Generate() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ info->MayUseThis() && info->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -191,17 +187,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -216,8 +212,9 @@ void FullCodeGenerator::Generate() {
__ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -241,10 +238,41 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // The write barrier clobbers the register again; keep it marked as such.
+ }
+ SetVar(this_function_var, r1, r0, r2);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
+ __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ Label non_construct_frame, done;
+
+ __ b(ne, &non_construct_frame);
+ __ ldr(r0,
+ MemOperand(r2, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ b(&done);
+
+ __ bind(&non_construct_frame);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+
+ SetVar(new_target_var, r0, r2, r3);
+ }
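The lookup walks one frame up, skips over an arguments adaptor frame if present, and only trusts the original-constructor slot when the frame marker says CONSTRUCT; anything else yields undefined. The same decision in plain C++, with invented types:

    enum class Marker { CONSTRUCT, ARGUMENTS_ADAPTOR, OTHER };

    struct Frame {
      const Frame* caller;
      Marker marker;
      const void* original_constructor;  // valid only for CONSTRUCT frames
    };

    // nullptr stands in for JS undefined.
    const void* NewTarget(const Frame* current) {
      const Frame* f = current->caller;
      if (f->marker == Marker::ARGUMENTS_ADAPTOR) f = f->caller;
      return f->marker == Marker::CONSTRUCT ? f->original_constructor
                                            : nullptr;
    }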
// Possibly allocate RestParameters
int rest_index;
@@ -254,15 +282,12 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r2, Operand(Smi::FromInt(num_parameters)));
__ mov(r1, Operand(Smi::FromInt(rest_index)));
- __ Push(r3, r2, r1);
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
+ __ Push(r3, r2, r1, r0);
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -283,8 +308,8 @@ void FullCodeGenerator::Generate() {
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
+
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r1, Operand(Smi::FromInt(num_parameters)));
__ Push(r3, r2, r1);
@@ -300,12 +325,13 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, r0, r1, r2);
}
+
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
@@ -325,7 +351,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -481,11 +507,8 @@ void FullCodeGenerator::EmitReturnSequence() {
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
int32_t arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int32_t sp_delta = arg_count * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ SetReturnPosition(function());
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
@@ -852,7 +875,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
@@ -860,8 +884,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -869,7 +893,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -880,7 +904,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
@@ -911,25 +935,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ str(result_register(), StackOperand(variable));
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -948,7 +973,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r2, Operand(variable->name()));
__ mov(r1, Operand(Smi::FromInt(NONE)));
@@ -966,20 +991,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
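Every declaration visitor in this file now switches on the same six-way location enum, with the new GLOBAL case handled alongside UNALLOCATED. A sketch of the recurring shape (member order illustrative):

    enum class VariableLocation {
      UNALLOCATED, GLOBAL, PARAMETER, LOCAL, CONTEXT, LOOKUP
    };

    // Globals share a case with unallocated variables; locations that cannot
    // occur for a given construct fall through to UNREACHABLE in the source.
    bool IsGlobalOrUnallocated(VariableLocation loc) {
      switch (loc) {
        case VariableLocation::GLOBAL:
        case VariableLocation::UNALLOCATED:
          return true;
        default:
          return false;
      }
    }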
@@ -1057,9 +1083,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1105,8 +1131,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1114,7 +1141,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@@ -1218,7 +1245,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
// Load the current count to r0, load the length to r1.
__ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
@@ -1252,9 +1279,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ mov(r3, Operand(r0), SetCC);
+ __ mov(r3, Operand(r0));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
__ b(eq, loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
@@ -1263,7 +1292,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), r3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1324,39 +1353,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- __ cmp(r0, Operand(isolate()->factory()->undefined_value()));
- Label done;
- __ b(ne, &done);
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1410,17 +1416,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
- CallLoadIC(mode);
+ // All extension objects were empty, so it is safe to use the normal global
+ // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1489,30 +1487,43 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
// Record position before possible IC call.
- SetSourcePosition(proxy->position());
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(r0);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1581,16 +1592,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(r0);
}
@@ -1661,7 +1676,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -1682,13 +1696,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in r0.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1715,7 +1728,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r0));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1723,6 +1741,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(), MemOperand(sp));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1736,7 +1757,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
__ push(r0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1775,9 +1797,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1812,7 +1838,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1858,6 +1885,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(r0);
}
+
+ // Verify that compilation exactly consumed the number of store ic slots that
+ // the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
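The bookkeeping here is a consume-exactly-what-was-allocated protocol: ComputeFeedbackRequirements fixes the slot count before codegen, and the emitter's running index must land exactly on it. A minimal sketch of the invariant:

    #include <cassert>

    struct StoreSlots {
      int allocated;  // fixed up front, per ObjectLiteral
      int used = 0;
      int Next() { return used++; }  // one slot per emitted store IC
    };

    void EmitStores(StoreSlots& slots, int num_store_sites) {
      for (int i = 0; i < num_store_sites; ++i) slots.Next();
      assert(slots.used == slots.allocated);  // mirrors the DCHECK above
    }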
@@ -1899,8 +1930,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1913,7 +1947,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
__ str(result_register(), FieldMemOperand(r1, offset));
@@ -1922,12 +1956,37 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ mov(r3, Operand(Smi::FromInt(i)));
+ __ mov(r3, Operand(Smi::FromInt(array_index)));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ // If the array literal contains spread expressions, it has two parts. The
+ // first part is the "static" array with literal indices, which is handled
+ // above. The second part begins at the first spread expression (inclusive);
+ // those elements get appended to the array. Note that the number of
+ // elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ pop(); // literal index
+ __ Pop(r0);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(r0);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
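Taken together, array literal emission is now two-phase: literal-indexed stores up to the first spread, then dynamic appends whose count is unknown until runtime. A rough standalone model (std::vector standing in for the JSArray, and spreads flattened to single values for brevity):

    #include <vector>

    struct Element { bool is_spread; int value; };

    std::vector<int> BuildArrayLiteral(const std::vector<Element>& elems) {
      std::vector<int> array(elems.size(), 0);  // preallocated backing store
      size_t i = 0;
      // Phase 1: elements before the first spread go to literal indices.
      for (; i < elems.size() && !elems[i].is_spread; ++i) {
        array[i] = elems[i].value;
      }
      array.resize(i);  // from here on the final length is unknown
      // Phase 2: from the first spread on, append element by element; a real
      // spread appends everything its iterable produces.
      for (; i < elems.size(); ++i) {
        array.push_back(elems[i].value);
      }
      return array;
    }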
@@ -1943,9 +2002,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1962,8 +2022,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = r1;
@@ -1973,9 +2035,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
@@ -2033,7 +2096,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(r0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
@@ -2050,14 +2112,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
break;
@@ -2081,6 +2142,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2163,7 +2226,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(r0); // result
- EnterTryBlock(expr->index(), &l_catch);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r0); // result
__ jmp(&l_suspend);
@@ -2173,7 +2237,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ ldr(r0, MemOperand(sp, generator_object_depth));
__ push(r0); // g
- __ Push(Smi::FromInt(expr->index())); // handler-index
+ __ Push(Smi::FromInt(handler_index)); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
__ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
@@ -2186,7 +2250,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(r0); // result
EmitReturnSequence();
__ bind(&l_resume); // received in r0
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2199,11 +2263,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ ldr(load_receiver, MemOperand(sp, kPointerSize));
__ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
@@ -2219,10 +2281,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2232,10 +2292,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
context()->DropAndPlug(2, r0); // drop iter and g
break;
@@ -2284,7 +2342,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ bind(&resume_frame);
// lr = return address.
// fp = caller's frame pointer.
- // pp = caller's constant pool (if FLAG_enable_ool_constant_pool),
+ // pp = caller's constant pool (if FLAG_enable_embedded_constant_pool),
// cp = callee's context,
// r4 = callee's JS function.
__ PushFixedFrame(r4);
@@ -2305,10 +2363,9 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Load the new code object's constant pool pointer.
- __ ldr(pp,
- MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
}
__ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
@@ -2382,51 +2439,44 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
@@ -2451,8 +2501,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2524,7 +2574,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in r0.
DCHECK(lit != NULL);
__ push(r0);
@@ -2558,7 +2609,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2596,8 +2648,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(r1);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2605,17 +2657,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2625,13 +2678,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(r0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; r0: home_object
Register scratch = r2;
Register scratch2 = r3;
@@ -2646,9 +2701,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ Push(r0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = r2;
Register scratch2 = r3;
@@ -2671,6 +2726,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Move(StoreDescriptor::NameRegister(), r0);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2694,11 +2750,13 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2790,12 +2848,15 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2834,15 +2895,17 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r0));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2851,6 +2914,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2859,9 +2924,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), r0);
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2872,9 +2937,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2933,21 +2998,21 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
+ SetExpressionPosition(prop);
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = r1;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(r0);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ Push(r0);
__ Push(r0);
__ ldr(scratch, MemOperand(sp, kPointerSize * 2));
__ Push(scratch);
__ Push(key->value());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2955,7 +3020,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2997,18 +3063,18 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
const Register scratch = r1;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(r0);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ Push(r0);
__ Push(r0);
__ ldr(scratch, MemOperand(sp, kPointerSize * 2));
__ Push(scratch);
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -3016,7 +3082,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -3032,14 +3099,11 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -3055,19 +3119,15 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // r5: copy of the first argument or undefined if it doesn't exist.
+ // r4: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ldr(r5, MemOperand(sp, arg_count * kPointerSize));
+ __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
}
- // r4: the receiver of the enclosing function.
- __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // r3: the receiver of the enclosing function.
+ // r3: the function of the enclosing frame.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// r2: language mode.
__ mov(r2, Operand(Smi::FromInt(language_mode())));
@@ -3076,21 +3136,13 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ Push(r5);
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(r0);
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
+ SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
Variable* this_var = super_ref->this_var()->var();
GetVar(r1, this_var);
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -3101,7 +3153,51 @@ void FullCodeGenerator::EmitInitializeThisAfterSuper(
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
+
+
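EmitInitializeThisAfterSuper enforces the derived-constructor rule that `this` starts out as the hole and is bound exactly once by super(); if the slot is no longer the hole, the code throws a ReferenceError instead of assigning. A toy C++ analogue of that check, with std::optional standing in for the hole (all names here are illustrative):

#include <optional>
#include <stdexcept>

struct ThisSlot {
  std::optional<int> value;  // nullopt plays the role of the hole
  void InitializeAfterSuper(int v) {
    // Mirrors the CompareRoot(kTheHoleValueRootIndex) branch: if `this`
    // is no longer the hole, super() already ran, so throw.
    if (value.has_value()) throw std::runtime_error("ReferenceError: this");
    value = v;  // the Token::INIT_CONST assignment
  }
};

int main() {
  ThisSlot slot;
  slot.InitializeAfterSuper(42);  // first super() call succeeds
  return 0;
}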
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in r0)
+ // and the object holding it (returned in r1).
+ DCHECK(!context_register().is(r2));
+ __ mov(r2, Operand(callee->name()));
+ __ Push(context_register(), r2);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(r0, r1); // Function, receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ b(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(r0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ push(r1);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ push(r2); // Reserved receiver slot.
+ }
}
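PushCalleeAndWithBaseObject centralizes the callee setup previously duplicated across the eval and lookup-slot call paths: try a fast context-chain load, else fall back to Runtime::kLoadLookupSlot, which returns both the function and the object holding it. A toy C++ analogue of that shape; the lookup table and every name below are assumptions:

#include <map>
#include <optional>
#include <string>
#include <utility>

// Stand-in for the statically resolvable part of the context chain.
static const std::map<std::string, std::string> kContext = {{"f", "closure"}};

static std::optional<std::string> FastContextLookup(const std::string& name) {
  auto it = kContext.find(name);
  if (it == kContext.end()) return std::nullopt;
  return it->second;
}

// Stand-in for Runtime::kLoadLookupSlot: yields {function, receiver}.
static std::pair<std::string, std::string> SlowRuntimeLookup(
    const std::string& name) {
  return {"resolved:" + name, "with_base_object"};
}

std::pair<std::string, std::string> LoadCalleeAndReceiver(
    const std::string& name) {
  if (auto fast = FastContextLookup(name)) {
    // Fast case: the receiver is implicitly the global receiver (undefined).
    return {*fast, "undefined"};
  }
  return SlowRuntimeLookup(name);
}

int main() { return LoadCalleeAndReceiver("f").second == "undefined" ? 0 : 1; }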
@@ -3117,39 +3213,32 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
+ // In a call to eval, we first call
+ // RuntimeHidden_ResolvePossiblyDirectEval to resolve the function we need
+ // to call. Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
+ PushCalleeAndWithBaseObject(expr);
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(r1);
+ EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in r0 (function) and
- // r1 (receiver). Touch up the stack with the right values.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+ // Touch up the stack with the resolved function.
+ __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3162,43 +3251,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r0)
- // and the object holding it (returned in edx).
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(proxy->name()));
- __ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(r0, r1); // Function, receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
@@ -3210,10 +3263,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
-      {
-        PreservePositionScope scope(masm()->positions_recorder());
-        VisitForStackValue(property->obj());
-      }
+      VisitForStackValue(property->obj());
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3225,9 +3275,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
+ VisitForStackValue(callee);
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1);
// Emit function call.
@@ -3250,7 +3298,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3262,7 +3310,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
@@ -3286,11 +3334,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor();
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
+
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3302,7 +3353,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
@@ -3328,7 +3379,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(r0);
}
@@ -3613,6 +3664,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r0, if_false);
+ __ CompareObjectType(r0, r1, r1, JS_TYPED_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
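The new %_IsTypedArray intrinsic follows the usual full-codegen test shape: reject Smis first, then compare the heap object's instance type. A standalone sketch of that two-step check using V8's tagging convention (Smis carry tag bit 0, heap pointers bit 1); the struct layout and type value are assumptions:

#include <cstdint>

enum InstanceType : int { JS_TYPED_ARRAY_TYPE = 0x123 };  // value assumed

struct Map { InstanceType instance_type; };
struct HeapObject { Map* map; };

static bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

bool IsTypedArray(uintptr_t tagged) {
  if (IsSmi(tagged)) return false;  // JumpIfSmi(r0, if_false)
  auto* obj = reinterpret_cast<HeapObject*>(tagged - 1);  // strip heap tag
  return obj->map->instance_type == JS_TYPED_ARRAY_TYPE;  // CompareObjectType
}

int main() {
  Map map{JS_TYPED_ARRAY_TYPE};
  HeapObject obj{&map};
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&obj) + 1;
  return IsTypedArray(tagged) ? 0 : 1;
}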
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3854,6 +3927,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r0, if_false);
+ __ CompareObjectType(r0, r1, r1, JS_DATE_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3862,20 +3957,15 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = r0;
Register result = r0;
Register scratch0 = r9;
Register scratch1 = r1;
- __ JumpIfSmi(object, &not_date_object);
- __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ b(ne, &not_date_object);
-
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch1, Operand(stamp));
@@ -3891,13 +3981,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ PrepareCallCFunction(2, scratch1);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(r0);
+ context()->Plug(result);
}
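EmitDateField no longer emits its own not-a-date guard (that check now lives behind the new %_IsDate test): index 0 reads the time value directly, and the other fields are served from a per-object cache that is valid only while the isolate-wide stamp is unchanged. A toy version of the stamp check; all field names are assumed:

struct DateCacheStamp { int value = 0; };  // bumped when the time zone changes

struct JSDateFields {
  int cache_stamp = -1;          // stamp at the time the fields were cached
  int cached[4] = {0, 0, 0, 0};  // e.g. year, month, day, weekday
};

int ReadDateField(JSDateFields& date, const DateCacheStamp& stamp, int index) {
  if (date.cache_stamp == stamp.value) {
    return date.cached[index];  // fast path: cache still valid
  }
  // Slow path: recompute (the generated code calls the C date function).
  date.cached[index] = /* recomputed value */ 0;
  date.cache_stamp = stamp.value;
  return date.cached[index];
}

int main() {
  JSDateFields d;
  DateCacheStamp s;
  return ReadDateField(d, s, 0);
}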
@@ -4189,11 +4276,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+
+ // new.target
+ VisitForStackValue(args->at(0));
- EmitLoadSuperConstructor();
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -4211,9 +4302,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(r1, r1);
-
- // Subtract 1 from arguments count, for new.target.
- __ sub(r1, r1, Operand(1));
__ mov(r0, r1);
// Get arguments pointer in r2.
@@ -4586,11 +4674,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4600,8 +4691,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4617,7 +4708,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4630,13 +4722,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4644,8 +4732,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4670,6 +4757,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4695,6 +4783,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(r0);
}
@@ -4719,10 +4808,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(r0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(SLOPPY)));
@@ -4732,7 +4822,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4820,10 +4910,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4846,8 +4935,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, kPointerSize));
@@ -4858,9 +4948,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
__ Push(result_register());
const Register scratch = r1;
@@ -4938,9 +5028,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4973,22 +5065,24 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(count_value)));
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), Token::ADD, language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in r0.
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(r0);
}
@@ -4999,7 +5093,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -5008,7 +5102,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5046,7 +5145,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5061,46 +5165,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(r0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5179,7 +5243,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5233,9 +5297,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -5348,6 +5411,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ mov(ip, Operand(pending_message_obj));
__ ldr(r1, MemOperand(ip));
__ push(r1);
+
+ ClearPendingMessage();
}
@@ -5370,12 +5435,29 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(r1));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ mov(ip, Operand(pending_message_obj));
+ __ str(r1, MemOperand(ip));
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(slot)));
+}
+
+
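EmitLoadStoreICSlot is the store-side counterpart of the load changes above: with --vector-stores, a store IC receives an explicit feedback slot in a dedicated register (r4 on ARM, per the descriptor change below) rather than a per-call AST feedback id. Conceptually the slot indexes a per-function feedback array; a toy sketch with assumed names:

#include <cstdint>
#include <vector>

// Toy feedback vector: each IC owns one slot that it reads and updates.
struct FeedbackVector {
  std::vector<uintptr_t> slots;
  explicit FeedbackVector(int n) : slots(n, 0) {}
  uintptr_t& at(int slot) { return slots.at(slot); }
};

int main() {
  FeedbackVector vector(4);
  int slot = 2;  // the index EmitLoadStoreICSlot would pass, Smi-encoded
  vector.at(slot) = 0x42;  // a store IC records its map/handler here
  return 0;
}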
#undef __
static Address GetInterruptImmediateLoadAddress(Address pc) {
Address load_address = pc - 2 * Assembler::kInstrSize;
- if (!FLAG_enable_ool_constant_pool) {
+ if (!FLAG_enable_embedded_constant_pool) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
} else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
// This is an extended constant pool lookup.
@@ -5512,6 +5594,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 11613d1402..67f65f5cb3 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
+const Register LoadDescriptor::SlotRegister() { return r0; }
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
@@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return r2; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r2};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
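Each descriptor now lists only its platform registers; the context register is implicit and the Representation arrays moved to the platform-independent layer. The register count comes from the array type via arraysize, which V8 defines in base/macros.h with the sizeof-of-a-declared-helper trick so it works in constant expressions; a standalone copy of the idiom:

#include <cstddef>

// The helper is only declared, never defined: sizeof() inspects the type
// of its unevaluated operand, yielding N at compile time.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
#define arraysize(array) (sizeof(ArraySizeHelper(array)))

int main() {
  int registers[] = {1, 2, 3};
  static_assert(arraysize(registers) == 3, "count derived from the type");
  return 0;
}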
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3, r2, r1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r3, r2, r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3, r2, r1, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r3, r2, r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r2, r3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r2, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r2, r3, r1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r2, r3, r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r3, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1, r3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r1, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1, r3, r2};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r1, r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
@@ -166,234 +162,206 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, r0, r1, r2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r0, r1, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r2, r1, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r2, r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r0, r1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r0, r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // register state
- // cp -- context
- Register registers[] = {cp};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
- Register registers[] = {cp, r1, r2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r1, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {cp, r1, r2, r0};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r1, r2, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// r0 -- number of arguments
// r1 -- constructor function
- Register registers[] = {cp, r1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {cp, r1, r0};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r2, r1, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r2, r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r1, r0};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
r2, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations,
- &noInlineDescriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &noInlineDescriptor);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
r2, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations,
- &noInlineDescriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &noInlineDescriptor);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
r0, // receiver
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
r1, // JSFunction
r0, // actual number of arguments
r2, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
r3, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r1, // math rounding function
+ r3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index ccd962c23f..55e501762c 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -1092,10 +1092,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@@ -1105,20 +1113,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
@@ -1869,7 +1863,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2148,7 +2142,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2197,7 +2191,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2271,7 +2265,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2336,8 +2330,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- return MarkAsCall(
- new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result =
+ new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+ return MarkAsCall(result, instr);
}
@@ -2369,6 +2371,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, r0);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2407,8 +2424,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
- LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2485,7 +2509,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2602,7 +2626,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2671,4 +2695,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
return MarkAsCall(DefineFixed(result, cp), instr);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
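
[Editorial sketch] The DoCallWithDescriptor change above reserves the first two operand slots for the call target and the context (cp) before the descriptor's own register parameters begin, which is what kImplicitRegisterParameterCount captures. A self-contained model of the resulting index arithmetic; the descriptor registers here are invented:

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      // Mirrors LCallWithDescriptor::kImplicitRegisterParameterCount:
      // operand 0 is the call target, operand 1 is the context (cp).
      const size_t kImplicitRegisterParameterCount = 2;
      const std::vector<std::string> descriptor_regs = {"r0", "r2"};  // invented

      std::vector<std::string> ops;
      ops.push_back("target");  // implicit operand 0
      ops.push_back("cp");      // implicit operand 1 (context)
      for (size_t i = kImplicitRegisterParameterCount;
           i < kImplicitRegisterParameterCount + descriptor_regs.size(); ++i) {
        // Operand i is pinned to descriptor register parameter
        // i - kImplicitRegisterParameterCount, as in the loop in the patch.
        ops.push_back(descriptor_regs[i - kImplicitRegisterParameterCount]);
      }

      // Matches the DCHECK in the LCallWithDescriptor constructor.
      assert(ops.size() ==
             descriptor_regs.size() + kImplicitRegisterParameterCount);
      return 0;
    }
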
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index e0d4293ee3..d61c3d4c0d 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -117,6 +117,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -153,7 +154,6 @@ class LCodeGen;
V(SubI) \
V(RSubI) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -474,26 +474,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1196,6 +1176,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1567,7 +1549,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1865,8 +1847,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1876,6 +1862,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2188,17 +2178,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2247,22 +2242,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* obj,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = obj;
+ inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2318,6 +2315,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
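
[Editorial sketch] The header changes above all follow the LTemplateInstruction<R, I, T> convention: the template parameters fix the number of results, inputs, and temps at compile time, which is why adding the slot/vector temps moves LStoreNamedGeneric from <0, 3, 0> to <0, 3, 2> and why the new LMaybeGrowElements is <1, 5, 0>. A compilable miniature of the convention, with stand-in types and an entirely hypothetical instruction:

    // Stand-ins so the sketch compiles outside V8.
    struct LOperand {};

    template <int R, int I, int T>
    struct LTemplateInstruction {
      LOperand* inputs_[I];
      LOperand* temps_[T > 0 ? T : 1];  // avoid zero-length arrays here
    };

    // One result, two inputs, one temp, following the same <R, I, T> shape.
    class LMyOp final : public LTemplateInstruction<1, 2, 1> {
     public:
      LMyOp(LOperand* left, LOperand* right, LOperand* temp) {
        inputs_[0] = left;
        inputs_[1] = right;
        temps_[0] = temp;
      }
      LOperand* left() { return inputs_[0]; }
      LOperand* right() { return inputs_[1]; }
      LOperand* temp() { return temps_[0]; }
    };
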
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index e91fddfdd3..747730b3f5 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -113,7 +113,7 @@ bool LCodeGen::GeneratePrologue() {
// r1: Callee's JS function.
// cp: Callee's context.
- // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
+ // pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
@@ -121,7 +121,7 @@ bool LCodeGen::GeneratePrologue() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ !info_->is_native() && info_->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -197,8 +197,9 @@ bool LCodeGen::GeneratePrologue() {
__ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -595,52 +596,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@@ -960,28 +926,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1016,10 +965,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
- // Register pp always contains a pointer to the constant pool.
- safepoint.DefinePointerRegister(pp, zone());
- }
}
@@ -1936,20 +1881,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(result));
DCHECK(object.is(r0));
DCHECK(!scratch.is(scratch0()));
DCHECK(!scratch.is(object));
- __ SmiTst(object);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
- __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
-
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand(stamp));
@@ -2174,8 +2114,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2611,7 +2551,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2885,37 +2826,41 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
int additional_delta = (call_size / Assembler::kInstrSize) + 4;
- // Make sure that code size is predicable, since we use specific constants
- // offsets in the code to find embedded values..
- PredictableCodeSizeScope predictable(
- masm_, (additional_delta + 1) * Assembler::kInstrSize);
- // Make sure we don't emit any additional entries in the constant pool before
- // the call to ensure that the CallCodeSize() calculated the correct number of
- // instructions for the constant pool load.
{
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- int map_check_delta =
- masm_->InstructionsGeneratedSince(map_check) + additional_delta;
- int bool_load_delta =
- masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(additional_delta);
- // r5 is used to communicate the offset to the location of the map check.
- __ mov(r5, Operand(map_check_delta * kPointerSize));
- // r6 is used to communicate the offset to the location of the bool load.
- __ mov(r6, Operand(bool_load_delta * kPointerSize));
- // The mov above can generate one or two instructions. The delta was
- // computed for two instructions, so we need to pad here in case of one
- // instruction.
- while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
- __ nop();
+ // Make sure that code size is predictable, since we use specific constant
+ // offsets in the code to find embedded values.
+ PredictableCodeSizeScope predictable(
+ masm_, additional_delta * Assembler::kInstrSize);
+ // The labels must already be bound since the code has predictable size up
+ // to the call instruction.
+ DCHECK(map_check->is_bound());
+ DCHECK(bool_load->is_bound());
+ // Make sure we don't emit any additional entries in the constant pool
+ // before the call to ensure that the CallCodeSize() calculated the
+ // correct number of instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ int map_check_delta =
+ masm_->InstructionsGeneratedSince(map_check) + additional_delta;
+ int bool_load_delta =
+ masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ BlockConstPoolFor(additional_delta);
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(map_check_delta * kPointerSize));
+ // r6 is used to communicate the offset to the location of the bool load.
+ __ mov(r6, Operand(bool_load_delta * kPointerSize));
+ // The mov above can generate one or two instructions. The delta was
+ // computed for two instructions, so we need to pad here in case of one
+ // instruction.
+ while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
+ __ nop();
+ }
}
+ CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
}
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value (r0) into the result register slot and
@@ -2928,7 +2873,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2986,10 +2932,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(r0));
AllowDeferredHandleDereference vector_structure_check;
@@ -3002,6 +2947,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
@@ -3009,11 +2968,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3108,12 +3065,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3269,7 +3225,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3420,9 +3377,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3961,29 +3918,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
- Register scratch = r4;
- Register extra = r5;
- Register extra2 = r6;
- Register extra3 = r9;
-
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(
- masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Tail call to miss if we ended up here.
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
@@ -4274,10 +4208,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4385,7 +4323,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4498,6 +4437,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4505,6 +4448,100 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = r0;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ jmp(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ cmp(ToRegister(current_capacity), Operand(constant_key));
+ __ b(le, deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ cmp(ToRegister(key), Operand(constant_capacity));
+ __ b(ge, deferred->entry());
+ } else {
+ __ cmp(ToRegister(key), ToRegister(current_capacity));
+ __ b(ge, deferred->entry());
+ }
+
+ if (instr->elements()->IsRegister()) {
+ __ Move(result, ToRegister(instr->elements()));
+ } else {
+ __ ldr(result, ToMemOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = r0;
+ __ mov(result, Operand::Zero());
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ ldr(result, ToMemOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
+ } else {
+ __ Move(r3, ToRegister(key));
+ __ SmiTag(r3);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ SmiTst(result);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+}
+
+
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register scratch = scratch0();
@@ -5957,4 +5994,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
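
[Editorial sketch] The new DoMaybeGrowElements above uses Lithium's deferred-code idiom: the fast path is emitted inline and branches to deferred->entry() only when the key does not fit the current capacity; the out-of-line deferred path does the expensive work (in V8, a GrowArrayElementsStub call under a safepoint scope) and rejoins at deferred->exit(). A self-contained model of the control shape, with stand-in names:

    #include <cstdio>

    // Fast path inline, slow path out of line -- all names here are
    // stand-ins for illustration, not the generated-code implementation.
    bool MaybeGrowElements(int key, int current_capacity) {
      if (key >= current_capacity) {
        // Deferred ("slow") path, kept out of line in the generated code.
        std::printf("growing elements: key %d >= capacity %d\n", key,
                    current_capacity);
        return true;  // grew
      }
      // Fast path: capacity is sufficient, nothing to do.
      return false;
    }

    int main() {
      MaybeGrowElements(3, 8);   // fast path
      MaybeGrowElements(10, 8);  // deferred path
      return 0;
    }
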
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 40ab3e83e8..176097f5d9 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -27,7 +27,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -112,6 +111,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -241,7 +241,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -324,10 +323,11 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 2fceec9d21..f8e4a7f680 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -299,4 +299,5 @@ void LGapResolver::EmitMove(int index) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 12108a0040..61e484bd85 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -691,28 +691,28 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
- stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
- cp.bit() |
- (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
- fp.bit() |
- lr.bit());
+ stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+ (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
- ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
- cp.bit() |
- (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
- fp.bit() |
- lr.bit());
+ ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+ (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+ fp.bit() | lr.bit());
}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of contiguous register values starting with r0:
- DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+ // Safepoints expect a block of contiguous register values starting with r0,
+ // except when FLAG_enable_embedded_constant_pool is set, in which case pp
+ // is omitted.
+ DCHECK(kSafepointSavedRegisters ==
+ (FLAG_enable_embedded_constant_pool
+ ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
+ : (1 << kNumSafepointSavedRegisters) - 1));
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
@@ -742,6 +742,10 @@ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
+ if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
+ // RegList omits pp.
+ reg_code -= 1;
+ }
DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
@@ -985,13 +989,20 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
+void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+ Register code_target_address) {
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ ldr(pp, MemOperand(code_target_address,
+ Code::kConstantPoolOffset - Code::kHeaderSize));
+ add(pp, pp, code_target_address);
+}
+
+
void MacroAssembler::LoadConstantPoolPointerRegister() {
- if (FLAG_enable_ool_constant_pool) {
- int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
- pc_offset() - Instruction::kPCReadOffset;
- DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
- ldr(pp, MemOperand(pc, constant_pool_offset));
- }
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ int entry_offset = pc_offset() + Instruction::kPCReadOffset;
+ sub(ip, pc, Operand(entry_offset));
+ LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}
@@ -1000,9 +1011,9 @@ void MacroAssembler::StubPrologue() {
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
- set_ool_constant_pool_available(true);
+ set_constant_pool_available(true);
}
}
@@ -1025,9 +1036,9 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
- set_ool_constant_pool_available(true);
+ set_constant_pool_available(true);
}
}
@@ -1036,7 +1047,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
PushFixedFrame();
- if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
+ if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
@@ -1056,9 +1067,9 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer
- // (if FLAG_enable_ool_constant_pool).
+ // (if FLAG_enable_embedded_constant_pool).
int frame_ends;
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
frame_ends = pc_offset();
ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
@@ -1084,7 +1095,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
mov(ip, Operand(CodeObject()));
@@ -1103,7 +1114,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// fp - ExitFrameConstants::kFrameSize -
// DwVfpRegister::kMaxNumRegisters * kDoubleSize,
// since the sp slot, code slot and constant pool slot (if
- // FLAG_enable_ool_constant_pool) were pushed after the fp.
+ // FLAG_enable_embedded_constant_pool) were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@@ -1183,7 +1194,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
#endif
// Tear down the exit frame, pop the arguments, and return.
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
mov(sp, Operand(fp));
@@ -1559,6 +1570,7 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
add(t0, t0, scratch);
// hash = hash ^ (hash >> 16);
eor(t0, t0, Operand(t0, LSR, 16));
+ bic(t0, t0, Operand(0xc0000000u));
}
@@ -3162,7 +3174,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
- b(lt, &loop);
+ b(lo, &loop);
}
@@ -3390,7 +3402,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
if (ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
- add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+ add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
@@ -3401,7 +3413,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Label small_constant_pool_load, load_result;
ldr(result, MemOperand(ldr_location));
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Check if this is an extended constant pool load.
and_(scratch, result, Operand(GetConsantPoolLoadMask()));
teq(scratch, Operand(GetConsantPoolLoadPattern()));
@@ -3455,7 +3467,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
bind(&load_result);
// Get the address of the constant.
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
add(result, pp, Operand(result));
} else {
add(result, ldr_location, Operand(result));
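
[Editorial note] The one-line GetNumberHash addition above (bic t0, t0, #0xc0000000) clears the top two bits after the final xor step, which is equivalent to masking with 0x3fffffff; a plausible reading is that this keeps the hash a non-negative 30-bit value, safe to tag as a Smi on 32-bit ARM. A worked example of the finalization step:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t hash = 0xdeadbeef;
      hash ^= hash >> 16;           // eor t0, t0, (t0 LSR 16)
      hash &= ~0xc0000000u;         // bic t0, t0, #0xc0000000
      assert(hash <= 0x3fffffffu);  // fits in 30 bits
      return 0;
    }
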
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index e6047adc48..7ece4b2fa6 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -437,7 +437,7 @@ class MacroAssembler: public Assembler {
}
// Push a fixed frame, consisting of lr, fp, constant pool (if
- // FLAG_enable_ool_constant_pool), context and JS function / marker id if
+ // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
// marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
@@ -1441,6 +1441,11 @@ class MacroAssembler: public Assembler {
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+ Register code_target_address);
+ void LoadConstantPoolPointerRegister();
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1482,9 +1487,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- // Loads the constant pool pointer (pp) register.
- void LoadConstantPoolPointerRegister();
-
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 33f941bbc4..9f4b4af42d 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1193,6 +1193,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index ca9a0a8947..3a02ee0094 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -774,8 +774,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
-Simulator::~Simulator() {
-}
+Simulator::~Simulator() { free(stack_); }
// When the generated code calls an external reference we need to catch that in
@@ -824,7 +823,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
@@ -834,6 +833,14 @@ class Redirection {
return redirection->external_function();
}
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -842,6 +849,19 @@ class Redirection {
};
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+ Redirection::DeleteChain(first);
+ if (i_cache != nullptr) {
+ for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@@ -4131,7 +4151,8 @@ uintptr_t Simulator::PopAddress() {
return address;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // USE_SIMULATOR
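
[Editorial sketch] Redirection::DeleteChain above uses the standard idiom for freeing a singly linked list: capture the next pointer before deleting the current node, so the iteration never reads freed memory. A minimal standalone version of the same idiom, with Node as a stand-in type:

    struct Node {
      Node* next;
    };

    void DeleteChain(Node* node) {
      while (node != nullptr) {
        Node* next = node->next;  // save the link before freeing
        delete node;
        node = next;
      }
    }
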
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 12b6853e7b..eea43efc53 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -194,6 +194,8 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
+ static void TearDown(HashMap* i_cache, Redirection* first);
+
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 7d8d81e38d..bbd44c5f10 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -586,14 +586,13 @@ Address Assembler::target_pointer_address_at(Address pc) {
// Read/Modify the code target address in the branch/call instruction at pc.
-Address Assembler::target_address_at(Address pc,
- ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
return Memory::Address_at(target_pointer_address_at(pc));
}
Address Assembler::target_address_at(Address pc, Code* code) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
@@ -665,8 +664,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
-void Assembler::set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
@@ -685,7 +683,7 @@ void Assembler::set_target_address_at(Address pc,
Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -867,8 +865,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
- return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
- i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
+ return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
+ i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code);
}
@@ -1084,13 +1082,14 @@ Instr Assembler::SF(Register rd) {
}
-Instr Assembler::ImmAddSub(int64_t imm) {
+Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
- return imm << ImmAddSub_offset;
+ imm <<= ImmAddSub_offset;
} else {
- return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
}
+ return imm;
}
@@ -1239,13 +1238,13 @@ LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
}
-Instr Assembler::ImmMoveWide(uint64_t imm) {
+Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
-Instr Assembler::ShiftMoveWide(int64_t shift) {
+Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
}
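
[Editorial sketch] The ImmAddSub rewrite above encodes an A64 add/sub immediate as either a raw 12-bit value or a 12-bit value shifted left by 12 with the shift flag set (imm12 at bits 21:10, shift flag at bit 22). A standalone worked example of that encoding; the logic mirrors the patch, but treat it as an illustration rather than V8's exact helper:

    #include <cassert>
    #include <cstdint>

    const int ImmAddSub_offset = 10;
    const int ShiftAddSub_offset = 22;

    uint32_t ImmAddSub(int imm) {
      if ((imm & 0xfff) == imm) {  // fits in 12 bits: no shift needed
        return static_cast<uint32_t>(imm) << ImmAddSub_offset;
      }
      // Otherwise the low 12 bits must be zero; encode imm >> 12 plus shift.
      assert((imm & 0xfff) == 0);
      return (static_cast<uint32_t>(imm >> 12) << ImmAddSub_offset) |
             (1u << ShiftAddSub_offset);
    }

    int main() {
      assert(ImmAddSub(0x123) == (0x123u << 10));
      assert(ImmAddSub(0x123000) == ((0x123u << 10) | (1u << 22)));
      return 0;
    }
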
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index d072433633..5445fe1a1b 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -580,8 +580,9 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = reinterpret_cast<byte*>(buffer_);
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
- desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
- reloc_info_writer.pos();
+ desc->reloc_size =
+ static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos());
desc->origin = this;
}
}
@@ -600,13 +601,13 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n^2) behaviour.
int links_checked = 0;
- int linkoffset = label->pos();
+ int64_t linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
- int linkpcoffset = link->ImmPCOffset();
- int prevlinkoffset = linkoffset + linkpcoffset;
+ int64_t linkpcoffset = link->ImmPCOffset();
+ int64_t prevlinkoffset = linkoffset + linkpcoffset;
end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset;
@@ -645,7 +646,8 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// currently referring to this label.
label->Unuse();
} else {
- label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ label->link_to(
+ static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
}
} else if (branch == next_link) {
@@ -721,7 +723,7 @@ void Assembler::bind(Label* label) {
while (label->is_linked()) {
int linkoffset = label->pos();
Instruction* link = InstructionAt(linkoffset);
- int prevlinkoffset = linkoffset + link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());
CheckLabelLinkChain(label);
@@ -811,12 +813,13 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
while (!end_of_chain) {
Instruction * link = InstructionAt(link_offset);
- link_pcoffset = link->ImmPCOffset();
+ link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers.
if (link->IsImmBranch()) {
- int max_reachable_pc = InstructionOffset(link) +
- Instruction::ImmBranchRange(link->BranchType());
+ int max_reachable_pc =
+ static_cast<int>(InstructionOffset(link) +
+ Instruction::ImmBranchRange(link->BranchType()));
typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc);
@@ -888,12 +891,12 @@ bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
// 0: ldr xzr, #<size of pool>
- bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);
// It is still worth asserting the marker is complete.
// 4: blr xzr
DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
- instr->following()->Rn() == xzr.code()));
+ instr->following()->Rn() == kZeroRegCode));
return result;
}
@@ -909,7 +912,7 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
const char* message =
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
- int size = kDebugMessageOffset + strlen(message) + 1;
+ int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
@@ -1599,9 +1602,11 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
+ DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
+ int offset = static_cast<int>(addr.offset());
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
- ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+ ImmLSPair(offset, CalcLSPairDataSize(op));
Instr addrmodeop;
if (addr.IsImmediateOffset()) {
@@ -1645,11 +1650,11 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
DCHECK(!rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(addr.IsImmediateOffset());
-
LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask));
- Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
- ImmLSPair(addr.offset(), size));
+ DCHECK(IsImmLSPair(addr.offset(), size));
+ int offset = static_cast<int>(addr.offset());
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}
@@ -2137,13 +2142,13 @@ Instr Assembler::ImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm);
// bit7: a000.0000
- uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
- uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
- uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
- return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+ return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
}
@@ -2188,8 +2193,8 @@ void Assembler::MoveWide(const Register& rd,
DCHECK(is_uint16(imm));
- Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
- Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
+ ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
@@ -2205,7 +2210,7 @@ void Assembler::AddSub(const Register& rd,
DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
- ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
@@ -2259,7 +2264,7 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
- EmitData(string, len);
+ EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
@@ -2362,7 +2367,8 @@ void Assembler::ConditionalCompare(const Register& rn,
if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
DCHECK(IsImmConditionalCompare(immediate));
- ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ ccmpop = ConditionalCompareImmediateFixed | op |
+ ImmCondCmp(static_cast<unsigned>(immediate));
} else {
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
@@ -2502,15 +2508,16 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
- int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
- if (IsImmLSScaled(offset, size)) {
+ if (IsImmLSScaled(addr.offset(), size)) {
+ int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode.
Emit(LoadStoreUnsignedOffsetFixed | memop |
ImmLSUnsigned(offset >> size));
- } else if (IsImmLSUnscaled(offset)) {
+ } else if (IsImmLSUnscaled(addr.offset())) {
+ int offset = static_cast<int>(addr.offset());
// Use the unscaled addressing mode.
Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
} else {
@@ -2536,7 +2543,8 @@ void Assembler::LoadStore(const CPURegister& rt,
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
- if (IsImmLSUnscaled(offset)) {
+ if (IsImmLSUnscaled(addr.offset())) {
+ int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else {
@@ -2568,6 +2576,14 @@ bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
}
+bool Assembler::IsImmLLiteral(int64_t offset) {
+ int inst_size = static_cast<int>(kInstructionSizeLog2);
+ bool offset_is_inst_multiple =
+ (((offset >> inst_size) << inst_size) == offset);
+ return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
+}
+
+
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
@@ -2849,7 +2865,8 @@ void Assembler::GrowBuffer() {
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
- desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+ desc.reloc_size =
+ static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer;
@@ -3065,7 +3082,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
// Record the veneer pool size.
- int pool_size = SizeOfCodeGeneratedSince(&size_check);
+ int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
if (unresolved_branches_.empty()) {
@@ -3113,7 +3130,8 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int Assembler::buffer_space() const {
- return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
+ return static_cast<int>(reloc_info_writer.pos() -
+ reinterpret_cast<byte*>(pc_));
}
@@ -3124,20 +3142,6 @@ void Assembler::RecordConstPool(int size) {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
@@ -3171,6 +3175,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
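
[Editorial sketch] The new IsImmLLiteral above accepts a load-literal offset only if it is a multiple of the 4-byte instruction size and fits the signed literal immediate field. A standalone version of the same check; the 19-bit width is my assumption about ImmLLiteral_width (matching the A64 ldr-literal imm19 field), not taken from the patch:

    #include <cassert>
    #include <cstdint>

    bool IsImmLLiteral(int64_t offset) {
      const int kInstructionSizeLog2 = 2;  // 4-byte instructions
      const int kImmLLiteralWidth = 19;    // assumed ImmLLiteral_width
      bool inst_multiple =
          ((offset >> kInstructionSizeLog2) << kInstructionSizeLog2) == offset;
      // is_intn-style check: fits in a signed kImmLLiteralWidth-bit field.
      bool fits = offset >= -(INT64_C(1) << (kImmLLiteralWidth - 1)) &&
                  offset < (INT64_C(1) << (kImmLLiteralWidth - 1));
      return inst_multiple && fits;
    }

    int main() {
      assert(IsImmLLiteral(8));
      assert(!IsImmLLiteral(6));  // not instruction-aligned
      return 0;
    }
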
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index d672589462..5fab081d4b 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -764,7 +764,7 @@ class ConstPool {
shared_entries_count(0) {}
void RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const {
- return shared_entries_count + unique_entries_.size();
+ return shared_entries_count + static_cast<int>(unique_entries_.size());
}
bool IsEmpty() const {
return shared_entries_.empty() && unique_entries_.empty();
@@ -851,6 +851,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
inline void Unreachable();
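A sketch of the padding loop the new DataAlign declaration describes, using a
hypothetical byte buffer in place of the assembler's pc offset:

#include <cassert>
#include <cstddef>
#include <vector>

void DataAlignSketch(std::vector<unsigned char>* buffer, std::size_t m) {
  assert(m >= 2 && (m & (m - 1)) == 0);  // m must be a power of 2 (>= 2).
  while (buffer->size() % m != 0) {
    buffer->push_back(0);  // Smallest padding possible: one zero byte at a time.
  }
}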
@@ -871,13 +874,10 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool);
- inline static void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ inline static Address target_address_at(Address pc, Address constant_pool);
+ inline static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc,
Code* code,
@@ -951,7 +951,7 @@ class Assembler : public AssemblerBase {
// Return the number of instructions generated from label to the
// current position.
- int InstructionsGeneratedSince(const Label* label) {
+ uint64_t InstructionsGeneratedSince(const Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
@@ -1767,6 +1767,8 @@ class Assembler : public AssemblerBase {
// Required by V8.
void dd(uint32_t data) { dc32(data); }
void db(uint8_t data) { dc8(data); }
+ void dq(uint64_t data) { dc64(data); }
+ void dp(uintptr_t data) { dc64(data); }
// Code generation helpers --------------------------------------------------
@@ -1774,7 +1776,7 @@ class Assembler : public AssemblerBase {
Instruction* pc() const { return Instruction::Cast(pc_); }
- Instruction* InstructionAt(int offset) const {
+ Instruction* InstructionAt(ptrdiff_t offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
}
@@ -1841,7 +1843,7 @@ class Assembler : public AssemblerBase {
// Data Processing encoding.
inline static Instr SF(Register rd);
- inline static Instr ImmAddSub(int64_t imm);
+ inline static Instr ImmAddSub(int imm);
inline static Instr ImmS(unsigned imms, unsigned reg_size);
inline static Instr ImmR(unsigned immr, unsigned reg_size);
inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
@@ -1876,10 +1878,11 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+ static bool IsImmLLiteral(int64_t offset);
// Move immediates encoding.
- inline static Instr ImmMoveWide(uint64_t imm);
- inline static Instr ShiftMoveWide(int64_t shift);
+ inline static Instr ImmMoveWide(int imm);
+ inline static Instr ShiftMoveWide(int shift);
// FP Immediates.
static Instr ImmFP32(float imm);
@@ -1908,11 +1911,12 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 3df8896576..45ac1a063b 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -331,6 +331,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -360,11 +361,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0;
Register constructor = x1;
Register original_constructor = x3;
- // x1: constructor function
+
+ // Preserve the incoming parameters on the stack.
__ SmiTag(argc);
- __ Push(argc, constructor);
- // sp[0] : Constructor function.
- // sp[1]: number of arguments (smi-tagged)
+ if (use_new_target) {
+ __ Push(argc, constructor, original_constructor);
+ } else {
+ __ Push(argc, constructor);
+ }
+ // sp[0]: new.target (if used)
+ // sp[0/1]: Constructor function.
+ // sp[1/2]: number of arguments (smi-tagged)
Label rt_call, count_incremented, allocated, normal_new;
__ Cmp(constructor, original_constructor);
@@ -522,7 +529,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Add(new_obj, new_obj, kHeapObjectTag);
// Check if a non-empty properties array is needed. Continue with
- // allocated object if not, or fall through to runtime call if it is.
+ // allocated object if not; allocate and initialize a FixedArray if yes.
Register element_count = x3;
__ Ldrb(element_count,
FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
@@ -580,7 +587,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&allocated);
if (create_memento) {
- __ Peek(x10, 2 * kXRegSize);
+ int offset = (use_new_target ? 3 : 2) * kXRegSize;
+ __ Peek(x10, offset);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
@@ -592,18 +600,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- __ Push(x4, x4);
+ // Restore the parameters.
+ if (use_new_target) {
+ __ Pop(original_constructor);
+ }
+ __ Pop(constructor);
// Reload the number of arguments from the stack.
// Set it up in x0 for the function call below.
- // jssp[0]: receiver
- // jssp[1]: receiver
- // jssp[2]: constructor function
- // jssp[3]: number of arguments (smi-tagged)
- __ Peek(constructor, 2 * kXRegSize); // Load constructor.
- __ Peek(argc, 3 * kXRegSize); // Load number of arguments.
+ // jssp[0]: number of arguments (smi-tagged)
+ __ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
+ if (use_new_target) {
+ __ Push(original_constructor, x4, x4);
+ } else {
+ __ Push(x4, x4);
+ }
+
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -614,8 +628,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp)
// jssp[0]: receiver
// jssp[1]: receiver
- // jssp[2]: constructor function
- // jssp[3]: number of arguments (smi-tagged)
+ // jssp[2]: new.target (if used)
+ // jssp[2/3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
@@ -646,15 +660,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
- // jssp[1]: constructor function
- // jssp[2]: number of arguments (smi-tagged)
+ // jssp[1]: new.target (if used)
+ // jssp[1/2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -665,8 +681,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// x0: result
// jssp[0]: receiver (newly allocated object)
- // jssp[1]: constructor function
- // jssp[2]: number of arguments (smi-tagged)
+ // jssp[1]: number of arguments (smi-tagged)
__ JumpIfSmi(x0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -683,9 +698,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit);
// x0: result
// jssp[0]: receiver (newly allocated object)
- // jssp[1]: constructor function
- // jssp[2]: number of arguments (smi-tagged)
- __ Peek(x1, 2 * kXRegSize);
+ // jssp[1]: new.target (if used)
+ // jssp[1/2]: number of arguments (smi-tagged)
+ int offset = (use_new_target ? 2 : 1) * kXRegSize;
+ __ Peek(x1, offset);
// Leave construct frame.
}
@@ -698,12 +714,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
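The stack-layout arithmetic behind the use_new_target paths can be condensed
into one expression; kXRegSize is assumed to be 8 bytes for this sketch:

constexpr int kXRegSizeSketch = 8;

// With new.target pushed, the AllocationSite slot sits one register deeper
// below [argc, constructor] on the stack.
constexpr int MementoPeekOffset(bool use_new_target) {
  return (use_new_target ? 3 : 2) * kXRegSizeSketch;
}

static_assert(MementoPeekOffset(false) == 16, "argc, constructor");
static_assert(MementoPeekOffset(true) == 24, "argc, constructor, new.target");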
@@ -731,7 +752,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// sp[1]: new.target
// sp[2]: receiver (the hole)
-
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -759,8 +779,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ Drop(1);
__ Bind(&done_copying_arguments);
- __ Add(x0, x0, Operand(1)); // new.target
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -787,8 +805,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// jssp[0]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Load number of arguments (smi).
- __ Peek(x1, 0);
+ // Load number of arguments (smi), skipping over new.target.
+ __ Peek(x1, kPointerSize);
// Leave construct frame
}
@@ -1388,6 +1406,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
__ Ldr(key, MemOperand(fp, indexOffset));
__ B(&entry);
@@ -1397,7 +1417,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ Mov(slot, Smi::FromInt(index));
+ __ Mov(vector, feedback_vector);
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@@ -1733,13 +1760,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ Bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
Register copy_from = x10;
Register copy_end = x11;
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ Ldr(scratch1,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch2.W(),
+ FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAllClear(scratch2.W(),
+ (1 << SharedFunctionInfo::kStrongModeFunction),
+ &no_strong_error);
+
+ // What we really care about is the required number of arguments.
+ DCHECK_EQ(kPointerSize, kInt64Size);
+ __ Ldr(scratch2.W(),
+ FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
+ __ Cmp(argc_actual, Operand(scratch2, LSR, 1));
+ __ B(ge, &no_strong_error);
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ Bind(&no_strong_error);
+ EnterArgumentsAdaptorFrame(masm);
+
__ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
@@ -1810,6 +1862,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 7f3c995204..e67b4fd2be 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -102,17 +102,17 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
- x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
+ queue.Queue(descriptor.GetRegisterParameter(i));
}
queue.PushQueued();
@@ -203,13 +203,11 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch,
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
+ Register right, Register scratch,
FPRegister double_scratch,
- Label* slow,
- Condition cond) {
+ Label* slow, Condition cond,
+ Strength strength) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -223,10 +221,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Smis. If it's not a heap number, then return equal.
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
__ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
slow, ge);
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics, since
+ // we need to throw a TypeError. Smis have already been ruled out.
+ __ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
+ __ B(eq, &return_equal);
+ __ Tst(right_type, Operand(kIsNotStringMask));
+ __ B(ne, slow);
+ }
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
@@ -235,8 +243,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Comparing JS objects with <=, >= is complicated.
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ Tst(right_type, Operand(kIsNotStringMask));
+ __ B(ne, slow);
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
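The Tst against kIsNotStringMask works because V8's instance-type encoding
keeps that bit clear for all string types. A sketch, with the mask value
assumed here for illustration:

#include <cstdint>

constexpr uint32_t kIsNotStringMaskSketch = 0x80;

bool IsStringTypeSketch(uint32_t instance_type) {
  // A non-zero AND means "not a string", which is what the stub branches on.
  return (instance_type & kIsNotStringMaskSketch) == 0;
}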
@@ -513,7 +529,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
+ strength());
// If either is a smi (we know that at least one is not a smi), then they can
// only be strictly equal if the other is a HeapNumber.
@@ -632,7 +649,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- native = Builtins::COMPARE;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
@@ -1433,9 +1451,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@@ -1455,9 +1472,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register result = x0;
Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -1669,7 +1685,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
Register key = ArgumentsAccessReadDescriptor::index();
DCHECK(arg_count.is(x0));
@@ -1726,8 +1741,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// jssp[8]: address of receiver argument
// jssp[16]: function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Register caller_fp = x10;
@@ -1759,8 +1772,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
//
// Returns pointer to result object in x0.
- CHECK(!has_new_target());
-
// Note: arg_count_smi is an alias of param_count_smi.
Register arg_count_smi = x3;
Register param_count_smi = x3;
@@ -1869,8 +1880,9 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(aliased_args_map,
- ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
+ __ Ldr(
+ aliased_args_map,
+ ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
__ Cmp(mapped_params, 0);
__ CmovX(sloppy_args_map, aliased_args_map, ne);
@@ -2087,15 +2099,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(caller_fp,
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
- if (has_new_target()) {
- __ Cmp(param_count, Operand(0));
- Label skip_decrement;
- __ B(eq, &skip_decrement);
- // Skip new.target: it is not a part of arguments.
- __ Sub(param_count, param_count, Operand(1));
- __ SmiTag(param_count_smi, param_count);
- __ Bind(&skip_decrement);
- }
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
@@ -2192,19 +2195,21 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
- // jssp[0]: index of rest parameter (tagged)
- // jssp[8]: number of parameters (tagged)
- // jssp[16]: address of receiver argument
+ // jssp[0]: language mode (tagged)
+ // jssp[8]: index of rest parameter (tagged)
+ // jssp[16]: number of parameters (tagged)
+ // jssp[24]: address of receiver argument
//
// Returns pointer to result object in x0.
// Get the stub arguments from the frame, and make an untagged copy of the
// parameter count.
- Register rest_index_smi = x1;
- Register param_count_smi = x2;
- Register params = x3;
+ Register language_mode_smi = x1;
+ Register rest_index_smi = x2;
+ Register param_count_smi = x3;
+ Register params = x4;
Register param_count = x13;
- __ Pop(rest_index_smi, param_count_smi, params);
+ __ Pop(language_mode_smi, rest_index_smi, param_count_smi, params);
__ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed.
@@ -2217,11 +2222,12 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &runtime);
- // x1 rest_index_smi index of rest parameter
- // x2 param_count_smi number of parameters passed to function (smi)
- // x3 params pointer to parameters
- // x11 caller_fp caller's frame pointer
- // x13 param_count number of parameters passed to function
+ // x1 language_mode_smi language mode
+ // x2 rest_index_smi index of rest parameter
+ // x3 param_count_smi number of parameters passed to function (smi)
+ // x4 params pointer to parameters
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
// Patch the argument length and parameters pointer.
__ Ldr(param_count_smi,
@@ -2232,8 +2238,8 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime);
- __ Push(params, param_count_smi, rest_index_smi);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ Push(params, param_count_smi, rest_index_smi, language_mode_smi);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@@ -3086,10 +3092,18 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
+ // Increment the call count for monomorphic function calls.
+ __ Add(feedback_vector, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(feedback_vector, feedback_vector,
+ Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ Ldr(index, FieldMemOperand(feedback_vector, 0));
+ __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Str(index, FieldMemOperand(feedback_vector, 0));
+
Register allocation_site = feedback_vector;
+ Register original_constructor = index;
__ Mov(allocation_site, scratch);
-
- Register original_constructor = x3;
__ Mov(original_constructor, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
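The two Adds above compute the address of the call-count slot, which lives
one pointer past the feedback slot itself. The layout constants below are
assumed 64-bit values for the sketch:

constexpr int kPointerSizeSketch = 8;
constexpr int kFixedArrayHeaderSizeSketch = 16;  // Map + length words.

constexpr int CallCountByteOffset(int slot_index) {
  return kFixedArrayHeaderSizeSketch + slot_index * kPointerSizeSketch +
         kPointerSizeSketch;  // One pointer past the feedback slot.
}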
@@ -3155,6 +3169,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(function, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ Add(feedback_vector, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(feedback_vector, feedback_vector,
+ Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ Ldr(index, FieldMemOperand(feedback_vector, 0));
+ __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Str(index, FieldMemOperand(feedback_vector, 0));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -3230,6 +3253,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ // Initialize the call counter.
+ __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ Adds(x4, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
+
// Store the function. Use a stub since we need a frame for allocation.
// x2 - vector
// x3 - slot
@@ -3324,9 +3353,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// If index is a heap number, try converting it to an integer.
__ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Push(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
@@ -3341,9 +3370,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(object_, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister());
} else {
__ Pop(object_);
}
@@ -3471,7 +3500,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ Ret();
__ Bind(&unordered);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4467,15 +4496,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4494,12 +4523,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4596,11 +4623,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
- Register name = VectorLoadICDescriptor::NameRegister(); // x2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
+ Register name = LoadWithVectorDescriptor::NameRegister(); // x2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
@@ -4640,21 +4667,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
- Register key = VectorLoadICDescriptor::NameRegister(); // x2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
+ Register key = LoadWithVectorDescriptor::NameRegister(); // x2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
@@ -4686,7 +4713,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
@@ -4710,6 +4737,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ Bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ Bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -5412,7 +5491,7 @@ static const int kCallApiFunctionSpillSpace = 4;
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
+ return static_cast<int>(ref0.address() - ref1.address());
}
@@ -5751,6 +5830,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 528400698b..1b64a625f9 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -138,8 +138,10 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
- int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
- int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+ auto offset_to_incremental_noncompacting =
+ static_cast<int32_t>(instr1->ImmPCOffset());
+ auto offset_to_incremental_compacting =
+ static_cast<int32_t>(instr2->ImmPCOffset());
switch (mode) {
case STORE_BUFFER_ONLY:
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index cda6e5b6c0..2d1ef57f38 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -634,6 +634,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 8db120ba45..fc7bef69e9 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -84,6 +84,8 @@ const int64_t kXMaxInt = 0x7fffffffffffffffL;
const int64_t kXMinInt = 0x8000000000000000L;
const int32_t kWMaxInt = 0x7fffffff;
const int32_t kWMinInt = 0x80000000;
+const unsigned kIp0Code = 16;
+const unsigned kIp1Code = 17;
const unsigned kFramePointerRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 11ba7c98a0..8258fbfde3 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -120,6 +120,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
index 56e3c031ed..2eec4466e1 100644
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -203,53 +203,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-arm.cc).
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.Bit() | name.Bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().Bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0, x10);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-arm64.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for keyed IC load (from ic-arm.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC keyed store call (from ic-arm64.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- r0 : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -346,6 +299,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
const bool LiveEdit::kFrameDropperSupported = true;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
index 5cca85ea28..08aab4286e 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -81,6 +81,7 @@ VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.cc b/deps/v8/src/arm64/delayed-masm-arm64.cc
index b51e77ec86..77ad79199e 100644
--- a/deps/v8/src/arm64/delayed-masm-arm64.cc
+++ b/deps/v8/src/arm64/delayed-masm-arm64.cc
@@ -193,6 +193,7 @@ void DelayedMasm::EmitPending() {
ResetPending();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index b28d6f1d8b..41a87643f2 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -354,11 +354,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
+ // No embedded constant pool support.
UNREACHABLE();
}
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 36bad37705..232dfce5f0 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -1369,11 +1369,12 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
if (format[5] == 'I') {
- uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
+ << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK(format[5] == 'L');
- AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+ AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
}
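The cast added to the IMoveImm case matters for correctness, not just style:
shifting a 32-bit value by 16 * hw is undefined behaviour once hw >= 2, so
the operand is widened first. A sketch:

#include <cstdint>

uint64_t DecodeMoveWideSketch(uint32_t imm16, int hw) {
  return static_cast<uint64_t>(imm16) << (16 * hw);  // hw in [0, 3].
}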
@@ -1383,13 +1384,13 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
case 'L': {
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
- AppendToOutput("pc%+" PRId64,
- instr->ImmLLiteral() << kLoadLiteralScaleLog2);
+ AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
+ << kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
if (instr->ImmLS() != 0) {
- AppendToOutput(", #%" PRId64, instr->ImmLS());
+ AppendToOutput(", #%" PRId32, instr->ImmLS());
}
return 3;
}
@@ -1397,14 +1398,14 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
if (instr->ImmLSPair() != 0) {
// format[3] is the scale value. Convert to a number.
int scale = format[3] - 0x30;
- AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+ AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
}
return 4;
}
case 'U': { // ILU - Immediate Load/Store Unsigned.
if (instr->ImmLSUnsigned() != 0) {
- AppendToOutput(", #%" PRIu64,
- instr->ImmLSUnsigned() << instr->SizeLS());
+ AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned()
+ << instr->SizeLS());
}
return 3;
}
@@ -1427,7 +1428,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
AppendToOutput("#%d", 64 - instr->FPScale());
return 8;
} else {
- AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
return 9;
}
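The PRId64 -> PRId32 swaps in this file follow from the accessors now
returning 32-bit values: printf-style format macros must match the argument
width exactly. A minimal illustration:

#include <cinttypes>
#include <cstdio>

void PrintImmSketch(int32_t imm32, int64_t imm64) {
  std::printf("#%" PRId32 "\n", imm32);  // Matches int32_t.
  std::printf("#%" PRId64 "\n", imm64);  // Matches int64_t.
}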
@@ -1538,7 +1539,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
- AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+ AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
instr->ImmDPShift());
}
return 3;
@@ -1729,7 +1730,8 @@ void PrintDisassembler::ProcessOutput(Instruction* instr) {
GetOutput());
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
namespace disasm {
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
index b3633e07ba..73c678aaa6 100644
--- a/deps/v8/src/arm64/frames-arm64.cc
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 883079c9be..963dc3e025 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -63,36 +63,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kConstructorOffset = -5 * kPointerSize;
- static const int kImplicitReceiverOffset = -6 * kPointerSize;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
index aee9ddf403..324bfb8160 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -103,10 +103,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-arm.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -125,7 +121,7 @@ void FullCodeGenerator::Generate() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ info->MayUseThis() && info->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@@ -192,17 +188,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register_x1 = true;
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -217,8 +213,9 @@ void FullCodeGenerator::Generate() {
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -230,8 +227,8 @@ void FullCodeGenerator::Generate() {
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()), x10,
+ x11, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
@@ -242,10 +239,47 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register_x1) {
+ __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+      // The write barrier clobbers the register again, so keep it marked as such.
+ }
+ SetVar(this_function_var, x1, x0, x2);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+ // Get the frame pointer for the calling frame.
+ __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ Label check_frame_marker;
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+ __ Bind(&check_frame_marker);
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+
+ Label non_construct_frame, done;
+
+ __ B(ne, &non_construct_frame);
+ __ Ldr(x0,
+ MemOperand(x2, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ B(&done);
+
+ __ Bind(&non_construct_frame);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+
+ __ Bind(&done);
+
+ SetVar(new_target_var, x0, x2, x3);
+ }
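In pseudo-C++, the frame walk emitted above does the following; Frame and its
fields are hypothetical types, not V8's:

enum class Marker { kArgumentsAdaptor, kConstruct, kOther };

struct Frame {
  Frame* caller;
  Marker marker;
  void* original_constructor;  // Meaningful only for construct frames.
};

void* NewTargetSketch(Frame* calling, void* undefined_value) {
  if (calling->marker == Marker::kArgumentsAdaptor) {
    calling = calling->caller;  // Skip over the adaptor frame.
  }
  if (calling->marker == Marker::kConstruct) {
    return calling->original_constructor;  // new.target recorded at construct.
  }
  return undefined_value;  // Not called as a constructor.
}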
// Possibly allocate RestParameters
int rest_index;
@@ -255,15 +289,12 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ Add(x3, fp, StandardFrameConstants::kCallerSPOffset + offset);
__ Mov(x2, Smi::FromInt(num_parameters));
__ Mov(x1, Smi::FromInt(rest_index));
- __ Push(x3, x2, x1);
+ __ Mov(x0, Smi::FromInt(language_mode()));
+ __ Push(x3, x2, x1, x0);
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -300,7 +331,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, x0, x1, x2);
@@ -310,7 +341,6 @@ void FullCodeGenerator::Generate() {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
-
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
@@ -324,7 +354,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -404,7 +434,8 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
// we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
// the result).
int distance =
- masm_->SizeOfCodeGeneratedSince(back_edge_target) + kCodeSizeMultiplier / 2;
+ static_cast<int>(masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+ kCodeSizeMultiplier / 2);
int weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
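The distance computation rounds to nearest by adding half the multiplier
before the integer division. A sketch with assumed constants (V8 defines its
own per-architecture values):

#include <algorithm>

constexpr int kCodeSizeMultiplierSketch = 149;
constexpr int kMaxBackEdgeWeightSketch = 127;

int BackEdgeWeightSketch(int code_size_since_target) {
  int distance = code_size_since_target + kCodeSizeMultiplierSketch / 2;
  return std::min(kMaxBackEdgeWeightSketch,
                  std::max(1, distance / kCodeSizeMultiplierSketch));
}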
@@ -467,7 +498,7 @@ void FullCodeGenerator::EmitReturnSequence() {
{
InstructionAccurateScope scope(masm_,
Assembler::kJSReturnSequenceInstructions);
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
// This code is generated using Assembler methods rather than Macro
// Assembler methods because it will be patched later on, and so the size
@@ -486,9 +517,6 @@ void FullCodeGenerator::EmitReturnSequence() {
__ add(current_sp, current_sp, ip0);
__ ret();
int32_t arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
__ dc64(kXRegSize * arg_count);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
@@ -790,12 +818,8 @@ void FullCodeGenerator::SetVar(Variable* var,
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
// scratch0 contains the correct context.
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWriteContextSlot(scratch0, static_cast<int>(location.offset()),
+ src, scratch1, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
@@ -849,7 +873,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
@@ -857,8 +882,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
@@ -866,7 +891,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -877,7 +902,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ Mov(x2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
@@ -908,25 +933,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ Function Declaration");
VisitForAccumulatorValue(declaration->fun());
__ Str(result_register(), StackOperand(variable));
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ Function Declaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -945,7 +971,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Function Declaration");
__ Mov(x2, Operand(variable->name()));
__ Mov(x1, Smi::FromInt(NONE));
@@ -963,20 +989,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -1056,9 +1083,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1102,9 +1129,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+
// TODO(all): This visitor probably needs better comments and a revisit.
- SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1112,7 +1141,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
Register null_value = x15;
@@ -1206,7 +1235,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ Bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
// Load the current count to x0, load the length to x1.
__ PeekPair(x0, x1, 0);
@@ -1239,10 +1268,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(x1, x3);
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ Mov(x3, x0);
- __ Cbz(x0, loop_statement.continue_label());
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex,
+ loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
// entry in register x3.
@@ -1250,7 +1280,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Mov(result_register(), x3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1312,39 +1342,15 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
-
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->HomeObjectFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- __ Mov(x10, Operand(isolate()->factory()->undefined_value()));
- __ cmp(x0, x10);
- Label done;
- __ b(&done, ne);
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1393,16 +1399,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ Bind(&fast);
}
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
- : CONTEXTUAL;
- CallLoadIC(mode);
+  // All extension objects were empty and it is safe to use the normal global
+  // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
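
The fast path above now funnels into EmitGlobalVariableLoad (introduced in
the next hunk), which picks CONTEXTUAL or NOT_CONTEXTUAL from typeof_state.
The observable difference: a contextual load of a missing global throws a
ReferenceError, while typeof applied to a missing global must evaluate to
"undefined". A minimal model of that dispatch, with invented names standing
in for the load-IC machinery:

    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };

    // Hypothetical global object; the real code goes through a load IC.
    using GlobalObject = std::unordered_map<std::string, int>;

    int LoadGlobal(const GlobalObject& global, const std::string& name,
                   TypeofState typeof_state) {
      auto it = global.find(name);
      if (it != global.end()) return it->second;
      // NOT_CONTEXTUAL: yield a sentinel so typeof can report "undefined".
      if (typeof_state == INSIDE_TYPEOF) return 0;
      // CONTEXTUAL: a missing binding is a ReferenceError.
      throw std::runtime_error("ReferenceError: " + name + " is not defined");
    }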
@@ -1468,30 +1467,43 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
// Record position before possible IC call.
- SetSourcePosition(proxy->position());
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(x0);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
@@ -1561,16 +1573,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Label done, slow;
// Generate code for loading from variables potentially shadowed by
// eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ Bind(&slow);
Comment cmnt(masm_, "Lookup variable");
__ Mov(x1, Operand(var->name()));
__ Push(cp, x1); // Context and name.
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ Bind(&done);
context()->Plug(x0);
break;
@@ -1640,7 +1656,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
@@ -1661,13 +1676,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in x0.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+  // store_slot_index points to the vector IC slot used by the next store IC.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1694,7 +1708,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1702,6 +1721,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), 0);
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1714,7 +1736,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1752,9 +1775,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x10);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1789,7 +1816,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1835,6 +1863,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(x0);
}
+
+  // Verify that compilation exactly consumed the number of store IC slots
+  // that the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
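
The new store_slot_index bookkeeping and the closing DCHECK form a simple
contract: the AST pass (ObjectLiteral::ComputeFeedbackRequirements)
pre-allocates one vector IC slot per store the generator will emit, and
codegen must consume them in order, exactly once each. A reduced sketch of
that invariant, with toy types in place of the real feedback vector:

    #include <cassert>

    // Toy stand-in: the numbering pass counts the stores it expects.
    struct LiteralSlots {
      int slot_count = 0;
      int Allocate() { return slot_count++; }
    };

    // stores_to_emit is computed independently during code generation.
    void EmitStores(const LiteralSlots& lit, int stores_to_emit) {
      int store_slot_index = 0;
      for (int i = 0; i < stores_to_emit; i++) {
        int slot = store_slot_index++;  // EmitLoadStoreICSlot(GetNthSlot(...))
        (void)slot;                     // ...CallStoreIC() would use it here
      }
      // Mirrors the DCHECK: consumption must match allocation exactly,
      // otherwise store ICs would read feedback from the wrong slots.
      assert(store_slot_index == lit.slot_count);
    }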
@@ -1873,8 +1905,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1887,7 +1922,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ Peek(x6, kPointerSize); // Copy of array literal.
__ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
__ Str(result_register(), FieldMemOperand(x1, offset));
@@ -1896,12 +1931,37 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ Mov(x3, Smi::FromInt(i));
+ __ Mov(x3, Smi::FromInt(array_index));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+  // In case the array literal contains spread expressions it has two parts.
+  // The first part is the "static" array which has a literal index and is
+  // handled above. The second part starts at the first spread expression
+  // (inclusive); these elements get appended to the array. Note that the
+  // number of elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ Drop(1); // literal index
+ __ Pop(x0);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(x0);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
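
The two loops above split array literal emission at the first spread: before
it, element positions are compile-time constants and plain indexed stores
suffice; from it on, every element is appended at run time, because an
iterable's length is unknown until iteration. The same shape in a
self-contained sketch (spreads are modelled as pre-expanded value lists for
brevity):

    #include <vector>

    struct Element {
      bool is_spread;
      std::vector<int> values;  // a spread contributes 0..n values,
                                // a plain element exactly one
    };

    std::vector<int> BuildArray(const std::vector<Element>& elements) {
      std::vector<int> array;
      size_t i = 0;
      // Phase 1: literal indices, analogous to the FieldMemOperand stores.
      for (; i < elements.size() && !elements[i].is_spread; i++) {
        array.push_back(elements[i].values[0]);
      }
      // Phase 2: append-only, analogous to CONCAT_ITERABLE_TO_ARRAY and
      // Runtime::kAppendElement.
      for (; i < elements.size(); i++) {
        const Element& e = elements[i];
        if (e.is_spread) {
          array.insert(array.end(), e.values.begin(), e.values.end());
        } else {
          array.push_back(e.values[0]);
        }
      }
      return array;
    }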
@@ -1917,9 +1977,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1936,8 +1997,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = x10;
@@ -1946,9 +2009,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
@@ -2004,7 +2068,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(x0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
@@ -2021,14 +2084,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
break;
@@ -2051,52 +2113,45 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Call keyed load IC. It has arguments key and receiver in x0 and x1.
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
@@ -2119,8 +2174,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(code, expr->BinaryOperationFeedbackId());
@@ -2202,8 +2257,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ Pop(x1);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
@@ -2214,7 +2269,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in x0.
DCHECK(lit != NULL);
__ push(x0);
@@ -2248,7 +2304,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2284,17 +2341,18 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2306,13 +2364,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(x0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; x0: home_object
Register scratch = x10;
Register scratch2 = x11;
@@ -2327,9 +2387,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ Push(x0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = x10;
Register scratch2 = x11;
@@ -2352,6 +2412,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Mov(StoreDescriptor::NameRegister(), x0);
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::ValueRegister());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2375,13 +2436,14 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
- if (var->IsUnallocated()) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2472,12 +2534,15 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
@@ -2518,15 +2583,18 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC.
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack?
__ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(x0));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
@@ -2535,6 +2603,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2543,9 +2612,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), x0);
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2556,9 +2625,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2619,29 +2688,30 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
+ SetExpressionPosition(prop);
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = x10;
- SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(x0);
+ SuperPropertyReference* super_ref =
+ callee->AsProperty()->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
__ Push(x0, scratch);
__ Push(key->value());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
- // - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+  //  - key
+  //  - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2681,19 +2751,19 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
-
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
const Register scratch = x10;
- SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(x0);
+ SuperPropertyReference* super_ref =
+ callee->AsProperty()->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
__ Push(x0, scratch);
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2701,7 +2771,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2717,13 +2788,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
@@ -2750,32 +2818,22 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // Prepare to push the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
// Prepare to push the language mode.
- __ Mov(x12, Smi::FromInt(language_mode()));
+ __ Mov(x11, Smi::FromInt(language_mode()));
  // Prepare to push the start position of the scope the call resides in.
- __ Mov(x13, Smi::FromInt(scope()->start_position()));
+ __ Mov(x12, Smi::FromInt(scope()->start_position()));
// Push.
- __ Push(x9, x10, x11, x12, x13);
+ __ Push(x9, x10, x11, x12);
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(x0);
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
+ SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
Variable* this_var = super_ref->this_var()->var();
GetVar(x1, this_var);
Label uninitialized_this;
@@ -2785,7 +2843,49 @@ void FullCodeGenerator::EmitInitializeThisAfterSuper(
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ Bind(&slow);
+ // Call the runtime to find the function to call (returned in x0)
+ // and the object holding it (returned in x1).
+ __ Mov(x10, Operand(callee->name()));
+ __ Push(context_register(), x10);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(x0, x1); // Receiver, function.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ B(&call);
+ __ Bind(&done);
+ // Push function.
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the undefined to the call function stub.
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x0, x1);
+ __ Bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10); // Reserved receiver slot.
+ }
}
@@ -2802,17 +2902,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- {
- PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10); // Reserved receiver slot.
+ PushCalleeAndWithBaseObject(expr);
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
@@ -2825,15 +2920,13 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ Push(x10);
EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in x0 (function) and
- // x1 (receiver). Touch up the stack with the right values.
- __ PokePair(x1, x0, arg_count * kPointerSize);
+ // Touch up the stack with the resolved function.
+ __ Poke(x0, (arg_count + 1) * kPointerSize);
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Call the evaluated function.
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
@@ -2849,41 +2942,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ Bind(&slow);
- // Call the runtime to find the function to call (returned in x0)
- // and the object holding it (returned in x1).
- __ Mov(x10, Operand(proxy->name()));
- __ Push(context_register(), x10);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(x0, x1); // Receiver, function.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ B(&call);
- __ Bind(&done);
- // Push function.
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the undefined to the call function stub.
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x0, x1);
- __ Bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
@@ -2895,10 +2954,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
- }
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -2910,9 +2966,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
- }
__ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
__ Push(x1);
// Emit function call.
@@ -2935,7 +2989,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -2947,7 +3001,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
@@ -2971,11 +3025,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor();
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
+
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -2987,7 +3044,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
@@ -3013,7 +3070,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(x0);
}
@@ -3310,6 +3367,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_TYPED_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3559,6 +3638,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_DATE_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3567,23 +3668,19 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = x0;
Register result = x0;
Register stamp_addr = x10;
Register stamp_cache = x11;
- __ JumpIfSmi(object, &not_date_object);
- __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
-
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ B(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(x10, stamp);
- __ Ldr(stamp_addr, MemOperand(x10));
+ __ Mov(stamp_addr, stamp);
+ __ Ldr(stamp_addr, MemOperand(stamp_addr));
__ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
__ Cmp(stamp_addr, stamp_cache);
__ B(ne, &runtime);
@@ -3595,13 +3692,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ Bind(&runtime);
__ Mov(x1, index);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ B(&done);
+ __ Bind(&done);
}
- __ Bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ Bind(&done);
- context()->Plug(x0);
+ context()->Plug(result);
}
@@ -3893,11 +3987,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
- EmitLoadSuperConstructor();
+ // new.target
+ VisitForStackValue(args->at(0));
+
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -3916,8 +4014,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ Ldr(x1, MemOperand(x11, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(x1, x1);
- // Subtract 1 from arguments count, for new.target.
- __ Sub(x1, x1, Operand(1));
__ Mov(x0, x1);
// Get arguments pointer in x11.
@@ -4270,11 +4366,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4283,8 +4382,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ Pop(x10);
__ Push(x0, x10);
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4300,7 +4399,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4314,13 +4414,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
Handle<String> name = expr->name();
__ Mov(LoadDescriptor::NameRegister(), Operand(name));
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4328,8 +4424,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
@@ -4352,6 +4447,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4377,6 +4473,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(x0);
}
@@ -4401,10 +4498,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(x0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ Ldr(x12, GlobalObjectMemOperand());
__ Mov(x11, Operand(var->name()));
__ Mov(x10, Smi::FromInt(SLOPPY));
@@ -4414,7 +4512,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4501,10 +4599,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4526,8 +4623,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
const Register scratch = x10;
__ Peek(scratch, kPointerSize);
@@ -4537,9 +4635,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
__ Push(result_register());
const Register scratch1 = x10;
@@ -4615,9 +4713,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ B(&stub_call);
__ Bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4649,25 +4749,28 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Mov(x1, x0);
__ Mov(x0, Smi::FromInt(count_value));
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
{
Assembler::BlockPoolsScope scope(masm_);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), Token::ADD, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD,
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}
__ Bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in x0.
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(x0);
}
@@ -4678,7 +4781,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
}
@@ -4687,7 +4790,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4725,7 +4833,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4740,45 +4853,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(x0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ Bind(&slow);
- __ Mov(x0, Operand(proxy->name()));
- __ Push(cp, x0);
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ Bind(&done);
-
- context()->Plug(x0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -4865,7 +4939,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Try to generate an optimized comparison with a literal value.
// TODO(jbramley): This only checks common values like NaN or undefined.
@@ -4920,9 +4994,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ Bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4974,6 +5047,8 @@ void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -5063,7 +5138,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ Bind(&l_try);
__ Pop(x0); // result
- EnterTryBlock(expr->index(), &l_catch);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(x0); // result
__ B(&l_suspend);
@@ -5078,7 +5154,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ Peek(x0, generator_object_depth);
__ Push(x0); // g
- __ Push(Smi::FromInt(expr->index())); // handler-index
+ __ Push(Smi::FromInt(handler_index)); // handler-index
DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
__ Mov(x1, Smi::FromInt(l_continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
@@ -5091,7 +5167,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Pop(x0); // result
EmitReturnSequence();
__ Bind(&l_resume); // received in x0
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = 'next'; arg = received;
__ Bind(&l_next);
@@ -5104,11 +5180,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Bind(&l_call);
__ Peek(load_receiver, 1 * kPointerSize);
__ Peek(load_name, 2 * kPointerSize);
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
@@ -5124,10 +5198,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->DoneFeedbackSlot()));
- }
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->DoneFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
// The ToBooleanStub argument (result.done) is in x0.
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
@@ -5137,10 +5209,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ Pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- if (FLAG_vector_ics) {
- __ Mov(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->ValueFeedbackSlot()));
- }
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->ValueFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
context()->DropAndPlug(2, x0); // drop iter and g
break;
@@ -5362,6 +5432,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ Mov(x10, pending_message_obj);
__ Ldr(x10, MemOperand(x10));
__ Push(x10);
+
+ ClearPendingMessage();
}
@@ -5386,6 +5458,22 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(x10));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Mov(x13, pending_message_obj);
+ __ Str(x10, MemOperand(x13));
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
+}
+
+
#undef __
@@ -5484,6 +5572,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 7a5effe427..789268430d 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -93,9 +93,9 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
// met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
- int64_t n = BitN();
- int64_t imm_s = ImmSetBits();
- int64_t imm_r = ImmRotate();
+ int32_t n = BitN();
+ int32_t imm_s = ImmSetBits();
+ int32_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
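
Narrowing n, imm_s and imm_r to int32_t is safe because each is a small
bitfield extracted from the instruction word; the decoded element is then
replicated across the register. The replication step reduced to a
stand-alone function, a simplified analogue of the RepeatBitsAcrossReg
helper named above, assuming width is a power of two dividing reg_size:

    #include <cstdint>

    uint64_t RepeatBits(unsigned reg_size, uint64_t value, unsigned width) {
      // Keep only the low `width` bits of the pattern.
      uint64_t result =
          (width == 64) ? value : (value & ((UINT64_C(1) << width) - 1));
      // Double the populated span until it fills the register.
      for (unsigned i = width; i < reg_size; i *= 2) {
        result |= result << i;
      }
      return result;
    }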
@@ -211,7 +211,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
- int32_t offset) {
+ ptrdiff_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
@@ -242,7 +242,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
ptrdiff_t target_offset = DistanceTo(target);
Instr imm;
if (Instruction::IsValidPCRelOffset(target_offset)) {
- imm = Assembler::ImmPCRelAddress(target_offset);
+ imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(this,
@@ -254,9 +254,11 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
+ DCHECK(IsValidImmPCOffset(BranchType(),
+ DistanceTo(target) >> kInstructionSizeLog2));
+ int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
- ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
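
SetBranchImmTarget now asserts validity before narrowing: branch immediates
encode the distance in instructions, not bytes, so the byte distance is
scaled down by kInstructionSizeLog2 and then range-checked against the
branch type's immediate width. The arithmetic in isolation, with is_intn
re-derived here; the 19-bit width is the conditional-branch case:

    #include <cassert>
    #include <cstdint>

    const unsigned kInstructionSizeLog2 = 2;  // AArch64: 4-byte instructions

    // Does `value` fit in an n-bit signed immediate?
    bool IsIntN(int64_t value, unsigned n) {
      int64_t limit = INT64_C(1) << (n - 1);
      return -limit <= value && value < limit;
    }

    int32_t EncodeBranchOffset(int64_t byte_offset, unsigned imm_width) {
      assert(byte_offset % 4 == 0);            // IsAligned(..., kInstructionSize)
      int64_t insn_offset = byte_offset >> kInstructionSizeLog2;
      assert(IsIntN(insn_offset, imm_width));  // IsValidImmPCOffset(...)
      return static_cast<int32_t>(insn_offset);
    }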
@@ -287,9 +289,9 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
-
- ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2;
- DCHECK(is_int32(target_offset));
+ DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
+ int32_t target_offset =
+ static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@@ -302,8 +304,9 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
- ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
- Instr imm = Assembler::ImmLLiteral(offset);
+ DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
+ Instr imm = Assembler::ImmLLiteral(
+ static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
@@ -316,7 +319,7 @@ void Instruction::SetImmLLiteral(Instruction* source) {
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
- return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
+ return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
}
@@ -334,6 +337,7 @@ uint64_t InstructionSequence::InlineData() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 142b7c11d4..145a7c9053 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -137,8 +137,8 @@ class Instruction {
return following(-count);
}
- #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
- int64_t Name() const { return Func(HighBit, LowBit); }
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int32_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
@@ -146,8 +146,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
- int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
- int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+ int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
@@ -369,7 +369,7 @@ class Instruction {
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
- static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
@@ -409,9 +409,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
- static bool IsValidPCRelOffset(int offset) {
- return is_int21(offset);
- }
+ static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
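Background sketch, not part of the patch: the widened ptrdiff_t offsets above are still validated against each branch type's immediate width, after scaling the byte offset by kInstructionSizeLog2 (== 2). IsIntN below mirrors V8's is_intn utility; the widths are the ARMv8 encoding constants:

#include <cstdint>

// True iff 'value' fits in a signed 'bits'-bit field, like V8's is_intn().
bool IsIntN(int64_t value, int bits) {
  int64_t limit = INT64_C(1) << (bits - 1);
  return -limit <= value && value < limit;
}

// Immediate widths for the PC-relative branch encodings (counted in
// instructions, so the byte offset is scaled down by 2 first):
//   CondBranchType / CompareBranchType: 19 bits -> +-1 MiB reach
//   TestBranchType:                     14 bits -> +-32 KiB reach
//   UncondBranchType:                   26 bits -> +-128 MiB reach
bool BranchOffsetInRange(int64_t byte_offset, int imm_width_in_bits) {
  return IsIntN(byte_offset >> 2, imm_width_in_bits);
}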
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index da505ff294..9bd02f45ab 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -591,4 +591,5 @@ void Instrument::VisitUnimplemented(Instruction* instr) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 7787224156..62e6f2a79e 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
+const Register LoadDescriptor::SlotRegister() { return x0; }
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
const Register StoreDescriptor::ReceiverRegister() { return x1; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return x4; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
@@ -62,389 +66,338 @@ const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return x2; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x2: function info
- Register registers[] = {cp, x2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x1: function
- Register registers[] = {cp, x1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x0: value
- Register registers[] = {cp, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x0: value
- Register registers[] = {cp, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, x3};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x3: array literals array
// x2: array literal index
// x1: constant elements
- Register registers[] = {cp, x3, x2, x1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x3, x2, x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x3: object literals array
// x2: object literal index
// x1: constant properties
// x0: object literal flags
- Register registers[] = {cp, x3, x2, x1, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x3, x2, x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x2: feedback vector
// x3: call feedback slot
- Register registers[] = {cp, x2, x3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x2, x3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
// x1: tagged value to put in the weak cell
- Register registers[] = {cp, x2, x3, x1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x2, x3, x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, x3, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x3, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x1 : the function to call
- Register registers[] = {cp, x1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, x1, x3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x1, x3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, x1, x3, x2};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x1, x3, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, x0, x1, x2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x0, x1, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x2: length
// x1: index (of last match)
// x0: string
- Register registers[] = {cp, x2, x1, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x2, x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x0: value (js_array)
// x1: to_map
- Register registers[] = {cp, x0, x1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x0, x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
- Register registers[] = {cp};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
- Register registers[] = {cp, x1, x2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer and single argument)
- Register registers[] = {cp, x1, x2, x0};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x1, x2, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
- // cp: context
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// x1: constructor function
// x0: number of arguments to the constructor function
- Register registers[] = {cp, x1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer and single argument)
- Register registers[] = {cp, x1, x0};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
- Register registers[] = {cp, x1, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x0: value to compare
- Register registers[] = {cp, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x0: value
- Register registers[] = {cp, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
- Register registers[] = {cp, x1, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // cp: context
// x2: allocation site
// x1: left operand
// x0: right operand
- Register registers[] = {cp, x2, x1, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x2, x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- // cp: context
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
- Register registers[] = {cp, x1, x0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
x2, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations,
- &noInlineDescriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &noInlineDescriptor);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
x2, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations,
- &noInlineDescriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &noInlineDescriptor);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
x0, // receiver
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
x1, // JSFunction
x0, // actual number of arguments
x2, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
x3, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
- cp, // context
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations,
- &default_descriptor);
+ data->InitializePlatformSpecific(arraysize(registers), registers,
+ &default_descriptor);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x1, // math rounding function
+ x3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
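The mechanical change in this file is the same throughout: cp and the Representation arrays move out of the platform code. A hypothetical new descriptor (sketch only; MyStubDescriptor does not exist in the tree) would now take this shape:

// Platform code lists just the parameter registers; representations are
// handled by the platform-independent descriptor, and cp is implicit.
void MyStubDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {x1, x0};  // e.g. left and right operands
  data->InitializePlatformSpecific(arraysize(registers), registers);
}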
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index a4a36bfa15..fef51c669b 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -1050,10 +1050,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@@ -1391,7 +1399,7 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), x0);
LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1504,7 +1512,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -1588,20 +1596,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@@ -1700,7 +1694,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -1766,7 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -1788,7 +1782,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2028,7 +2022,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2402,8 +2396,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- return MarkAsCall(
- new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result = new (zone())
+ LStoreKeyedGeneric(context, object, key, value, slot, vector);
+ return MarkAsCall(result, instr);
}
@@ -2442,7 +2444,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2567,6 +2577,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseRegister(instr->object());
+ LOperand* elements = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, x0);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), x3);
@@ -2763,4 +2788,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 7473597b15..4507c07591 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -125,6 +125,7 @@ class LCodeGen;
V(MathRoundD) \
V(MathRoundI) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -164,7 +165,6 @@ class LCodeGen;
V(SubI) \
V(SubS) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -318,26 +318,6 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -739,7 +719,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1181,6 +1161,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1550,8 +1532,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1561,6 +1547,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
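A sketch of the operand layout this bookkeeping implies, derived from DoCallWithDescriptor above:

// For a descriptor with 3 register parameters:
//   ops[0] = target                 (implicit)
//   ops[1] = context, fixed to cp   (implicit)
//   ops[2..4] = descriptor.GetRegisterParameter(0..2)
// so operands.length() == 3 + kImplicitRegisterParameterCount == 5.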
@@ -2563,22 +2553,24 @@ class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* obj,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = obj;
+ inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2615,17 +2607,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2637,6 +2634,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index c2a2ff364c..074926b83b 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -224,55 +224,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- default:
- UNREACHABLE();
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
-
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@@ -345,16 +307,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
environment->set_has_been_used();
@@ -435,6 +387,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
CallFunctionStub stub(isolate(), arity, flags);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -449,6 +402,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
DCHECK(ToRegister(instr->result()).is(x0));
}
@@ -504,6 +458,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
DCHECK(ToRegister(instr->result()).is(x0));
}
@@ -525,7 +480,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Mov(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
- __ Ldr(cp, ToMemOperand(context));
+ __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -669,7 +624,7 @@ bool LCodeGen::GeneratePrologue() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ !info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@@ -728,8 +683,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
Register value = x0;
Register scratch = x3;
@@ -743,8 +699,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(value, target);
// Update the write barrier. This clobbers value and scratch.
if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
- GetLinkRegisterState(), kSaveFPRegs);
+ __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
+ value, scratch, GetLinkRegisterState(),
+ kSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
@@ -995,15 +952,10 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length(); i < length; i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1281,13 +1233,37 @@ static int64_t ArgumentsOffsetWithoutFrame(int index) {
}
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
DCHECK(op != NULL);
DCHECK(!op->IsRegister());
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
+ int fp_offset = StackSlotOffset(op->index());
+ // Loads and stores have a greater reach with positive offsets than with
+ // negative ones, so we try to access the slot via jssp (positive offset)
+ // first and fall back to fp (negative offset) if that fails.
+ //
+ // We can reference a stack slot from jssp only if we know how much we've
+ // put on the stack. We don't know this in the following cases:
+ // - stack_mode != kCanUseStackPointer: deferred code has saved the
+ // registers, so the stack height is unknown.
+ // - saves_caller_doubles(): double registers have been pushed, so jssp
+ // points past the saved doubles rather than at the end of the stack
+ // slots.
+ // In both of these cases we _could_ add the tracking information
+ // required to use jssp here, but in practice it isn't worth it.
+ if ((stack_mode == kCanUseStackPointer) &&
+ !info()->saves_caller_doubles()) {
+ int jssp_offset_to_fp =
+ StandardFrameConstants::kFixedFrameSizeFromFp +
+ (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
+ int jssp_offset = fp_offset + jssp_offset_to_fp;
+ if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
+ return MemOperand(masm()->StackPointer(), jssp_offset);
+ }
+ }
+ return MemOperand(fp, fp_offset);
} else {
// Retrieve parameter without eager stack-frame relative to the
// stack-pointer.
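A worked example of the translation above (illustrative numbers; kFixedFrameSizeFromFp is assumed to be 16 bytes here):

// With 4 spill slots, 2 pushed arguments, and kPointerSize == 8:
//   jssp_offset_to_fp = 16 + (2 + 4) * 8 = 64
// so a slot at fp_offset == -24 becomes MemOperand(jssp, 40), provided 40
// is encodable as a scaled doubleword offset (IsImmLSScaled).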
@@ -1772,8 +1748,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2021,29 +1997,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- DCHECK(receiver.is(x1));
- DCHECK(name.is(x2));
- Register scratch = x4;
- Register extra = x5;
- Register extra2 = x6;
- Register extra3 = x7;
-
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(
- masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Tail call to miss if we ended up here.
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -2085,6 +2038,8 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
generator.AfterCall();
}
+
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -2104,11 +2059,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
__ Call(x10);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -2134,6 +2091,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
default:
UNREACHABLE();
}
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -2554,7 +2512,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->left()).Is(x1));
DCHECK(ToRegister(instr->right()).Is(x0));
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -2653,18 +2612,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register temp1 = x10;
Register temp2 = x11;
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
- __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
-
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ Mov(temp1, Operand(stamp));
@@ -2680,9 +2635,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ Bind(&runtime);
__ Mov(x1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ Bind(&done);
}
-
- __ Bind(&done);
}
@@ -3196,6 +3150,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
instr->hydrogen()->formal_parameter_count(),
instr->arity(), instr);
}
+ RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -3309,6 +3264,16 @@ void LCodeGen::DoLabel(LLabel* label) {
label->block_id(),
LabelType(label));
+ // Inherit pushed_arguments_ from the predecessor's argument count.
+ if (label->block()->HasPredecessor()) {
+ pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
+#ifdef DEBUG
+ for (auto p : *label->block()->predecessors()) {
+ DCHECK_EQ(p->argument_count(), pushed_arguments_);
+ }
+#endif
+ }
+
__ Bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@@ -3361,10 +3326,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(x0));
AllowDeferredHandleDereference vector_structure_check;
@@ -3377,17 +3341,29 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Mov(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ Mov(slot_register, Smi::FromInt(index));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
.is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3505,7 +3481,8 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3657,9 +3634,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3712,13 +3689,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// LoadIC expects name and receiver in registers.
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
-
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -4754,6 +4729,8 @@ void LCodeGen::DoPushArguments(LPushArguments* instr) {
// The preamble was done by LPreparePushArguments.
args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
+
+ RecordPushedArgumentsDelta(instr->ArgumentCount());
}
@@ -5137,14 +5114,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch,
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+ __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
+ scratch, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
}
__ Bind(&skip_assignment);
}
@@ -5221,7 +5193,8 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -5322,6 +5295,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -5329,6 +5306,91 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = x0;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ B(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ Cmp(ToRegister(current_capacity), Operand(constant_key));
+ __ B(le, deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ Cmp(ToRegister(key), Operand(constant_capacity));
+ __ B(ge, deferred->entry());
+ } else {
+ __ Cmp(ToRegister(key), ToRegister(current_capacity));
+ __ B(ge, deferred->entry());
+ }
+
+ __ Mov(result, ToRegister(instr->elements()));
+
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = x0;
+ __ Mov(result, 0);
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ __ Move(result, ToRegister(instr->object()));
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
+ } else {
+ __ Mov(x3, ToRegister(key));
+ __ SmiTag(x3);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
+}
+
+
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
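A note on the comparison directions in DoMaybeGrowElements above: every case must reach the deferred path exactly when key >= capacity, so the condition flips with the operand order.

// Sketch, not part of the patch:
//   Cmp(capacity, key); B(le, deferred)   // capacity <= key
//   Cmp(key, capacity); B(ge, deferred)   // key >= capacity
// and the constant/constant case branches unconditionally when
// constant_key >= constant_capacity.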
@@ -5433,10 +5495,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5548,7 +5614,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -6054,5 +6121,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
}
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index 809ed556d0..d73b060cd7 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -28,7 +28,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -37,7 +36,8 @@ class LCodeGen: public LCodeGenBase {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
+ expected_safepoint_kind_(Safepoint::kSimple),
+ pushed_arguments_(0) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -81,7 +81,9 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
+ enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
+ MemOperand ToMemOperand(LOperand* op,
+ StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI>
@@ -114,6 +116,7 @@ class LCodeGen: public LCodeGenBase {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
@@ -190,6 +193,8 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
@@ -197,7 +202,6 @@ class LCodeGen: public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -341,7 +345,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
@@ -358,6 +361,15 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
+ // The number of arguments pushed onto the stack, either by this block or by a
+ // predecessor.
+ int pushed_arguments_;
+
+ void RecordPushedArgumentsDelta(int delta) {
+ pushed_arguments_ += delta;
+ DCHECK(pushed_arguments_ >= 0);
+ }
+
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
index d06a37bc4e..7d01f792bc 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -292,4 +292,5 @@ void LGapResolver::EmitMove(int index) {
moves_[index].Eliminate();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 07e237e0b4..c7d6797416 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -926,8 +926,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
masm_->PushPreamble(size_);
}
- int count = queued_.size();
- int index = 0;
+ size_t count = queued_.size();
+ size_t index = 0;
while (index < count) {
// PushHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
@@ -949,8 +949,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
- int count = queued_.size();
- int index = 0;
+ size_t count = queued_.size();
+ size_t index = 0;
while (index < count) {
// PopHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
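
The int to size_t changes above are more than warning hygiene: std::vector reports its size as size_t, so an int index both trips -Wsign-compare and narrows on 64-bit targets. A minimal sketch of the corrected loop shape (element type immaterial):

#include <vector>

template <typename T>
void WalkQueue(const std::vector<T>& queued) {
  size_t count = queued.size();  // size() returns size_t, not int
  size_t index = 0;
  while (index < count) {
    // ... batch up queued[index] ...
    ++index;
  }
}
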
@@ -1263,7 +1263,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
- MemOperand tos(csp, -2 * kXRegSize, PreIndex);
+ MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@@ -3928,6 +3928,7 @@ void MacroAssembler::GetNumberHash(Register key, Register scratch) {
Add(key, key, scratch);
// hash = hash ^ (hash >> 16);
Eor(key, key, Operand(key, LSR, 16));
+ Bic(key, key, Operand(0xc0000000u));
}
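
The new Bic clears bits 31 and 30 of the computed hash, the assembler form of masking with 0x3fffffff so the value stays in positive smi range. For reference, a scalar sketch modeled on V8's ComputeIntegerHash (that this is the exact helper the sequence mirrors is an assumption here):

#include <cstdint>

uint32_t ComputeNumberHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // same effect as Bic(key, key, 0xc0000000)
}
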
@@ -4693,7 +4694,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
- size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
Ldr(scratch2, FieldMemOperand(scratch1, offset));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
@@ -5115,7 +5116,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
// 'check' in the other bits. The possible offset is limited in that we
// use BitField to pack the data, and the underlying data type is a
// uint32_t.
- uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+ uint32_t delta =
+ static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
} else {
DCHECK(!smi_check->is_bound());
@@ -5136,9 +5138,10 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
// 32-bit values.
DCHECK(is_uint32(payload));
if (payload != 0) {
- int reg_code = RegisterBits::decode(payload);
+ uint32_t payload32 = static_cast<uint32_t>(payload);
+ int reg_code = RegisterBits::decode(payload32);
reg_ = Register::XRegFromCode(reg_code);
- uint64_t smi_check_delta = DeltaBits::decode(payload);
+ int smi_check_delta = DeltaBits::decode(payload32);
DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}
@@ -5149,6 +5152,7 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 1160c40bf6..7854ff0e52 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -886,8 +886,8 @@ class MacroAssembler : public Assembler {
template<typename Field>
void DecodeField(Register dst, Register src) {
- static const uint64_t shift = Field::kShift;
- static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+ static const int shift = Field::kShift;
+ static const int setbits = CountSetBits(Field::kMask, 32);
Ubfx(dst, src, shift, setbits);
}
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
index 4315fd6a45..801cc1359b 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -1611,6 +1611,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
index da6b26b925..ae4393f7ac 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -20,6 +20,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
virtual ~RegExpMacroAssemblerARM64();
+ virtual void AbortedCodeGeneration() { masm_->AbortedCodeGeneration(); }
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 819a89765d..29d3ea2419 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -490,7 +490,7 @@ class Redirection {
static Redirection* FromHltInstruction(Instruction* redirect_call) {
char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
char* addr_of_redirection =
- addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+ addr_of_hlt - offsetof(Redirection, redirect_call_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
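
OFFSET_OF was a V8-local macro; the standard offsetof supports the same "recover the container from a member's address" pattern. A minimal sketch with illustrative names:

#include <cstddef>
#include <cstdint>

struct Container {
  void* external_function;
  uint32_t redirect_call;  // stands in for the embedded instruction
};

Container* FromMemberAddress(uint32_t* addr_of_call) {
  char* p = reinterpret_cast<char*>(addr_of_call);
  return reinterpret_cast<Container*>(p - offsetof(Container, redirect_call));
}
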
@@ -500,6 +500,14 @@ class Redirection {
return redirection->external_function<void*>();
}
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
private:
void* external_function_;
Instruction redirect_call_;
@@ -508,6 +516,12 @@ class Redirection {
};
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+ Redirection::DeleteChain(first);
+}
+
+
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
@@ -903,10 +917,11 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
return static_cast<unsignedT>(value) >> amount;
case ASR:
return value >> amount;
- case ROR:
+ case ROR: {
+ unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
return (static_cast<unsignedT>(value) >> amount) |
- ((value & ((1L << amount) - 1L)) <<
- (sizeof(unsignedT) * 8 - amount));
+ ((value & mask) << (sizeof(mask) * 8 - amount));
+ }
default:
UNIMPLEMENTED();
return 0;
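
The ROR fix matters because the old mask was built with 1L, and long is only 32 bits wide on some ABIs, so 64-bit rotates were masked at the wrong width. Computing the mask in the operand's own unsigned type, as the new code does, is always correct; a standalone sketch (valid for 0 < amount < bit width):

#include <cstdint>

template <typename UnsignedT>
UnsignedT RotateRight(UnsignedT value, unsigned amount) {
  UnsignedT mask = (static_cast<UnsignedT>(1) << amount) - 1;
  return (value >> amount) |
         ((value & mask) << (sizeof(UnsignedT) * 8 - amount));
}
// RotateRight<uint32_t>(0x80000001u, 1) == 0xC0000000u
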
@@ -1399,7 +1414,8 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
AddSubHelper(instr, op2);
} else {
- int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+ int32_t op2 = static_cast<int32_t>(
+ ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount));
AddSubHelper(instr, op2);
}
}
@@ -1410,7 +1426,7 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
AddSubHelper<int64_t>(instr, op2);
} else {
- AddSubHelper<int32_t>(instr, op2);
+ AddSubHelper<int32_t>(instr, static_cast<int32_t>(op2));
}
}
@@ -1457,7 +1473,7 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
LogicalHelper<int64_t>(instr, instr->ImmLogical());
} else {
- LogicalHelper<int32_t>(instr, instr->ImmLogical());
+ LogicalHelper<int32_t>(instr, static_cast<int32_t>(instr->ImmLogical()));
}
}
@@ -1879,7 +1895,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
// Get the shifted immediate.
int64_t shift = instr->ShiftMoveWide() * 16;
- int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+ int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
// Compute the new value.
switch (mov_op) {
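
The added cast is load-bearing: the raw move-wide immediate is extracted as a 32-bit value, and the shift can be 0, 16, 32 or 48, so shifting without widening is undefined behavior once the shift reaches 32. A reduced sketch (unsigned here, to keep every case well-defined):

#include <cstdint>

uint64_t ShiftedImm16(uint32_t imm16, int shift) {  // shift in {0, 16, 32, 48}
  // return static_cast<int32_t>(imm16) << shift;   // UB once shift >= 32
  return static_cast<uint64_t>(imm16) << shift;     // widen first, then shift
}
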
@@ -1912,25 +1928,32 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
void Simulator::VisitConditionalSelect(Instruction* instr) {
+ uint64_t new_val = xreg(instr->Rn());
if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
- uint64_t new_val = xreg(instr->Rm());
+ new_val = xreg(instr->Rm());
switch (instr->Mask(ConditionalSelectMask)) {
- case CSEL_w: set_wreg(instr->Rd(), new_val); break;
- case CSEL_x: set_xreg(instr->Rd(), new_val); break;
- case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
- case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
- case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
- case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
- case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
- case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
+ case CSEL_w:
+ case CSEL_x:
+ break;
+ case CSINC_w:
+ case CSINC_x:
+ new_val++;
+ break;
+ case CSINV_w:
+ case CSINV_x:
+ new_val = ~new_val;
+ break;
+ case CSNEG_w:
+ case CSNEG_x:
+ new_val = -new_val;
+ break;
default: UNIMPLEMENTED();
}
+ }
+ if (instr->SixtyFourBits()) {
+ set_xreg(instr->Rd(), new_val);
} else {
- if (instr->SixtyFourBits()) {
- set_xreg(instr->Rd(), xreg(instr->Rn()));
- } else {
- set_wreg(instr->Rd(), wreg(instr->Rn()));
- }
+ set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
}
}
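
The rewritten VisitConditionalSelect computes one 64-bit result and does a single width-dependent writeback, truncating explicitly for W destinations instead of duplicating the switch per width. The data flow reduces to roughly:

#include <cstdint>

// cond holds -> Rn unchanged; cond fails -> Rm transformed per opcode.
uint64_t CondSelectValue(bool cond_holds, uint64_t rn, uint64_t rm, int op) {
  uint64_t v = rn;
  if (!cond_holds) {
    v = rm;
    if (op == 1)      v++;        // CSINC
    else if (op == 2) v = ~v;     // CSINV
    else if (op == 3) v = 0 - v;  // CSNEG (modular negation)
  }                               // op == 0: CSEL, Rm unchanged
  return v;  // caller writes all 64 bits or a 32-bit truncation
}
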
@@ -1940,13 +1963,27 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
unsigned src = instr->Rn();
switch (instr->Mask(DataProcessing1SourceMask)) {
- case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
- case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
- case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
- case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
- case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
- case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
- case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+ case RBIT_w:
+ set_wreg(dst, ReverseBits(wreg(src)));
+ break;
+ case RBIT_x:
+ set_xreg(dst, ReverseBits(xreg(src)));
+ break;
+ case REV16_w:
+ set_wreg(dst, ReverseBytes(wreg(src), 1));
+ break;
+ case REV16_x:
+ set_xreg(dst, ReverseBytes(xreg(src), 1));
+ break;
+ case REV_w:
+ set_wreg(dst, ReverseBytes(wreg(src), 2));
+ break;
+ case REV32_x:
+ set_xreg(dst, ReverseBytes(xreg(src), 2));
+ break;
+ case REV_x:
+ set_xreg(dst, ReverseBytes(xreg(src), 3));
+ break;
case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
break;
case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
@@ -1964,44 +2001,6 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
}
-uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
- DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
- uint64_t result = 0;
- for (unsigned i = 0; i < num_bits; i++) {
- result = (result << 1) | (value & 1);
- value >>= 1;
- }
- return result;
-}
-
-
-uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
- // Split the 64-bit value into an 8-bit array, where b[0] is the least
- // significant byte, and b[7] is the most significant.
- uint8_t bytes[8];
- uint64_t mask = 0xff00000000000000UL;
- for (int i = 7; i >= 0; i--) {
- bytes[i] = (value & mask) >> (i * 8);
- mask >>= 8;
- }
-
- // Permutation tables for REV instructions.
- // permute_table[Reverse16] is used by REV16_x, REV16_w
- // permute_table[Reverse32] is used by REV32_x, REV_w
- // permute_table[Reverse64] is used by REV_x
- DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
- static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
- {4, 5, 6, 7, 0, 1, 2, 3},
- {0, 1, 2, 3, 4, 5, 6, 7} };
- uint64_t result = 0;
- for (int i = 0; i < 8; i++) {
- result <<= 8;
- result |= bytes[permute_table[mode][i]];
- }
- return result;
-}
-
-
template <typename T>
void Simulator::DataProcessing2Source(Instruction* instr) {
Shift shift_op = NO_SHIFT;
@@ -2121,7 +2120,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), result);
} else {
- set_wreg(instr->Rd(), result);
+ set_wreg(instr->Rd(), static_cast<int32_t>(result));
}
}
@@ -2138,8 +2137,9 @@ void Simulator::BitfieldHelper(Instruction* instr) {
mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
- mask = ((1L << (S + 1)) - 1);
- mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ uint64_t umask = ((1L << (S + 1)) - 1);
+ umask = (umask >> R) | (umask << (reg_size - R));
+ mask = static_cast<T>(umask);
diff += reg_size;
}
@@ -2563,7 +2563,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// Bail out early for zero inputs.
if (mantissa == 0) {
- return sign << sign_offset;
+ return static_cast<T>(sign << sign_offset);
}
// If all bits in the exponent are set, the value is infinite or NaN.
@@ -2580,9 +2580,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// FPTieEven rounding mode handles overflows using infinities.
exponent = infinite_exponent;
mantissa = 0;
- return (sign << sign_offset) |
- (exponent << exponent_offset) |
- (mantissa << mantissa_offset);
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset));
}
// Calculate the shift required to move the top mantissa bit to the proper
@@ -2605,7 +2605,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// non-zero result after rounding.
if (shift > (highest_significant_bit + 1)) {
// The result will always be +/-0.0.
- return sign << sign_offset;
+ return static_cast<T>(sign << sign_offset);
}
// Properly encode the exponent for a subnormal output.
@@ -2624,9 +2624,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
- T result = (sign << sign_offset) |
- (exponent << exponent_offset) |
- ((mantissa >> shift) << mantissa_offset);
+ T result =
+ static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
// A very large mantissa can overflow during rounding. If this happens, the
// exponent should be incremented and the mantissa set to 1.0 (encoded as
@@ -2641,9 +2641,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// We have to shift the mantissa to the left (or not at all). The input
// mantissa is exactly representable in the output mantissa, so apply no
// rounding correction.
- return (sign << sign_offset) |
- (exponent << exponent_offset) |
- ((mantissa << -shift) << mantissa_offset);
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset));
}
}
@@ -2838,7 +2838,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
uint32_t sign = raw >> 63;
uint32_t exponent = (1 << 8) - 1;
- uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+ uint32_t payload =
+ static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
payload |= (1 << 22); // Force a quiet NaN.
return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
@@ -2859,7 +2860,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
// Extract the IEEE-754 double components.
uint32_t sign = raw >> 63;
// Extract the exponent and remove the IEEE-754 encoding bias.
- int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+ int32_t exponent =
+ static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
// Extract the mantissa and add the implicit '1' bit.
uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
if (std::fpclassify(value) == FP_NORMAL) {
@@ -3210,11 +3212,11 @@ void Simulator::VisitSystem(Instruction* instr) {
case MSR: {
switch (instr->ImmSystemRegister()) {
case NZCV:
- nzcv().SetRawValue(xreg(instr->Rt()));
+ nzcv().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(NZCV);
break;
case FPCR:
- fpcr().SetRawValue(xreg(instr->Rt()));
+ fpcr().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(FPCR);
break;
default: UNIMPLEMENTED();
@@ -3835,6 +3837,7 @@ void Simulator::DoPrintf(Instruction* instr) {
#endif // USE_SIMULATOR
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 108f6f2b54..64fceb3451 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -72,12 +72,6 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
-enum ReverseByteMode {
- Reverse16 = 0,
- Reverse32 = 1,
- Reverse64 = 2
-};
-
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
@@ -169,6 +163,8 @@ class Simulator : public DecoderVisitor {
static void Initialize(Isolate* isolate);
+ static void TearDown(HashMap* i_cache, Redirection* first);
+
static Simulator* current(v8::internal::Isolate* isolate);
class CallArgument;
@@ -706,9 +702,6 @@ class Simulator : public DecoderVisitor {
template <typename T>
void BitfieldHelper(Instruction* instr);
- uint64_t ReverseBits(uint64_t value, unsigned num_bits);
- uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
-
template <typename T>
T FPDefaultNaN() const;
@@ -884,10 +877,10 @@ class Simulator : public DecoderVisitor {
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->CallRegExp( \
- entry, \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ static_cast<int>( \
+ Simulator::current(Isolate::Current()) \
+ ->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// The simulator has its own stack. Thus it has a different stack limit from
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index dbfb87638b..1cd9785417 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -74,7 +74,7 @@ int CountSetBits(uint64_t value, int width) {
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
- return value;
+ return static_cast<int>(value);
}
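
The tail visible above is the final rounds of a SWAR population count: each step folds partial counts into fields twice as wide, and the last line now casts the total back to int. A self-contained equivalent:

#include <cstdint>

int PopCount64(uint64_t v) {
  v = v - ((v >> 1) & 0x5555555555555555ull);                            // 2-bit sums
  v = (v & 0x3333333333333333ull) + ((v >> 2) & 0x3333333333333333ull);  // 4-bit sums
  v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0full;                            // 8-bit sums
  return static_cast<int>((v * 0x0101010101010101ull) >> 56);            // grand total
}
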
@@ -89,6 +89,7 @@ int MaskToBit(uint64_t mask) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index c22ed9aed7..eee614d288 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -61,6 +61,49 @@ uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
+template <typename T>
+T ReverseBits(T value) {
+ DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
+ (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+ DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
+ DCHECK((1U << block_bytes_log2) <= sizeof(value));
+ // Split the 64-bit value into an array of 8-bit values, where bytes[0] is
+ // the least significant byte, and bytes[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = 0xff00000000000000;
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[0] is used by REV16_x, REV16_w
+ // permute_table[1] is used by REV32_x, REV_w
+ // permute_table[2] is used by REV_x
+ DCHECK((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+ static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7}};
+ T result = 0;
+ for (int i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= bytes[permute_table[block_bytes_log2 - 1][i]];
+ }
+ return result;
+}
+
+
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = double_to_rawbits(num);
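
A few worked values for the relocated templates may help; block_bytes_log2 selects the reversal granularity, matching REV16 (1), REV32/REV_w (2) and REV_x (3). Assuming the definitions above:

#include <cstdint>

void ReverseExamples() {
  // Swap bytes within each 16-bit block (REV16 behaviour):
  uint64_t a = ReverseBytes<uint64_t>(0x0102030405060708ull, 1);
  // a == 0x0201040306050807
  // Swap bytes within each 32-bit block (REV_w on a 32-bit value):
  uint32_t b = ReverseBytes<uint32_t>(0x12345678u, 2);
  // b == 0x78563412
  // Full bit reversal (RBIT):
  uint8_t c = ReverseBits<uint8_t>(0x01);
  // c == 0x80
  (void)a; (void)b; (void)c;
}
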
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index 24bf7e5c9a..8efabe50e3 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -2,17 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $iteratorCreateResultObject;
var $arrayValues;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
var GlobalArray = global.Array;
-var GlobalObject = global.Object;
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array)
@@ -122,19 +120,19 @@ function ArrayKeys() {
}
-%FunctionSetPrototype(ArrayIterator, new GlobalObject());
+%FunctionSetPrototype(ArrayIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
-$installFunctions(ArrayIterator.prototype, DONT_ENUM, [
+utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
'next', ArrayIteratorNext
]);
-$setFunctionName(ArrayIteratorIterator, symbolIterator);
+utils.SetFunctionName(ArrayIteratorIterator, symbolIterator);
%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
ArrayIteratorIterator, DONT_ENUM);
%AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
"Array Iterator", READ_ONLY | DONT_ENUM);
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
// No 'values' since it breaks webcompat: http://crbug.com/409858
'entries', ArrayEntries,
'keys', ArrayKeys
@@ -153,7 +151,13 @@ endmacro
TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
-$iteratorCreateResultObject = CreateIteratorResultObject;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.ArrayIteratorCreateResultObject = CreateIteratorResultObject;
+});
+
$arrayValues = ArrayValues;
})
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index dc0b65fc1d..7baabf8361 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
var $arrayConcat;
-var $arrayJoin;
var $arrayPush;
var $arrayPop;
var $arrayShift;
@@ -11,13 +10,34 @@ var $arraySlice;
var $arraySplice;
var $arrayUnshift;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalArray = global.Array;
+var InternalArray = utils.InternalArray;
+var InternalPackedArray = utils.InternalPackedArray;
+
+var Delete;
+var MathMin;
+var ObjectHasOwnProperty;
+var ObjectIsFrozen;
+var ObjectIsSealed;
+var ObjectToString;
+
+utils.Import(function(from) {
+ Delete = from.Delete;
+ MathMin = from.MathMin;
+ ObjectHasOwnProperty = from.ObjectHasOwnProperty;
+ ObjectIsFrozen = from.ObjectIsFrozen;
+ ObjectIsSealed = from.ObjectIsSealed;
+ ObjectToString = from.ObjectToString;
+});
// -------------------------------------------------------------------
@@ -223,7 +243,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
- %AddElement(deleted_elements, i - start_i, current, NONE);
+ %AddElement(deleted_elements, i - start_i, current);
}
}
} else {
@@ -234,7 +254,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
- %AddElement(deleted_elements, key - start_i, current, NONE);
+ %AddElement(deleted_elements, key - start_i, current);
}
}
}
@@ -251,7 +271,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
- $min(len - del_count + num_additional_args, 0xffffffff));
+ MathMin(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
@@ -283,7 +303,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
if (!IS_UNDEFINED(current) || key in array) {
var new_key = key - del_count + num_additional_args;
new_array[new_key] = current;
- if (new_key > 0xffffffff) {
+ if (new_key > 0xfffffffe) {
big_indices = big_indices || new InternalArray();
big_indices.push(new_key);
}
@@ -316,7 +336,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
var current = array[index];
// The spec requires [[DefineOwnProperty]] here, %AddElement is close
// enough (in that it ignores the prototype).
- %AddElement(deleted_elements, i, current, NONE);
+ %AddElement(deleted_elements, i, current);
}
}
}
@@ -372,26 +392,27 @@ function ArrayToString() {
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
- return %_CallFunction(array, $objectToString);
+ return %_CallFunction(array, ObjectToString);
}
return %_CallFunction(array, func);
}
-function ArrayToLocaleString() {
- var array = $toObject(this);
- var arrayLen = array.length;
- var len = TO_UINT32(arrayLen);
+function InnerArrayToLocaleString(array, length) {
+ var len = TO_UINT32(length);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
}
-function ArrayJoin(separator) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
+function ArrayToLocaleString() {
+ var array = $toObject(this);
+ var arrayLen = array.length;
+ return InnerArrayToLocaleString(array, arrayLen);
+}
- var array = TO_OBJECT_INLINE(this);
- var length = TO_UINT32(array.length);
+
+function InnerArrayJoin(separator, array, length) {
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@@ -413,6 +434,16 @@ function ArrayJoin(separator) {
}
+function ArrayJoin(separator) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
+
+ var array = TO_OBJECT_INLINE(this);
+ var length = TO_UINT32(array.length);
+
+ return InnerArrayJoin(separator, array, length);
+}
+
+
function ObservedArrayPop(n) {
n--;
var value = this[n];
@@ -447,7 +478,7 @@ function ArrayPop() {
n--;
var value = array[n];
- $delete(array, $toName(n), true);
+ Delete(array, n, true);
array.length = n;
return value;
}
@@ -557,18 +588,7 @@ function SparseReverse(array, len) {
}
-function ArrayReverse() {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
-
- var array = TO_OBJECT_INLINE(this);
- var len = TO_UINT32(array.length);
-
- if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
- %NormalizeElements(array);
- SparseReverse(array, len);
- return array;
- }
-
+function InnerArrayReverse(array, len) {
var j = len - 1;
for (var i = 0; i < j; i++, j--) {
var current_i = array[i];
@@ -593,6 +613,22 @@ function ArrayReverse() {
}
+function ArrayReverse() {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
+
+ var array = TO_OBJECT_INLINE(this);
+ var len = TO_UINT32(array.length);
+
+ if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
+ %NormalizeElements(array);
+ SparseReverse(array, len);
+ return array;
+ }
+
+ return InnerArrayReverse(array, len);
+}
+
+
function ObservedArrayShift(len) {
var first = this[0];
@@ -620,7 +656,7 @@ function ArrayShift() {
return;
}
- if ($objectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
+ if (ObjectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (%IsObserved(array))
return ObservedArrayShift.call(array, len);
@@ -671,7 +707,7 @@ function ArrayUnshift(arg1) { // length == 1
var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
- !$objectIsSealed(array)) {
+ !ObjectIsSealed(array)) {
SparseMove(array, 0, 0, len, num_arguments);
} else {
SimpleMove(array, 0, 0, len, num_arguments);
@@ -817,9 +853,9 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
- if (del_count != num_elements_to_add && $objectIsSealed(array)) {
+ if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
throw MakeTypeError(kArrayFunctionsOnSealed);
- } else if (del_count > 0 && $objectIsFrozen(array)) {
+ } else if (del_count > 0 && ObjectIsFrozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
@@ -854,9 +890,7 @@ function ArraySplice(start, delete_count) {
}
-function ArraySort(comparefn) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
-
+function InnerArraySort(length, comparefn) {
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
@@ -1101,7 +1135,6 @@ function ArraySort(comparefn) {
return first_undefined;
};
- var length = TO_UINT32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
@@ -1140,17 +1173,19 @@ function ArraySort(comparefn) {
}
-// The following functions cannot be made efficient on sparse arrays while
-// preserving the semantics, since the calls to the receiver function can add
-// or delete elements from the array.
-function ArrayFilter(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
+function ArraySort(comparefn) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
var array = $toObject(this);
- var length = $toUint32(array.length);
+ var length = TO_UINT32(array.length);
+ return %_CallFunction(array, length, comparefn, InnerArraySort);
+}
+
+// The following functions cannot be made efficient on sparse arrays while
+// preserving the semantics, since the calls to the receiver function can add
+// or delete elements from the array.
+function InnerArrayFilter(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@@ -1159,7 +1194,6 @@ function ArrayFilter(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
- var result = new GlobalArray();
var accumulator = new InternalArray();
var accumulator_length = 0;
var is_array = IS_ARRAY(array);
@@ -1175,19 +1209,23 @@ function ArrayFilter(f, receiver) {
}
}
}
- %MoveArrayContents(accumulator, result);
- return result;
+ return accumulator;
}
-
-function ArrayForEach(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
+function ArrayFilter(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
- var length = TO_UINT32(array.length);
+ var length = $toUint32(array.length);
+ var accumulator = InnerArrayFilter(f, receiver, array, length);
+ var result = new GlobalArray();
+ %MoveArrayContents(accumulator, result);
+ return result;
+}
+function InnerArrayForEach(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@@ -1209,17 +1247,18 @@ function ArrayForEach(f, receiver) {
}
}
-
-// Executes the function once for each element present in the
-// array until it finds one where callback returns true.
-function ArraySome(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
+function ArrayForEach(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
+ InnerArrayForEach(f, receiver, array, length);
+}
+
+function InnerArraySome(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@@ -1243,14 +1282,20 @@ function ArraySome(f, receiver) {
}
-function ArrayEvery(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
+// Executes the function once for each element present in the
+// array until it finds one where callback returns true.
+function ArraySome(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
+ return InnerArraySome(f, receiver, array, length);
+}
+
+function InnerArrayEvery(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@@ -1273,15 +1318,18 @@ function ArrayEvery(f, receiver) {
return true;
}
-
-function ArrayMap(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
+function ArrayEvery(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
+ return InnerArrayEvery(f, receiver, array, length);
+}
+
+function InnerArrayMap(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@@ -1290,7 +1338,6 @@ function ArrayMap(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
- var result = new GlobalArray();
var accumulator = new InternalArray(length);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@@ -1303,15 +1350,29 @@ function ArrayMap(f, receiver) {
accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
}
}
+ return accumulator;
+}
+
+
+function ArrayMap(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
+
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = $toObject(this);
+ var length = TO_UINT32(array.length);
+ var accumulator = InnerArrayMap(f, receiver, array, length);
+ var result = new GlobalArray();
%MoveArrayContents(accumulator, result);
return result;
}
-function ArrayIndexOf(element, index) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
-
- var length = TO_UINT32(this.length);
+// For .indexOf, we don't need to pass in the number of arguments
+// at the callsite since ToInteger(undefined) == 0; however, for
+// .lastIndexOf, we need to pass it, since passing undefined yields a start
+// index of 0, while omitting the argument yields length - 1.
+function InnerArrayIndexOf(element, index, length) {
if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
index = 0;
@@ -1365,12 +1426,17 @@ function ArrayIndexOf(element, index) {
}
-function ArrayLastIndexOf(element, index) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
+function ArrayIndexOf(element, index) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
var length = TO_UINT32(this.length);
+ return %_CallFunction(this, element, index, length, InnerArrayIndexOf);
+}
+
+
+function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
if (length == 0) return -1;
- if (%_ArgumentsLength() < 2) {
+ if (argumentsLength < 2) {
index = length - 1;
} else {
index = TO_INTEGER(index);
@@ -1418,21 +1484,23 @@ function ArrayLastIndexOf(element, index) {
}
-function ArrayReduce(callback, current) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
+function ArrayLastIndexOf(element, index) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
+
+ var length = TO_UINT32(this.length);
+ return %_CallFunction(this, element, index, length,
+ %_ArgumentsLength(), InnerArrayLastIndexOf);
+}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
- var length = $toUint32(array.length);
+function InnerArrayReduce(callback, current, array, length, argumentsLength) {
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError(kCalledNonCallable, callback);
}
var is_array = IS_ARRAY(array);
var i = 0;
- find_initial: if (%_ArgumentsLength() < 2) {
+ find_initial: if (argumentsLength < 2) {
for (; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
current = array[i++];
@@ -1455,21 +1523,27 @@ function ArrayReduce(callback, current) {
}
-function ArrayReduceRight(callback, current) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
+function ArrayReduce(callback, current) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
- // Pull out the length so that side effects are visible before the
- // callback function is checked.
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = $toUint32(array.length);
+ return InnerArrayReduce(callback, current, array, length,
+ %_ArgumentsLength());
+}
+
+function InnerArrayReduceRight(callback, current, array, length,
+ argumentsLength) {
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError(kCalledNonCallable, callback);
}
var is_array = IS_ARRAY(array);
var i = length - 1;
- find_initial: if (%_ArgumentsLength() < 2) {
+ find_initial: if (argumentsLength < 2) {
for (; i >= 0; i--) {
if (HAS_INDEX(array, i, is_array)) {
current = array[i--];
@@ -1491,6 +1565,18 @@ function ArrayReduceRight(callback, current) {
return current;
}
+
+function ArrayReduceRight(callback, current) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
+
+ // Pull out the length so that side effects are visible before the
+ // callback function is checked.
+ var array = $toObject(this);
+ var length = $toUint32(array.length);
+ return InnerArrayReduceRight(callback, current, array, length,
+ %_ArgumentsLength());
+}
+
// ES5, 15.4.3.2
function ArrayIsArray(obj) {
return IS_ARRAY(obj);
@@ -1519,7 +1605,7 @@ var unscopables = {
DONT_ENUM | READ_ONLY);
// Set up non-enumerable functions on the Array object.
-$installFunctions(GlobalArray, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray, DONT_ENUM, [
"isArray", ArrayIsArray
]);
@@ -1540,7 +1626,7 @@ var getFunction = function(name, jsBuiltin, len) {
// set their names.
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"toString", getFunction("toString", ArrayToString),
"toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
"join", getFunction("join", ArrayJoin),
@@ -1569,7 +1655,7 @@ $installFunctions(GlobalArray.prototype, DONT_ENUM, [
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code.
// Adding only the functions that are actually used.
-$setUpLockedPrototype(InternalArray, GlobalArray(), [
+utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
"concat", getFunction("concat", ArrayConcatJS),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
@@ -1579,15 +1665,36 @@ $setUpLockedPrototype(InternalArray, GlobalArray(), [
"splice", getFunction("splice", ArraySplice)
]);
-$setUpLockedPrototype(InternalPackedArray, GlobalArray(), [
+utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"shift", getFunction("shift", ArrayShift)
]);
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.ArrayIndexOf = ArrayIndexOf;
+ to.ArrayJoin = ArrayJoin;
+ to.ArrayToString = ArrayToString;
+ to.InnerArrayEvery = InnerArrayEvery;
+ to.InnerArrayFilter = InnerArrayFilter;
+ to.InnerArrayForEach = InnerArrayForEach;
+ to.InnerArrayIndexOf = InnerArrayIndexOf;
+ to.InnerArrayJoin = InnerArrayJoin;
+ to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
+ to.InnerArrayMap = InnerArrayMap;
+ to.InnerArrayReduce = InnerArrayReduce;
+ to.InnerArrayReduceRight = InnerArrayReduceRight;
+ to.InnerArrayReverse = InnerArrayReverse;
+ to.InnerArraySome = InnerArraySome;
+ to.InnerArraySort = InnerArraySort;
+ to.InnerArrayToLocaleString = InnerArrayToLocaleString;
+});
+
$arrayConcat = ArrayConcatJS;
-$arrayJoin = ArrayJoin;
$arrayPush = ArrayPush;
$arrayPop = ArrayPop;
$arrayShift = ArrayShift;
@@ -1595,4 +1702,4 @@ $arraySlice = ArraySlice;
$arraySplice = ArraySplice;
$arrayUnshift = ArrayUnshift;
-})
+});
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index 3e6e084a7b..9657b9e376 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -2,21 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalObject = global.Object;
+var MathMax;
+var MathMin;
+
+utils.Import(function(from) {
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
+});
+
// -------------------------------------------------------------------
function ArrayBufferConstructor(length) { // length = 1
if (%_IsConstructCall()) {
var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
- %ArrayBufferInitialize(this, byteLength);
+ %ArrayBufferInitialize(this, byteLength, kNotShared);
} else {
throw MakeTypeError(kConstructorNotFunction, "ArrayBuffer");
}
@@ -44,16 +55,16 @@ function ArrayBufferSlice(start, end) {
var first;
var byte_length = %_ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
- first = $max(byte_length + relativeStart, 0);
+ first = MathMax(byte_length + relativeStart, 0);
} else {
- first = $min(relativeStart, byte_length);
+ first = MathMin(relativeStart, byte_length);
}
var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
- fin = $max(byte_length + relativeEnd, 0);
+ fin = MathMax(byte_length + relativeEnd, 0);
} else {
- fin = $min(relativeEnd, byte_length);
+ fin = MathMin(relativeEnd, byte_length);
}
if (fin < first) {
@@ -83,13 +94,13 @@ function ArrayBufferIsViewJS(obj) {
%AddNamedProperty(GlobalArrayBuffer.prototype,
symbolToStringTag, "ArrayBuffer", DONT_ENUM | READ_ONLY);
-$installGetter(GlobalArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
+utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
-$installFunctions(GlobalArrayBuffer, DONT_ENUM, [
+utils.InstallFunctions(GlobalArrayBuffer, DONT_ENUM, [
"isView", ArrayBufferIsViewJS
]);
-$installFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
"slice", ArrayBufferSlice
]);
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 1464074b89..e6aaa914bf 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -135,7 +135,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
predictable_code_size_(false),
// We may use the assembler without an isolate.
serializer_enabled_(isolate && isolate->serializer_enabled()),
- ool_constant_pool_available_(false) {
+ constant_pool_available_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
@@ -881,6 +881,8 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
if (id != Deoptimizer::kNotDeoptimizationEntry) {
os << " (deoptimization bailout " << id << ")";
}
+ } else if (IsConstPool(rmode_)) {
+ os << " (size " << static_cast<int>(data_) << ")";
}
os << "\n";
@@ -1615,26 +1617,227 @@ bool PositionsRecorder::WriteRecordedPositions() {
EnsureSpace ensure_space(assembler_);
assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
state_.current_statement_position);
+ state_.written_position = state_.current_statement_position;
+ state_.written_statement_position = state_.current_statement_position;
written = true;
}
- state_.written_statement_position = state_.current_statement_position;
// Write the position if it is different from what was written last time and
// also different from the statement position that was just written.
- if (state_.current_position != state_.written_position &&
- (state_.current_position != state_.written_statement_position ||
- !written)) {
+ if (state_.current_position != state_.written_position) {
EnsureSpace ensure_space(assembler_);
assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
+ state_.written_position = state_.current_position;
written = true;
}
- state_.written_position = state_.current_position;
// Return whether something was written.
return written;
}
+ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
+ int double_reach_bits) {
+ info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
+ info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
+ info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
+}
+
+
+ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
+ ConstantPoolEntry::Type type) const {
+ const PerTypeEntryInfo& info = info_[type];
+
+ if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
+
+ int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
+ int dbl_offset = dbl_count * kDoubleSize;
+ int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
+ int ptr_offset = ptr_count * kPointerSize + dbl_offset;
+
+ if (type == ConstantPoolEntry::DOUBLE) {
+ // Double overflow detection must take into account the reach for both types
+ int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
+ if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
+ (ptr_count > 0 &&
+ !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
+ return ConstantPoolEntry::OVERFLOWED;
+ }
+ } else {
+ DCHECK(type == ConstantPoolEntry::INTPTR);
+ if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
+ return ConstantPoolEntry::OVERFLOWED;
+ }
+ }
+
+ return ConstantPoolEntry::REGULAR;
+}
+
+
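
NextAccess above treats an entry as "regular" only while its byte offset from the pool base is representable in the unsigned immediate field of the load that will reference it; since doubles are laid out first, a pointer entry's offset also counts every double. The predicate is essentially an is_uintn test:

#include <cstdint>

// True when offset fits an unsigned reach_bits-wide immediate.
bool FitsReach(int64_t offset, int reach_bits) {
  return offset >= 0 && offset < (int64_t{1} << reach_bits);
}
// Offsets assumed by the checks above:
//   dbl_offset = dbl_count * kDoubleSize
//   ptr_offset = ptr_count * kPointerSize + dbl_offset
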
+ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
+ ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
+ DCHECK(!emitted_label_.is_bound());
+ PerTypeEntryInfo& info = info_[type];
+ const int entry_size = ConstantPoolEntry::size(type);
+ bool merged = false;
+
+ if (entry.sharing_ok()) {
+ // Try to merge entries
+ std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
+ int end = static_cast<int>(info.shared_entries.size());
+ for (int i = 0; i < end; i++, it++) {
+ if ((entry_size == kPointerSize) ? entry.value() == it->value()
+ : entry.value64() == it->value64()) {
+ // Merge with found entry.
+ entry.set_merged_index(i);
+ merged = true;
+ break;
+ }
+ }
+ }
+
+ // By definition, merged entries have regular access.
+ DCHECK(!merged || entry.merged_index() < info.regular_count);
+ ConstantPoolEntry::Access access =
+ (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
+
+ // Enforce an upper bound on search time by limiting the search to
+ // unique sharable entries which fit in the regular section.
+ if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
+ info.shared_entries.push_back(entry);
+ } else {
+ info.entries.push_back(entry);
+ }
+
+ // We're done if we found a match or have already triggered the
+ // overflow state.
+ if (merged || info.overflow()) return access;
+
+ if (access == ConstantPoolEntry::REGULAR) {
+ info.regular_count++;
+ } else {
+ info.overflow_start = static_cast<int>(info.entries.size()) - 1;
+ }
+
+ return access;
+}
+
+
+void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
+ ConstantPoolEntry::Type type) {
+ PerTypeEntryInfo& info = info_[type];
+ std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
+ const int entry_size = ConstantPoolEntry::size(type);
+ int base = emitted_label_.pos();
+ DCHECK(base > 0);
+ int shared_end = static_cast<int>(shared_entries.size());
+ std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
+ for (int i = 0; i < shared_end; i++, shared_it++) {
+ int offset = assm->pc_offset() - base;
+ shared_it->set_offset(offset); // Save offset for merged entries.
+ if (entry_size == kPointerSize) {
+ assm->dp(shared_it->value());
+ } else {
+ assm->dq(shared_it->value64());
+ }
+ DCHECK(is_uintn(offset, info.regular_reach_bits));
+
+ // Patch load sequence with correct offset.
+ assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
+ ConstantPoolEntry::REGULAR, type);
+ }
+}
+
+
+void ConstantPoolBuilder::EmitGroup(Assembler* assm,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ PerTypeEntryInfo& info = info_[type];
+ const bool overflow = info.overflow();
+ std::vector<ConstantPoolEntry>& entries = info.entries;
+ std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
+ const int entry_size = ConstantPoolEntry::size(type);
+ int base = emitted_label_.pos();
+ DCHECK(base > 0);
+ int begin;
+ int end;
+
+ if (access == ConstantPoolEntry::REGULAR) {
+ // Emit any shared entries first
+ EmitSharedEntries(assm, type);
+ }
+
+ if (access == ConstantPoolEntry::REGULAR) {
+ begin = 0;
+ end = overflow ? info.overflow_start : static_cast<int>(entries.size());
+ } else {
+ DCHECK(access == ConstantPoolEntry::OVERFLOWED);
+ if (!overflow) return;
+ begin = info.overflow_start;
+ end = static_cast<int>(entries.size());
+ }
+
+ std::vector<ConstantPoolEntry>::iterator it = entries.begin();
+ if (begin > 0) std::advance(it, begin);
+ for (int i = begin; i < end; i++, it++) {
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ ConstantPoolEntry::Access entry_access;
+ if (!it->is_merged()) {
+ // Emit new entry
+ offset = assm->pc_offset() - base;
+ entry_access = access;
+ if (entry_size == kPointerSize) {
+ assm->dp(it->value());
+ } else {
+ assm->dq(it->value64());
+ }
+ } else {
+ // Retrieve offset from shared entry.
+ offset = shared_entries[it->merged_index()].offset();
+ entry_access = ConstantPoolEntry::REGULAR;
+ }
+
+ DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
+ is_uintn(offset, info.regular_reach_bits));
+
+ // Patch load sequence with correct offset.
+ assm->PatchConstantPoolAccessInstruction(it->position(), offset,
+ entry_access, type);
+ }
+}
+
+
+// Emit and return position of pool. Zero implies no constant pool.
+int ConstantPoolBuilder::Emit(Assembler* assm) {
+ bool emitted = emitted_label_.is_bound();
+ bool empty = IsEmpty();
+
+ if (!emitted) {
+ // Mark start of constant pool. Align if necessary.
+ if (!empty) assm->DataAlign(kDoubleSize);
+ assm->bind(&emitted_label_);
+ if (!empty) {
+ // Emit in groups based on access and type.
+ // Emit doubles first for alignment purposes.
+ EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
+ EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
+ if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
+ assm->DataAlign(kDoubleSize);
+ EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
+ ConstantPoolEntry::DOUBLE);
+ }
+ if (info_[ConstantPoolEntry::INTPTR].overflow()) {
+ EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
+ ConstantPoolEntry::INTPTR);
+ }
+ }
+ }
+
+ return !empty ? emitted_label_.pos() : 0;
+}
+
+
// Platform specific but identical code for all the platforms.
@@ -1665,8 +1868,16 @@ void Assembler::RecordJSReturn() {
void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
-} } // namespace v8::internal
+
+
+void Assembler::DataAlign(int m) {
+ DCHECK(m >= 2 && base::bits::IsPowerOfTwo32(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ db(0);
+ }
+}
+} // namespace internal
+} // namespace v8
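
DataAlign pads one byte at a time; for a power-of-two m the pad count also has a closed form, which is handy when estimating emitted pool size:

// Bytes needed to round pc_offset up to the next multiple of m (m a power
// of two); equivalent to the db(0) loop above.
int PaddingFor(int pc_offset, int m) {
  return -pc_offset & (m - 1);
}
// PaddingFor(13, 8) == 3; PaddingFor(16, 8) == 0.
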
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index fd66e0bfdb..fb59ceb7bd 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -79,11 +79,11 @@ class AssemblerBase: public Malloced {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
- bool is_ool_constant_pool_available() const {
- if (FLAG_enable_ool_constant_pool) {
- return ool_constant_pool_available_;
+ bool is_constant_pool_available() const {
+ if (FLAG_enable_embedded_constant_pool) {
+ return constant_pool_available_;
} else {
- // Out-of-line constant pool not supported on this architecture.
+ // Embedded constant pool not supported on this architecture.
UNREACHABLE();
return false;
}
@@ -108,11 +108,11 @@ class AssemblerBase: public Malloced {
int buffer_size_;
bool own_buffer_;
- void set_ool_constant_pool_available(bool available) {
- if (FLAG_enable_ool_constant_pool) {
- ool_constant_pool_available_ = available;
+ void set_constant_pool_available(bool available) {
+ if (FLAG_enable_embedded_constant_pool) {
+ constant_pool_available_ = available;
} else {
- // Out-of-line constant pool not supported on this architecture.
+ // Embedded constant pool not supported on this architecture.
UNREACHABLE();
}
}
@@ -130,7 +130,7 @@ class AssemblerBase: public Malloced {
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
- bool ool_constant_pool_available_;
+ bool constant_pool_available_;
// Constant pool.
friend class FrameAndConstantPoolScope;
@@ -413,9 +413,6 @@ class RelocInfo {
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
- RelocInfo(byte* pc, double data64)
- : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) {
- }
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
@@ -487,22 +484,11 @@ class RelocInfo {
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
- // Returns true if the first RelocInfo has the same mode and raw data as the
- // second one.
- static inline bool IsEqual(RelocInfo first, RelocInfo second) {
- return first.rmode() == second.rmode() &&
- (first.rmode() == RelocInfo::NONE64 ?
- first.raw_data64() == second.raw_data64() :
- first.data() == second.data());
- }
-
// Accessors
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
- double data64() const { return data64_; }
- uint64_t raw_data64() { return bit_cast<uint64_t>(data64_); }
Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; }
@@ -645,10 +631,7 @@ class RelocInfo {
// comment).
byte* pc_;
Mode rmode_;
- union {
- intptr_t data_;
- double data64_;
- };
+ intptr_t data_;
Code* host_;
// External-reference pointers are also split across instruction-pairs
// on some platforms, but are accessed via indirect pointers. This location
@@ -1102,30 +1085,11 @@ class PositionsRecorder BASE_EMBEDDED {
// Currently jit_handler_data_ is used to store JITHandler-specific data
// over the lifetime of a PositionsRecorder
void* jit_handler_data_;
- friend class PreservePositionScope;
DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
};
-class PreservePositionScope BASE_EMBEDDED {
- public:
- explicit PreservePositionScope(PositionsRecorder* positions_recorder)
- : positions_recorder_(positions_recorder),
- saved_state_(positions_recorder->state_) {}
-
- ~PreservePositionScope() {
- positions_recorder_->state_ = saved_state_;
- }
-
- private:
- PositionsRecorder* positions_recorder_;
- const PositionState saved_state_;
-
- DISALLOW_COPY_AND_ASSIGN(PreservePositionScope);
-};
-
-
// -----------------------------------------------------------------------------
// Utility functions
@@ -1167,6 +1131,126 @@ class NullCallWrapper : public CallWrapper {
};
+// -----------------------------------------------------------------------------
+// Constant pool support
+
+class ConstantPoolEntry {
+ public:
+ ConstantPoolEntry() {}
+ ConstantPoolEntry(int position, intptr_t value, bool sharing_ok)
+ : position_(position),
+ merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
+ value_(value) {}
+ ConstantPoolEntry(int position, double value)
+ : position_(position), merged_index_(SHARING_ALLOWED), value64_(value) {}
+
+ int position() const { return position_; }
+ bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
+ bool is_merged() const { return merged_index_ >= 0; }
+ int merged_index(void) const {
+ DCHECK(is_merged());
+ return merged_index_;
+ }
+ void set_merged_index(int index) {
+ merged_index_ = index;
+ DCHECK(is_merged());
+ }
+ int offset(void) const {
+ DCHECK(merged_index_ >= 0);
+ return merged_index_;
+ }
+ void set_offset(int offset) {
+ DCHECK(offset >= 0);
+ merged_index_ = offset;
+ }
+ intptr_t value() const { return value_; }
+ uint64_t value64() const { return bit_cast<uint64_t>(value64_); }
+
+ enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
+
+ static int size(Type type) {
+ return (type == INTPTR) ? kPointerSize : kDoubleSize;
+ }
+
+ enum Access { REGULAR, OVERFLOWED };
+
+ private:
+ int position_;
+ int merged_index_;
+ union {
+ intptr_t value_;
+ double value64_;
+ };
+ enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
+};
+
+
+// -----------------------------------------------------------------------------
+// Embedded constant pool support
+
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);
+
+ // Add pointer-sized constant to the embedded constant pool
+ ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
+ bool sharing_ok) {
+ ConstantPoolEntry entry(position, value, sharing_ok);
+ return AddEntry(entry, ConstantPoolEntry::INTPTR);
+ }
+
+ // Add double constant to the embedded constant pool
+ ConstantPoolEntry::Access AddEntry(int position, double value) {
+ ConstantPoolEntry entry(position, value);
+ return AddEntry(entry, ConstantPoolEntry::DOUBLE);
+ }
+
+ // Previews the access type required for the next new entry to be added.
+ ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;
+
+ bool IsEmpty() {
+ return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
+ info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
+ info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
+ info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
+ }
+
+ // Emit the constant pool. Invoke only after all entries have been
+ // added and all instructions have been emitted.
+ // Returns position of the emitted pool (zero implies no constant pool).
+ int Emit(Assembler* assm);
+
+ // Returns the label associated with the start of the constant pool.
+ // Linking to this label in the function prologue may provide an
+ // efficient means of constant pool pointer register initialization
+ // on some architectures.
+ inline Label* EmittedPosition() { return &emitted_label_; }
+
+ private:
+ ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
+ ConstantPoolEntry::Type type);
+ void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
+ void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type);
+
+ struct PerTypeEntryInfo {
+ PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
+ bool overflow() const {
+ return (overflow_start >= 0 &&
+ overflow_start < static_cast<int>(entries.size()));
+ }
+ int regular_reach_bits;
+ int regular_count;
+ int overflow_start;
+ std::vector<ConstantPoolEntry> entries;
+ std::vector<ConstantPoolEntry> shared_entries;
+ };
+
+ Label emitted_label_; // Records pc_offset of emitted pool
+ PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
+};
+
+
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
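
In ConstantPoolEntry above, merged_index_ does triple duty: it starts out as a sharing sentinel, may later hold the index of an identical entry this one was merged with, and after emission is reused for the entry's byte offset in the pool. A minimal sketch of that encoding (plain C++ outside V8; assert stands in for DCHECK):

#include <cassert>
#include <cstdint>
#include <iostream>

// Sketch of the merged_index_ encoding: one int is a sharing sentinel,
// then possibly a merge index, then finally the pool offset.
class Entry {
 public:
  Entry(intptr_t value, bool sharing_ok)
      : merged_index_(sharing_ok ? kSharingAllowed : kSharingProhibited),
        value_(value) {}

  bool sharing_ok() const { return merged_index_ != kSharingProhibited; }
  bool is_merged() const { return merged_index_ >= 0; }
  void set_merged_index(int index) {
    merged_index_ = index;
    assert(is_merged());
  }
  // After emission the same field is reused for the byte offset.
  void set_offset(int offset) {
    assert(offset >= 0);
    merged_index_ = offset;
  }

 private:
  enum { kSharingProhibited = -2, kSharingAllowed = -1 };
  int merged_index_;
  intptr_t value_;
};

int main() {
  Entry e(42, /*sharing_ok=*/true);
  std::cout << e.sharing_ok() << e.is_merged() << "\n";  // prints 10
  e.set_merged_index(3);  // e now shares the pool slot of entry 3
  std::cout << e.is_merged() << "\n";                    // prints 1
}
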
diff --git a/deps/v8/src/ast-literal-reindexer.cc b/deps/v8/src/ast-literal-reindexer.cc
new file mode 100644
index 0000000000..50729be251
--- /dev/null
+++ b/deps/v8/src/ast-literal-reindexer.cc
@@ -0,0 +1,311 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/ast-literal-reindexer.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+void AstLiteralReindexer::VisitVariableDeclaration(VariableDeclaration* node) {
+ VisitVariableProxy(node->proxy());
+}
+
+
+void AstLiteralReindexer::VisitExportDeclaration(ExportDeclaration* node) {
+ VisitVariableProxy(node->proxy());
+}
+
+
+void AstLiteralReindexer::VisitEmptyStatement(EmptyStatement* node) {}
+
+
+void AstLiteralReindexer::VisitContinueStatement(ContinueStatement* node) {}
+
+
+void AstLiteralReindexer::VisitBreakStatement(BreakStatement* node) {}
+
+
+void AstLiteralReindexer::VisitDebuggerStatement(DebuggerStatement* node) {}
+
+
+void AstLiteralReindexer::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* node) {}
+
+
+void AstLiteralReindexer::VisitLiteral(Literal* node) {}
+
+
+void AstLiteralReindexer::VisitRegExpLiteral(RegExpLiteral* node) {
+ UpdateIndex(node);
+}
+
+
+void AstLiteralReindexer::VisitVariableProxy(VariableProxy* node) {}
+
+
+void AstLiteralReindexer::VisitThisFunction(ThisFunction* node) {}
+
+
+void AstLiteralReindexer::VisitSuperPropertyReference(
+ SuperPropertyReference* node) {
+ Visit(node->this_var());
+ Visit(node->home_object());
+}
+
+
+void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
+ Visit(node->this_var());
+ Visit(node->new_target_var());
+ Visit(node->this_function_var());
+}
+
+
+void AstLiteralReindexer::VisitImportDeclaration(ImportDeclaration* node) {
+ VisitVariableProxy(node->proxy());
+}
+
+
+void AstLiteralReindexer::VisitExpressionStatement(ExpressionStatement* node) {
+ Visit(node->expression());
+}
+
+
+void AstLiteralReindexer::VisitReturnStatement(ReturnStatement* node) {
+ Visit(node->expression());
+}
+
+
+void AstLiteralReindexer::VisitYield(Yield* node) {
+ Visit(node->generator_object());
+ Visit(node->expression());
+}
+
+
+void AstLiteralReindexer::VisitThrow(Throw* node) { Visit(node->exception()); }
+
+
+void AstLiteralReindexer::VisitUnaryOperation(UnaryOperation* node) {
+ Visit(node->expression());
+}
+
+
+void AstLiteralReindexer::VisitCountOperation(CountOperation* node) {
+ Visit(node->expression());
+}
+
+
+void AstLiteralReindexer::VisitBlock(Block* node) {
+ VisitStatements(node->statements());
+}
+
+
+void AstLiteralReindexer::VisitFunctionDeclaration(FunctionDeclaration* node) {
+ VisitVariableProxy(node->proxy());
+ VisitFunctionLiteral(node->fun());
+}
+
+
+void AstLiteralReindexer::VisitCallRuntime(CallRuntime* node) {
+ VisitArguments(node->arguments());
+}
+
+
+void AstLiteralReindexer::VisitWithStatement(WithStatement* node) {
+ Visit(node->expression());
+ Visit(node->statement());
+}
+
+
+void AstLiteralReindexer::VisitDoWhileStatement(DoWhileStatement* node) {
+ Visit(node->body());
+ Visit(node->cond());
+}
+
+
+void AstLiteralReindexer::VisitWhileStatement(WhileStatement* node) {
+ Visit(node->cond());
+ Visit(node->body());
+}
+
+
+void AstLiteralReindexer::VisitTryCatchStatement(TryCatchStatement* node) {
+ Visit(node->try_block());
+ Visit(node->catch_block());
+}
+
+
+void AstLiteralReindexer::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ Visit(node->try_block());
+ Visit(node->finally_block());
+}
+
+
+void AstLiteralReindexer::VisitProperty(Property* node) {
+ Visit(node->key());
+ Visit(node->obj());
+}
+
+
+void AstLiteralReindexer::VisitAssignment(Assignment* node) {
+ Visit(node->target());
+ Visit(node->value());
+}
+
+
+void AstLiteralReindexer::VisitBinaryOperation(BinaryOperation* node) {
+ Visit(node->left());
+ Visit(node->right());
+}
+
+
+void AstLiteralReindexer::VisitCompareOperation(CompareOperation* node) {
+ Visit(node->left());
+ Visit(node->right());
+}
+
+
+void AstLiteralReindexer::VisitSpread(Spread* node) {
+ Visit(node->expression());
+}
+
+
+void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
+ Visit(node->each());
+ Visit(node->enumerable());
+ Visit(node->body());
+}
+
+
+void AstLiteralReindexer::VisitForOfStatement(ForOfStatement* node) {
+ Visit(node->assign_iterator());
+ Visit(node->next_result());
+ Visit(node->result_done());
+ Visit(node->assign_each());
+ Visit(node->body());
+}
+
+
+void AstLiteralReindexer::VisitConditional(Conditional* node) {
+ Visit(node->condition());
+ Visit(node->then_expression());
+ Visit(node->else_expression());
+}
+
+
+void AstLiteralReindexer::VisitIfStatement(IfStatement* node) {
+ Visit(node->condition());
+ Visit(node->then_statement());
+ if (node->HasElseStatement()) {
+ Visit(node->else_statement());
+ }
+}
+
+
+void AstLiteralReindexer::VisitSwitchStatement(SwitchStatement* node) {
+ Visit(node->tag());
+ ZoneList<CaseClause*>* cases = node->cases();
+ for (int i = 0; i < cases->length(); i++) {
+ VisitCaseClause(cases->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitCaseClause(CaseClause* node) {
+ if (!node->is_default()) Visit(node->label());
+ VisitStatements(node->statements());
+}
+
+
+void AstLiteralReindexer::VisitForStatement(ForStatement* node) {
+ if (node->init() != NULL) Visit(node->init());
+ if (node->cond() != NULL) Visit(node->cond());
+ if (node->next() != NULL) Visit(node->next());
+ Visit(node->body());
+}
+
+
+void AstLiteralReindexer::VisitClassLiteral(ClassLiteral* node) {
+ if (node->extends()) Visit(node->extends());
+ if (node->constructor()) Visit(node->constructor());
+ if (node->class_variable_proxy()) {
+ VisitVariableProxy(node->class_variable_proxy());
+ }
+ for (int i = 0; i < node->properties()->length(); i++) {
+ VisitObjectLiteralProperty(node->properties()->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitObjectLiteral(ObjectLiteral* node) {
+ UpdateIndex(node);
+ for (int i = 0; i < node->properties()->length(); i++) {
+ VisitObjectLiteralProperty(node->properties()->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitObjectLiteralProperty(
+ ObjectLiteralProperty* node) {
+ Visit(node->key());
+ Visit(node->value());
+}
+
+
+void AstLiteralReindexer::VisitArrayLiteral(ArrayLiteral* node) {
+ UpdateIndex(node);
+ for (int i = 0; i < node->values()->length(); i++) {
+ Visit(node->values()->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitCall(Call* node) {
+ Visit(node->expression());
+ VisitArguments(node->arguments());
+}
+
+
+void AstLiteralReindexer::VisitCallNew(CallNew* node) {
+ Visit(node->expression());
+ VisitArguments(node->arguments());
+}
+
+
+void AstLiteralReindexer::VisitStatements(ZoneList<Statement*>* statements) {
+ if (statements == NULL) return;
+ for (int i = 0; i < statements->length(); i++) {
+ Visit(statements->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ for (int i = 0; i < declarations->length(); i++) {
+ Visit(declarations->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitArguments(ZoneList<Expression*>* arguments) {
+ for (int i = 0; i < arguments->length(); i++) {
+ Visit(arguments->at(i));
+ }
+}
+
+
+void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
+ // We don't recurse into the declarations or body of the function literal.
+}
+
+
+void AstLiteralReindexer::Reindex(Expression* pattern) {
+ pattern->Accept(this);
+}
+}
+} // namespace v8::internal
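
The reindexer itself is mechanical: it walks an expression tree and hands each materialized literal (object, array, regexp) the next consecutive literal index, which is why nearly every visitor above just recurses and only UpdateIndex mutates state. A toy model of that traversal (plain C++, not V8's AST):

#include <iostream>
#include <memory>
#include <vector>

// Toy model of AstLiteralReindexer: recurse over a tree and assign each
// node marked as a literal the next consecutive index.
struct Node {
  bool is_literal = false;
  int literal_index = -1;
  std::vector<std::unique_ptr<Node>> children;
};

class Reindexer {
 public:
  void Reindex(Node* node) {
    if (node->is_literal) node->literal_index = next_index_++;
    for (auto& child : node->children) Reindex(child.get());
  }
  int count() const { return next_index_; }

 private:
  int next_index_ = 0;
};

int main() {
  auto leaf = std::make_unique<Node>();
  leaf->is_literal = true;
  Node root;
  root.is_literal = true;
  root.children.push_back(std::move(leaf));

  Reindexer reindexer;
  reindexer.Reindex(&root);
  std::cout << root.literal_index << " "
            << root.children[0]->literal_index << "\n";  // prints 0 1
}
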
diff --git a/deps/v8/src/ast-literal-reindexer.h b/deps/v8/src/ast-literal-reindexer.h
new file mode 100644
index 0000000000..59b214fecd
--- /dev/null
+++ b/deps/v8/src/ast-literal-reindexer.h
@@ -0,0 +1,46 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_LITERAL_REINDEXER
+#define V8_AST_LITERAL_REINDEXER
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+class AstLiteralReindexer final : public AstVisitor {
+ public:
+ AstLiteralReindexer() : AstVisitor(), next_index_(0) {}
+
+ int count() const { return next_index_; }
+ void Reindex(Expression* pattern);
+
+ private:
+#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
+ AST_NODE_LIST(DEFINE_VISIT)
+#undef DEFINE_VISIT
+
+ void VisitStatements(ZoneList<Statement*>* statements) override;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitArguments(ZoneList<Expression*>* arguments);
+ void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+
+ void UpdateIndex(MaterializedLiteral* literal) {
+ literal->literal_index_ = next_index_++;
+ }
+
+ void Visit(AstNode* node) override { node->Accept(this); }
+
+ int next_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_AST_LITERAL_REINDEXER
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast-numbering.cc
index 35b8cef4f1..151cc8abc1 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast-numbering.cc
@@ -18,7 +18,7 @@ class AstNumberingVisitor final : public AstVisitor {
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
properties_(zone),
- ic_slot_cache_(FLAG_vector_ics ? 4 : 0),
+ ic_slot_cache_(zone),
dont_optimize_reason_(kNoReason) {
InitializeAstVisitor(isolate, zone);
}
@@ -52,10 +52,12 @@ class AstNumberingVisitor final : public AstVisitor {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
- void DisableCaching(BailoutReason reason) {
+ void DisableCrankshaft(BailoutReason reason) {
+ if (FLAG_turbo_shipping) {
+ return properties_.flags()->Add(kDontCrankshaft);
+ }
dont_optimize_reason_ = reason;
DisableSelfOptimization();
- properties_.flags()->Add(kDontCache);
}
template <typename Node>
@@ -71,10 +73,8 @@ class AstNumberingVisitor final : public AstVisitor {
node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots),
&ic_slot_cache_);
properties_.increase_ic_slots(reqs.ic_slots());
- if (FLAG_vector_ics) {
- for (int i = 0; i < reqs.ic_slots(); i++) {
- properties_.SetKind(ic_slots + i, node->FeedbackICSlotKind(i));
- }
+ for (int i = 0; i < reqs.ic_slots(); i++) {
+ properties_.SetKind(ic_slots + i, node->FeedbackICSlotKind(i));
}
}
}
@@ -83,8 +83,7 @@ class AstNumberingVisitor final : public AstVisitor {
int next_id_;
AstProperties properties_;
- // The slot cache allows us to reuse certain vector IC slots. It's only used
- // if FLAG_vector_ics is true.
+ // The slot cache allows us to reuse certain vector IC slots.
ICSlotCache ic_slot_cache_;
BailoutReason dont_optimize_reason_;
@@ -151,7 +150,7 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
IncrementNodeCount();
if (node->var()->IsLookupSlot()) {
- DisableOptimization(kReferenceToAVariableWhichRequiresDynamicLookup);
+ DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
}
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
@@ -164,12 +163,23 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
}
-void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
+void AstNumberingVisitor::VisitSuperPropertyReference(
+ SuperPropertyReference* node) {
IncrementNodeCount();
DisableOptimization(kSuperReference);
- ReserveFeedbackSlots(node);
- node->set_base_id(ReserveIdRange(SuperReference::num_ids()));
+ node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
+ Visit(node->this_var());
+ Visit(node->home_object());
+}
+
+
+void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
+ IncrementNodeCount();
+ DisableOptimization(kSuperReference);
+ node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
Visit(node->this_var());
+ Visit(node->new_target_var());
+ Visit(node->this_function_var());
}
@@ -220,6 +230,7 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(CountOperation::num_ids()));
Visit(node->expression());
+ ReserveFeedbackSlots(node);
}
@@ -252,7 +263,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
- DisableOptimization(kWithStatement);
+ DisableCrankshaft(kWithStatement);
node->set_base_id(ReserveIdRange(WithStatement::num_ids()));
Visit(node->expression());
Visit(node->statement());
@@ -280,6 +291,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
DisableOptimization(kTryCatchStatement);
+ node->set_base_id(ReserveIdRange(TryCatchStatement::num_ids()));
Visit(node->try_block());
Visit(node->catch_block());
}
@@ -288,6 +300,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
DisableOptimization(kTryFinallyStatement);
+ node->set_base_id(ReserveIdRange(TryFinallyStatement::num_ids()));
Visit(node->try_block());
Visit(node->finally_block());
}
@@ -308,6 +321,7 @@ void AstNumberingVisitor::VisitAssignment(Assignment* node) {
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
Visit(node->target());
Visit(node->value());
+ ReserveFeedbackSlots(node);
}
@@ -327,29 +341,34 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
}
-void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
+void AstNumberingVisitor::VisitSpread(Spread* node) {
+ IncrementNodeCount();
+ DisableOptimization(kSpread);
+ Visit(node->expression());
+}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
- ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
Visit(node->each());
Visit(node->enumerable());
Visit(node->body());
+ ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
- DisableOptimization(kForOfStatement);
+ DisableCrankshaft(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator());
Visit(node->next_result());
Visit(node->result_done());
Visit(node->assign_each());
Visit(node->body());
+ ReserveFeedbackSlots(node);
}
@@ -405,7 +424,7 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
- DisableOptimization(kClassLiteral);
+ DisableCrankshaft(kClassLiteral);
node->set_base_id(ReserveIdRange(node->num_ids()));
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
@@ -415,6 +434,7 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
}
+ ReserveFeedbackSlots(node);
}
@@ -424,12 +444,18 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
}
+ node->BuildConstantProperties(isolate());
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code will be emitted.
+ node->CalculateEmitStore(zone());
+ ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitObjectLiteralProperty(
ObjectLiteralProperty* node) {
- if (node->is_computed_name()) DisableOptimization(kComputedPropertyName);
+ if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
@@ -510,7 +536,7 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
}
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
- DisableOptimization(kContextAllocatedArguments);
+ DisableCrankshaft(kContextAllocatedArguments);
}
VisitDeclarations(scope->declarations());
@@ -529,5 +555,5 @@ bool AstNumbering::Renumber(Isolate* isolate, Zone* zone,
AstNumberingVisitor visitor(isolate, zone);
return visitor.Renumber(function);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
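
The recurring change in this file is the split between DisableOptimization (no optimizer can handle the construct) and the new DisableCrankshaft (only Crankshaft bails out; under --turbo-shipping the function is merely flagged kDontCrankshaft so TurboFan can still take it). A sketch of that two-tier gating, with illustrative names that are not V8's:

#include <iostream>
#include <set>
#include <string>

// Sketch of the DisableCrankshaft split: with turbo_shipping, the construct
// only flags the function as off-limits to Crankshaft; otherwise it records
// a bailout reason, disabling optimization outright as before.
struct NumberingState {
  bool turbo_shipping;
  std::set<std::string> flags;
  std::string dont_optimize_reason = "kNoReason";

  void DisableCrankshaft(const std::string& reason) {
    if (turbo_shipping) {
      flags.insert("kDontCrankshaft");  // TurboFan may still optimize
      return;
    }
    dont_optimize_reason = reason;  // full bailout
  }
};

int main() {
  NumberingState shipping{true};
  shipping.DisableCrankshaft("kWithStatement");
  std::cout << shipping.flags.count("kDontCrankshaft") << " "
            << shipping.dont_optimize_reason << "\n";  // 1 kNoReason

  NumberingState classic{false};
  classic.DisableCrankshaft("kWithStatement");
  std::cout << classic.flags.size() << " "
            << classic.dont_optimize_reason << "\n";  // 0 kWithStatement
}
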
diff --git a/deps/v8/src/ast-value-factory.cc b/deps/v8/src/ast-value-factory.cc
index 14badacdfe..68cf015200 100644
--- a/deps/v8/src/ast-value-factory.cc
+++ b/deps/v8/src/ast-value-factory.cc
@@ -140,6 +140,7 @@ bool AstValue::BooleanValue() const {
case SYMBOL:
UNREACHABLE();
break;
+ case NUMBER_WITH_DOT:
case NUMBER:
return DoubleToBoolean(number_);
case SMI:
@@ -167,9 +168,15 @@ void AstValue::Internalize(Isolate* isolate) {
DCHECK(!string_->string().is_null());
break;
case SYMBOL:
- DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
- value_ = isolate->factory()->iterator_symbol();
+ if (symbol_name_[0] == 'i') {
+ DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
+ value_ = isolate->factory()->iterator_symbol();
+ } else {
+ DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
+ value_ = isolate->factory()->home_object_symbol();
+ }
break;
+ case NUMBER_WITH_DOT:
case NUMBER:
value_ = isolate->factory()->NewNumber(number_, TENURED);
break;
@@ -285,8 +292,8 @@ const AstValue* AstValueFactory::NewSymbol(const char* name) {
}
-const AstValue* AstValueFactory::NewNumber(double number) {
- AstValue* value = new (zone_) AstValue(number);
+const AstValue* AstValueFactory::NewNumber(double number, bool with_dot) {
+ AstValue* value = new (zone_) AstValue(number, with_dot);
if (isolate_) {
value->Internalize(isolate_);
}
@@ -378,4 +385,5 @@ bool AstValueFactory::AstRawStringCompare(void* a, void* b) {
if (rhs->byte_length() != len) return false;
return memcmp(lhs->raw_data(), rhs->raw_data(), len) == 0;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index 454a755b79..2fee0396fd 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -142,9 +142,11 @@ class AstValue : public ZoneObject {
}
bool IsNumber() const {
- return type_ == NUMBER || type_ == SMI;
+ return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI;
}
+ bool ContainsDot() const { return type_ == NUMBER_WITH_DOT; }
+
const AstRawString* AsString() const {
if (type_ == STRING)
return string_;
@@ -153,7 +155,7 @@ class AstValue : public ZoneObject {
}
double AsNumber() const {
- if (type_ == NUMBER)
+ if (type_ == NUMBER || type_ == NUMBER_WITH_DOT)
return number_;
if (type_ == SMI)
return smi_;
@@ -169,6 +171,8 @@ class AstValue : public ZoneObject {
bool BooleanValue() const;
+ bool IsTheHole() const { return type_ == THE_HOLE; }
+
void Internalize(Isolate* isolate);
// Can be called after Internalize has been called.
@@ -187,6 +191,7 @@ class AstValue : public ZoneObject {
STRING,
SYMBOL,
NUMBER,
+ NUMBER_WITH_DOT,
SMI,
BOOLEAN,
NULL_TYPE,
@@ -198,7 +203,14 @@ class AstValue : public ZoneObject {
explicit AstValue(const char* name) : type_(SYMBOL) { symbol_name_ = name; }
- explicit AstValue(double n) : type_(NUMBER) { number_ = n; }
+ explicit AstValue(double n, bool with_dot) {
+ if (with_dot) {
+ type_ = NUMBER_WITH_DOT;
+ } else {
+ type_ = NUMBER;
+ }
+ number_ = n;
+ }
AstValue(Type t, int i) : type_(t) {
DCHECK(type_ == SMI);
@@ -232,6 +244,7 @@ class AstValue : public ZoneObject {
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
+ F(concat_iterable_to_array, "$concatIterableToArray") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
@@ -250,9 +263,9 @@ class AstValue : public ZoneObject {
F(is_construct_call, "_IsConstructCall") \
F(is_spec_object, "_IsSpecObject") \
F(let, "let") \
- F(make_reference_error, "MakeReferenceErrorEmbedded") \
- F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
- F(make_type_error, "MakeTypeErrorEmbedded") \
+ F(make_reference_error, "MakeReferenceError") \
+ F(make_syntax_error, "MakeSyntaxError") \
+ F(make_type_error, "MakeTypeError") \
F(native, "native") \
F(new_target, "new.target") \
F(next, "next") \
@@ -263,6 +276,7 @@ class AstValue : public ZoneObject {
F(spread_arguments, "$spreadArguments") \
F(spread_iterable, "$spreadIterable") \
F(this, "this") \
+ F(this_function, ".this_function") \
F(throw_iterator_result_not_an_object, "ThrowIteratorResultNotAnObject") \
F(to_string, "$toString") \
F(undefined, "undefined") \
@@ -330,7 +344,7 @@ class AstValueFactory {
const AstValue* NewString(const AstRawString* string);
// A JavaScript symbol (ECMA-262 edition 6).
const AstValue* NewSymbol(const char* name);
- const AstValue* NewNumber(double number);
+ const AstValue* NewNumber(double number, bool with_dot = false);
const AstValue* NewSmi(int number);
const AstValue* NewBoolean(bool b);
const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
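
NUMBER_WITH_DOT exists because the double alone cannot distinguish the source texts 1 and 1.0 once parsed; the tag is fixed at creation from with_dot and recovered later via ContainsDot(). A minimal model of the distinction (plain C++, not V8's AstValue):

#include <iostream>

// Same double value, different tag, recoverable via ContainsDot().
class NumValue {
 public:
  explicit NumValue(double n, bool with_dot = false)
      : number_(n), with_dot_(with_dot) {}
  double AsNumber() const { return number_; }
  bool ContainsDot() const { return with_dot_; }

 private:
  double number_;
  bool with_dot_;
};

int main() {
  NumValue plain(1.0);         // parsed from the literal "1"
  NumValue dotted(1.0, true);  // parsed from the literal "1.0"
  std::cout << (plain.AsNumber() == dotted.AsNumber()) << " "    // 1: equal doubles
            << plain.ContainsDot() << dotted.ContainsDot() << "\n";  // 01: tags differ
}
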
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 5038716003..e52504a86f 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -54,7 +54,7 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
Variable* var = var_proxy->var();
// The global identifier "undefined" is immutable. Everything
// else could be reassigned.
- return var != NULL && var->location() == Variable::UNALLOCATED &&
+ return var != NULL && var->IsUnallocatedOrGlobalSlot() &&
var_proxy->raw_name()->IsOneByteEqualTo("undefined");
}
@@ -95,8 +95,8 @@ void VariableProxy::BindTo(Variable* var) {
void VariableProxy::SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) {
variable_feedback_slot_ = slot;
- if (var()->IsUnallocated()) {
- cache->Add(VariableICSlotPair(var(), slot));
+ if (var()->IsUnallocatedOrGlobalSlot()) {
+ cache->Put(var(), slot);
}
}
@@ -106,13 +106,12 @@ FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
- if (var()->IsUnallocated()) {
- for (int i = 0; i < cache->length(); i++) {
- VariableICSlotPair& pair = cache->at(i);
- if (pair.variable() == var()) {
- variable_feedback_slot_ = pair.slot();
- return FeedbackVectorRequirements(0, 0);
- }
+ if (var()->IsUnallocatedOrGlobalSlot()) {
+ ZoneHashMap::Entry* entry = cache->Get(var());
+ if (entry != NULL) {
+ variable_feedback_slot_ = FeedbackVectorICSlot(
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
+ return FeedbackVectorRequirements(0, 0);
}
}
return FeedbackVectorRequirements(0, 1);
@@ -121,16 +120,73 @@ FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
}
+static int GetStoreICSlots(Expression* expr) {
+ int ic_slots = 0;
+ if (FLAG_vector_stores) {
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+ if ((assign_type == VARIABLE &&
+ expr->AsVariableProxy()->var()->IsUnallocatedOrGlobalSlot()) ||
+ assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
+ ic_slots++;
+ }
+ }
+ return ic_slots;
+}
+
+
+static Code::Kind GetStoreICKind(Expression* expr) {
+ LhsKind assign_type = Property::GetAssignType(expr->AsProperty());
+ return assign_type == KEYED_PROPERTY ? Code::KEYED_STORE_IC : Code::STORE_IC;
+}
+
+
+FeedbackVectorRequirements ForEachStatement::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
+ int ic_slots = GetStoreICSlots(each());
+ return FeedbackVectorRequirements(0, ic_slots);
+}
+
+
+Code::Kind ForEachStatement::FeedbackICSlotKind(int index) {
+ return GetStoreICKind(each());
+}
+
+
Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
Expression* value, int pos)
: Expression(zone, pos),
- bit_field_(IsUninitializedField::encode(false) |
- KeyTypeField::encode(ELEMENT) |
- StoreModeField::encode(STANDARD_STORE) |
- TokenField::encode(op)),
+ bit_field_(
+ IsUninitializedField::encode(false) | KeyTypeField::encode(ELEMENT) |
+ StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
target_(target),
value_(value),
- binary_operation_(NULL) {}
+ binary_operation_(NULL),
+ slot_(FeedbackVectorICSlot::Invalid()) {}
+
+
+FeedbackVectorRequirements Assignment::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
+ int ic_slots = GetStoreICSlots(target());
+ return FeedbackVectorRequirements(0, ic_slots);
+}
+
+
+Code::Kind Assignment::FeedbackICSlotKind(int index) {
+ return GetStoreICKind(target());
+}
+
+
+FeedbackVectorRequirements CountOperation::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
+ int ic_slots = GetStoreICSlots(expression());
+ return FeedbackVectorRequirements(0, ic_slots);
+}
+
+
+Code::Kind CountOperation::FeedbackICSlotKind(int index) {
+ return GetStoreICKind(expression());
+}
Token::Value Assignment::binary_op() const {
@@ -177,28 +233,10 @@ LanguageMode FunctionLiteral::language_mode() const {
}
-bool FunctionLiteral::uses_super_property() const {
- DCHECK_NOT_NULL(scope());
- return scope()->uses_super_property() || scope()->inner_uses_super_property();
-}
-
-
-// Helper to find an existing shared function info in the baseline code for the
-// given function literal. Used to canonicalize SharedFunctionInfo objects.
-void FunctionLiteral::InitializeSharedInfo(
- Handle<Code> unoptimized_code) {
- for (RelocIterator it(*unoptimized_code); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
- Object* obj = rinfo->target_object();
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->start_position() == start_position()) {
- shared_info_ = Handle<SharedFunctionInfo>(shared);
- break;
- }
- }
- }
+bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
+ if (expr == nullptr || !expr->IsFunctionLiteral()) return false;
+ DCHECK_NOT_NULL(expr->AsFunctionLiteral()->scope());
+ return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
}
@@ -236,6 +274,46 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
+FeedbackVectorRequirements ClassLiteral::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
+ if (!FLAG_vector_stores) return FeedbackVectorRequirements(0, 0);
+
+ // This logic that computes the number of slots needed for vector store
+ // ICs must mirror FullCodeGenerator::VisitClassLiteral.
+ int ic_slots = 0;
+ for (int i = 0; i < properties()->length(); i++) {
+ ObjectLiteral::Property* property = properties()->at(i);
+
+ Expression* value = property->value();
+ if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
+ }
+
+ if (scope() != NULL &&
+ class_variable_proxy()->var()->IsUnallocatedOrGlobalSlot()) {
+ ic_slots++;
+ }
+
+#ifdef DEBUG
+ // FullCodeGenerator::VisitClassLiteral verifies that it consumes slot_count_
+ // slots.
+ slot_count_ = ic_slots;
+#endif
+ return FeedbackVectorRequirements(0, ic_slots);
+}
+
+
+FeedbackVectorICSlot ClassLiteral::SlotForHomeObject(Expression* value,
+ int* slot_index) const {
+ if (FLAG_vector_stores && FunctionLiteral::NeedsHomeObject(value)) {
+ DCHECK(slot_index != NULL && *slot_index >= 0 && *slot_index < slot_count_);
+ FeedbackVectorICSlot slot = GetNthSlot(*slot_index);
+ *slot_index += 1;
+ return slot;
+ }
+ return FeedbackVectorICSlot::Invalid();
+}
+
+
bool ObjectLiteral::Property::IsCompileTimeValue() {
return kind_ == CONSTANT ||
(kind_ == MATERIALIZED_LITERAL &&
@@ -253,6 +331,56 @@ bool ObjectLiteral::Property::emit_store() {
}
+FeedbackVectorRequirements ObjectLiteral::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
+ if (!FLAG_vector_stores) return FeedbackVectorRequirements(0, 0);
+
+ // This logic that computes the number of slots needed for vector store
+ // ICs must mirror FullCodeGenerator::VisitObjectLiteral.
+ int ic_slots = 0;
+ for (int i = 0; i < properties()->length(); i++) {
+ ObjectLiteral::Property* property = properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Expression* value = property->value();
+ if (property->is_computed_name() &&
+ property->kind() != ObjectLiteral::Property::PROTOTYPE) {
+ if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
+ } else if (property->emit_store()) {
+ if (property->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL ||
+ property->kind() == ObjectLiteral::Property::COMPUTED) {
+ Literal* key = property->key()->AsLiteral();
+ if (key->value()->IsInternalizedString()) ic_slots++;
+ if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
+ } else if (property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER) {
+ // We might need a slot for the home object.
+ if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
+ }
+ }
+ }
+
+#ifdef DEBUG
+ // FullCodeGenerator::VisitObjectLiteral verifies that it consumes slot_count_
+ // slots.
+ slot_count_ = ic_slots;
+#endif
+ return FeedbackVectorRequirements(0, ic_slots);
+}
+
+
+FeedbackVectorICSlot ObjectLiteral::SlotForHomeObject(Expression* value,
+ int* slot_index) const {
+ if (FLAG_vector_stores && FunctionLiteral::NeedsHomeObject(value)) {
+ DCHECK(slot_index != NULL && *slot_index >= 0 && *slot_index < slot_count_);
+ FeedbackVectorICSlot slot = GetNthSlot(*slot_index);
+ *slot_index += 1;
+ return slot;
+ }
+ return FeedbackVectorICSlot::Invalid();
+}
+
+
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
const auto GETTER = ObjectLiteral::Property::GETTER;
const auto SETTER = ObjectLiteral::Property::SETTER;
@@ -378,16 +506,18 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (!constant_elements_.is_null()) return;
// Allocate a fixed array to hold all the object literals.
- Handle<JSArray> array =
- isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- JSArray::Expand(array, values()->length());
+ Handle<JSArray> array = isolate->factory()->NewJSArray(
+ FAST_HOLEY_SMI_ELEMENTS, values()->length(), values()->length(),
+ Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
// Fill in the literals.
bool is_simple = true;
int depth_acc = 1;
bool is_holey = false;
- for (int i = 0, n = values()->length(); i < n; i++) {
- Expression* element = values()->at(i);
+ int array_index = 0;
+ for (int n = values()->length(); array_index < n; array_index++) {
+ Expression* element = values()->at(array_index);
+ if (element->IsSpread()) break;
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
@@ -395,23 +525,33 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
depth_acc = m_literal->depth() + 1;
}
}
+
+ // New handle scope here; it needs to be after BuildConstants().
+ HandleScope scope(isolate);
Handle<Object> boilerplate_value = GetBoilerplateValue(element, isolate);
if (boilerplate_value->IsTheHole()) {
is_holey = true;
- } else if (boilerplate_value->IsUninitialized()) {
+ continue;
+ }
+
+ if (boilerplate_value->IsUninitialized()) {
+ boilerplate_value = handle(Smi::FromInt(0), isolate);
is_simple = false;
- JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate), SLOPPY).Assert();
- } else {
- JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY).Assert();
}
+
+ JSObject::AddDataElement(array, array_index, boilerplate_value, NONE)
+ .Assert();
}
+ if (array_index != values()->length()) {
+ JSArray::SetLength(array, array_index);
+ }
+ JSObject::ValidateElements(array);
Handle<FixedArrayBase> element_values(array->elements());
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
- if (is_simple && depth_acc == 1 && values()->length() > 0 &&
+ if (is_simple && depth_acc == 1 && array_index > 0 &&
array->HasFastSmiOrObjectElements()) {
element_values->set_map(isolate->heap()->fixed_cow_array_map());
}
@@ -582,7 +722,10 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
bool Call::IsUsingCallFeedbackICSlot(Isolate* isolate) const {
CallType call_type = GetCallType(isolate);
- if (IsUsingCallFeedbackSlot(isolate) || call_type == POSSIBLY_EVAL_CALL) {
+ if (call_type == POSSIBLY_EVAL_CALL) {
+ return false;
+ }
+ if (call_type == SUPER_CALL && !FLAG_vector_stores) {
return false;
}
return true;
@@ -591,7 +734,7 @@ bool Call::IsUsingCallFeedbackICSlot(Isolate* isolate) const {
bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
// SuperConstructorCall uses a CallConstructStub, which wants
- // a Slot, not an IC slot.
+ // a Slot, in addition to any IC slots requested elsewhere.
return GetCallType(isolate) == SUPER_CALL;
}
@@ -600,8 +743,6 @@ FeedbackVectorRequirements Call::ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) {
int ic_slots = IsUsingCallFeedbackICSlot(isolate) ? 1 : 0;
int slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
- // A Call uses either a slot or an IC slot.
- DCHECK((ic_slots & slots) == 0);
return FeedbackVectorRequirements(slots, ic_slots);
}
@@ -611,14 +752,14 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
if (proxy != NULL) {
if (proxy->var()->is_possibly_eval(isolate)) {
return POSSIBLY_EVAL_CALL;
- } else if (proxy->var()->IsUnallocated()) {
+ } else if (proxy->var()->IsUnallocatedOrGlobalSlot()) {
return GLOBAL_CALL;
} else if (proxy->var()->IsLookupSlot()) {
return LOOKUP_SLOT_CALL;
}
}
- if (expression()->AsSuperReference() != NULL) return SUPER_CALL;
+ if (expression()->IsSuperCallReference()) return SUPER_CALL;
Property* property = expression()->AsProperty();
return property != NULL ? PROPERTY_CALL : OTHER_CALL;
@@ -1009,4 +1150,5 @@ bool Literal::Match(void* literal1, void* literal2) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
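
The ICSlotCache rework (see the ast.h hunk below) stores slot indices as void* values in a zone hash map, so VariableProxy::ComputeFeedbackRequirements above round-trips the index through a pointer-sized integer. The same trick in isolation (plain C++, with std::unordered_map standing in for ZoneHashMap):

#include <cstdint>
#include <iostream>
#include <unordered_map>

// A map with void* values carries a small integer slot index by
// round-tripping it through intptr_t.
int main() {
  std::unordered_map<const void*, void*> cache;
  int some_variable = 0;  // stands in for a Variable* key
  int slot = 7;

  // Put: encode the slot index as a pointer-sized value.
  cache[&some_variable] =
      reinterpret_cast<void*>(static_cast<intptr_t>(slot));

  // Get: decode it back, as the VariableProxy lookup above does.
  auto it = cache.find(&some_variable);
  if (it != cache.end()) {
    int recovered = static_cast<int>(reinterpret_cast<intptr_t>(it->second));
    std::cout << recovered << "\n";  // prints 7
  }
}
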
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index c669756d52..115c59ff80 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -87,7 +87,8 @@ namespace internal {
V(CompareOperation) \
V(Spread) \
V(ThisFunction) \
- V(SuperReference) \
+ V(SuperPropertyReference) \
+ V(SuperCallReference) \
V(CaseClause)
#define AST_NODE_LIST(V) \
@@ -126,9 +127,8 @@ AST_NODE_LIST(DEF_FORWARD_DECLARATION)
// Typedef only introduced to avoid unreadable code.
-// Please do appreciate the required space in "> >".
-typedef ZoneList<Handle<String> > ZoneStringList;
-typedef ZoneList<Handle<Object> > ZoneObjectList;
+typedef ZoneList<Handle<String>> ZoneStringList;
+typedef ZoneList<Handle<Object>> ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
@@ -137,11 +137,7 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
friend class AstNodeFactory;
-enum AstPropertiesFlag {
- kDontSelfOptimize,
- kDontSoftInline,
- kDontCache
-};
+enum AstPropertiesFlag { kDontSelfOptimize, kDontCrankshaft };
class FeedbackVectorRequirements {
@@ -158,25 +154,29 @@ class FeedbackVectorRequirements {
};
-class VariableICSlotPair final {
+class ICSlotCache {
public:
- VariableICSlotPair(Variable* variable, FeedbackVectorICSlot slot)
- : variable_(variable), slot_(slot) {}
- VariableICSlotPair()
- : variable_(NULL), slot_(FeedbackVectorICSlot::Invalid()) {}
+ explicit ICSlotCache(Zone* zone)
+ : zone_(zone),
+ hash_map_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)) {}
- Variable* variable() const { return variable_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ void Put(Variable* variable, FeedbackVectorICSlot slot) {
+ ZoneHashMap::Entry* entry = hash_map_.LookupOrInsert(
+ variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
+ entry->value = reinterpret_cast<void*>(slot.ToInt());
+ }
+
+ ZoneHashMap::Entry* Get(Variable* variable) const {
+ return hash_map_.Lookup(variable, ComputePointerHash(variable));
+ }
private:
- Variable* variable_;
- FeedbackVectorICSlot slot_;
+ Zone* zone_;
+ ZoneHashMap hash_map_;
};
-typedef List<VariableICSlotPair> ICSlotCache;
-
-
class AstProperties final BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
@@ -363,19 +363,6 @@ class Expression : public AstNode {
Bounds bounds() const { return bounds_; }
void set_bounds(Bounds bounds) { bounds_ = bounds; }
- // Whether the expression is parenthesized
- bool is_single_parenthesized() const {
- return IsSingleParenthesizedField::decode(bit_field_);
- }
- bool is_multi_parenthesized() const {
- return IsMultiParenthesizedField::decode(bit_field_);
- }
- void increase_parenthesization_level() {
- bit_field_ = IsMultiParenthesizedField::update(bit_field_,
- is_single_parenthesized());
- bit_field_ = IsSingleParenthesizedField::update(bit_field_, true);
- }
-
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@@ -396,7 +383,7 @@ class Expression : public AstNode {
// TODO(rossberg): this should move to its own AST node eventually.
virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
- byte to_boolean_types() const {
+ uint16_t to_boolean_types() const {
return ToBooleanTypesField::decode(bit_field_);
}
@@ -412,7 +399,7 @@ class Expression : public AstNode {
bounds_(Bounds::Unbounded(zone)),
bit_field_(0) {}
static int parent_num_ids() { return 0; }
- void set_to_boolean_types(byte types) {
+ void set_to_boolean_types(uint16_t types) {
bit_field_ = ToBooleanTypesField::update(bit_field_, types);
}
@@ -426,9 +413,7 @@ class Expression : public AstNode {
int base_id_;
Bounds bounds_;
- class ToBooleanTypesField : public BitField16<byte, 0, 8> {};
- class IsSingleParenthesizedField : public BitField16<bool, 8, 1> {};
- class IsMultiParenthesizedField : public BitField16<bool, 9, 1> {};
+ class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
uint16_t bit_field_;
// Ends with 16-bit field; deriving classes in turn begin with
// 16-bit fields for optimum packing efficiency.
@@ -497,7 +482,7 @@ class Block final : public BreakableStatement {
}
ZoneList<Statement*>* statements() { return &statements_; }
- bool is_initializer_block() const { return is_initializer_block_; }
+ bool ignore_completion_value() const { return ignore_completion_value_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId DeclsId() const { return BailoutId(local_id(0)); }
@@ -512,10 +497,10 @@ class Block final : public BreakableStatement {
protected:
Block(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
- bool is_initializer_block, int pos)
+ bool ignore_completion_value, int pos)
: BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos),
statements_(capacity, zone),
- is_initializer_block_(is_initializer_block),
+ ignore_completion_value_(ignore_completion_value),
scope_(NULL) {}
static int parent_num_ids() { return BreakableStatement::num_ids(); }
@@ -523,7 +508,7 @@ class Block final : public BreakableStatement {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
ZoneList<Statement*> statements_;
- bool is_initializer_block_;
+ bool ignore_completion_value_;
Scope* scope_;
};
@@ -810,13 +795,26 @@ class ForEachStatement : public IterationStatement {
Expression* each() const { return each_; }
Expression* subject() const { return subject_; }
+ FeedbackVectorRequirements ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) override;
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) override {
+ each_slot_ = slot;
+ }
+ Code::Kind FeedbackICSlotKind(int index) override;
+ FeedbackVectorICSlot EachFeedbackSlot() const { return each_slot_; }
+
protected:
ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
+ : IterationStatement(zone, labels, pos),
+ each_(NULL),
+ subject_(NULL),
+ each_slot_(FeedbackVectorICSlot::Invalid()) {}
private:
Expression* each_;
Expression* subject_;
+ FeedbackVectorICSlot each_slot_;
};
@@ -829,9 +827,12 @@ class ForInStatement final : public ForEachStatement {
}
// Type feedback information.
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+ FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(1, 0);
+ FeedbackVectorRequirements base =
+ ForEachStatement::ComputeFeedbackRequirements(isolate, cache);
+ DCHECK(base.slots() == 0 && base.ic_slots() <= 1);
+ return FeedbackVectorRequirements(1, base.ic_slots());
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) override {
for_in_feedback_slot_ = slot;
@@ -1158,18 +1159,29 @@ class IfStatement final : public Statement {
class TryStatement : public Statement {
public:
- int index() const { return index_; }
Block* try_block() const { return try_block_; }
+ void set_base_id(int id) { base_id_ = id; }
+ static int num_ids() { return parent_num_ids() + 1; }
+ BailoutId HandlerId() const { return BailoutId(local_id(0)); }
+
protected:
- TryStatement(Zone* zone, int index, Block* try_block, int pos)
- : Statement(zone, pos), index_(index), try_block_(try_block) {}
+ TryStatement(Zone* zone, Block* try_block, int pos)
+ : Statement(zone, pos),
+ try_block_(try_block),
+ base_id_(BailoutId::None().ToInt()) {}
+ static int parent_num_ids() { return 0; }
+
+ int base_id() const {
+ DCHECK(!BailoutId(base_id_).IsNone());
+ return base_id_;
+ }
private:
- // Unique (per-function) index of this handler. This is not an AST ID.
- int index_;
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Block* try_block_;
+ int base_id_;
};
@@ -1182,18 +1194,12 @@ class TryCatchStatement final : public TryStatement {
Block* catch_block() const { return catch_block_; }
protected:
- TryCatchStatement(Zone* zone,
- int index,
- Block* try_block,
- Scope* scope,
- Variable* variable,
- Block* catch_block,
- int pos)
- : TryStatement(zone, index, try_block, pos),
+ TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
+ Variable* variable, Block* catch_block, int pos)
+ : TryStatement(zone, try_block, pos),
scope_(scope),
variable_(variable),
- catch_block_(catch_block) {
- }
+ catch_block_(catch_block) {}
private:
Scope* scope_;
@@ -1209,10 +1215,9 @@ class TryFinallyStatement final : public TryStatement {
Block* finally_block() const { return finally_block_; }
protected:
- TryFinallyStatement(
- Zone* zone, int index, Block* try_block, Block* finally_block, int pos)
- : TryStatement(zone, index, try_block, pos),
- finally_block_(finally_block) { }
+ TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
+ int pos)
+ : TryStatement(zone, try_block, pos), finally_block_(finally_block) {}
private:
Block* finally_block_;
@@ -1297,6 +1302,8 @@ class Literal final : public Expression {
};
+class AstLiteralReindexer;
+
// Base class for literals that needs space in the corresponding JSFunction.
class MaterializedLiteral : public Expression {
public:
@@ -1310,11 +1317,14 @@ class MaterializedLiteral : public Expression {
return depth_;
}
+ bool is_strong() const { return is_strong_; }
+
protected:
- MaterializedLiteral(Zone* zone, int literal_index, int pos)
+ MaterializedLiteral(Zone* zone, int literal_index, bool is_strong, int pos)
: Expression(zone, pos),
literal_index_(literal_index),
is_simple_(false),
+ is_strong_(is_strong),
depth_(0) {}
// A materialized literal is simple if the values consist of only
@@ -1343,7 +1353,10 @@ class MaterializedLiteral : public Expression {
private:
int literal_index_;
bool is_simple_;
+ bool is_strong_;
int depth_;
+
+ friend class AstLiteralReindexer;
};
@@ -1437,6 +1450,9 @@ class ObjectLiteral final : public MaterializedLiteral {
if (disable_mementos) {
flags |= kDisableMementos;
}
+ if (is_strong()) {
+ flags |= kIsStrong;
+ }
return flags;
}
@@ -1445,7 +1461,8 @@ class ObjectLiteral final : public MaterializedLiteral {
kFastElements = 1,
kHasFunction = 1 << 1,
kShallowProperties = 1 << 2,
- kDisableMementos = 1 << 3
+ kDisableMementos = 1 << 3,
+ kIsStrong = 1 << 4
};
struct Accessors: public ZoneObject {
@@ -1463,16 +1480,44 @@ class ObjectLiteral final : public MaterializedLiteral {
// ObjectLiteral can vary, so num_ids() is not a static method.
int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
+ // Object literals need one feedback slot for each non-trivial value, as well
+ // as some slots for home objects.
+ FeedbackVectorRequirements ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) override;
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) override {
+ slot_ = slot;
+ }
+ Code::Kind FeedbackICSlotKind(int index) override { return Code::STORE_IC; }
+ FeedbackVectorICSlot GetNthSlot(int n) const {
+ return FeedbackVectorICSlot(slot_.ToInt() + n);
+ }
+
+ // If value needs a home object, returns a valid feedback vector ic slot
+ // given by slot_index, and increments slot_index.
+ FeedbackVectorICSlot SlotForHomeObject(Expression* value,
+ int* slot_index) const;
+
+#ifdef DEBUG
+ int slot_count() const { return slot_count_; }
+#endif
+
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
- int boilerplate_properties, bool has_function, int pos)
- : MaterializedLiteral(zone, literal_index, pos),
+ int boilerplate_properties, bool has_function, bool is_strong,
+ int pos)
+ : MaterializedLiteral(zone, literal_index, is_strong, pos),
properties_(properties),
boilerplate_properties_(boilerplate_properties),
fast_elements_(false),
has_elements_(false),
may_store_doubles_(false),
- has_function_(has_function) {}
+ has_function_(has_function),
+#ifdef DEBUG
+ slot_count_(0),
+#endif
+ slot_(FeedbackVectorICSlot::Invalid()) {
+ }
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@@ -1484,6 +1529,12 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_elements_;
bool may_store_doubles_;
bool has_function_;
+#ifdef DEBUG
+ // slot_count_ helps validate that the logic to allocate ic slots and the
+ // logic to use them are in sync.
+ int slot_count_;
+#endif
+ FeedbackVectorICSlot slot_;
};
@@ -1497,8 +1548,9 @@ class RegExpLiteral final : public MaterializedLiteral {
protected:
RegExpLiteral(Zone* zone, const AstRawString* pattern,
- const AstRawString* flags, int literal_index, int pos)
- : MaterializedLiteral(zone, literal_index, pos),
+ const AstRawString* flags, int literal_index, bool is_strong,
+ int pos)
+ : MaterializedLiteral(zone, literal_index, is_strong, pos),
pattern_(pattern),
flags_(flags) {
set_depth(1);
@@ -1543,19 +1595,24 @@ class ArrayLiteral final : public MaterializedLiteral {
if (disable_mementos) {
flags |= kDisableMementos;
}
+ if (is_strong()) {
+ flags |= kIsStrong;
+ }
return flags;
}
enum Flags {
kNoFlags = 0,
kShallowElements = 1,
- kDisableMementos = 1 << 1
+ kDisableMementos = 1 << 1,
+ kIsStrong = 1 << 2
};
protected:
ArrayLiteral(Zone* zone, ZoneList<Expression*>* values, int literal_index,
- int pos)
- : MaterializedLiteral(zone, literal_index, pos), values_(values) {}
+ bool is_strong, int pos)
+ : MaterializedLiteral(zone, literal_index, is_strong, pos),
+ values_(values) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@@ -1607,7 +1664,7 @@ class VariableProxy final : public Expression {
void BindTo(Variable* var);
bool UsesVariableFeedbackSlot() const {
- return FLAG_vector_ics && (var()->IsUnallocated() || var()->IsLookupSlot());
+ return var()->IsUnallocatedOrGlobalSlot() || var()->IsLookupSlot();
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
@@ -1621,6 +1678,9 @@ class VariableProxy final : public Expression {
return variable_feedback_slot_;
}
+ static int num_ids() { return parent_num_ids() + 1; }
+ BailoutId BeforeId() const { return BailoutId(local_id(0)); }
+
protected:
VariableProxy(Zone* zone, Variable* var, int start_position,
int end_position);
@@ -1628,6 +1688,8 @@ class VariableProxy final : public Expression {
VariableProxy(Zone* zone, const AstRawString* name,
Variable::Kind variable_kind, int start_position,
int end_position);
+ static int parent_num_ids() { return Expression::num_ids(); }
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
class IsThisField : public BitField8<bool, 0, 1> {};
class IsAssignedField : public BitField8<bool, 1, 1> {};
@@ -1648,6 +1710,17 @@ class VariableProxy final : public Expression {
};
+// Left-hand side can only be a property, a global or a (parameter or local)
+// slot.
+enum LhsKind {
+ VARIABLE,
+ NAMED_PROPERTY,
+ KEYED_PROPERTY,
+ NAMED_SUPER_PROPERTY,
+ KEYED_SUPER_PROPERTY
+};
+
+
class Property final : public Expression {
public:
DECLARE_NODE_TYPE(Property)
@@ -1657,9 +1730,8 @@ class Property final : public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- static int num_ids() { return parent_num_ids() + 2; }
+ static int num_ids() { return parent_num_ids() + 1; }
BailoutId LoadId() const { return BailoutId(local_id(0)); }
- TypeFeedbackId PropertyFeedbackId() { return TypeFeedbackId(local_id(1)); }
bool IsStringAccess() const {
return IsStringAccessField::decode(bit_field_);
@@ -1695,13 +1767,11 @@ class Property final : public Expression {
}
bool is_for_call() const { return IsForCallField::decode(bit_field_); }
- bool IsSuperAccess() {
- return obj()->IsSuperReference();
- }
+ bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
+ return FeedbackVectorRequirements(0, 1);
}
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) override {
@@ -1712,10 +1782,18 @@ class Property final : public Expression {
}
FeedbackVectorICSlot PropertyFeedbackSlot() const {
- DCHECK(!FLAG_vector_ics || !property_feedback_slot_.IsInvalid());
+ DCHECK(!property_feedback_slot_.IsInvalid());
return property_feedback_slot_;
}
+ static LhsKind GetAssignType(Property* property) {
+ if (property == NULL) return VARIABLE;
+ bool super_access = property->IsSuperAccess();
+ return (property->key()->IsPropertyName())
+ ? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY)
+ : (super_access ? KEYED_SUPER_PROPERTY : KEYED_PROPERTY);
+ }
+
protected:
Property(Zone* zone, Expression* obj, Expression* key, int pos)
: Expression(zone, pos),
@@ -1754,22 +1832,14 @@ class Call final : public Expression {
Isolate* isolate, const ICSlotCache* cache) override;
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) override {
- ic_slot_or_slot_ = slot.ToInt();
- }
- void SetFirstFeedbackSlot(FeedbackVectorSlot slot) override {
- ic_slot_or_slot_ = slot.ToInt();
+ ic_slot_ = slot;
}
+ void SetFirstFeedbackSlot(FeedbackVectorSlot slot) override { slot_ = slot; }
Code::Kind FeedbackICSlotKind(int index) override { return Code::CALL_IC; }
- FeedbackVectorSlot CallFeedbackSlot() const {
- DCHECK(ic_slot_or_slot_ != FeedbackVectorSlot::Invalid().ToInt());
- return FeedbackVectorSlot(ic_slot_or_slot_);
- }
+ FeedbackVectorSlot CallFeedbackSlot() const { return slot_; }
- FeedbackVectorICSlot CallFeedbackICSlot() const {
- DCHECK(ic_slot_or_slot_ != FeedbackVectorICSlot::Invalid().ToInt());
- return FeedbackVectorICSlot(ic_slot_or_slot_);
- }
+ FeedbackVectorICSlot CallFeedbackICSlot() const { return ic_slot_; }
SmallMapList* GetReceiverTypes() override {
if (expression()->IsProperty()) {
@@ -1787,7 +1857,7 @@ class Call final : public Expression {
bool global_call() const {
VariableProxy* proxy = expression_->AsVariableProxy();
- return proxy != NULL && proxy->var()->IsUnallocated();
+ return proxy != NULL && proxy->var()->IsUnallocatedOrGlobalSlot();
}
bool known_global_function() const {
@@ -1807,9 +1877,10 @@ class Call final : public Expression {
allocation_site_ = site;
}
- static int num_ids() { return parent_num_ids() + 2; }
+ static int num_ids() { return parent_num_ids() + 3; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
- BailoutId EvalOrLookupId() const { return BailoutId(local_id(1)); }
+ BailoutId EvalId() const { return BailoutId(local_id(1)); }
+ BailoutId LookupId() const { return BailoutId(local_id(2)); }
bool is_uninitialized() const {
return IsUninitializedField::decode(bit_field_);
@@ -1841,7 +1912,8 @@ class Call final : public Expression {
Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
int pos)
: Expression(zone, pos),
- ic_slot_or_slot_(FeedbackVectorICSlot::Invalid().ToInt()),
+ ic_slot_(FeedbackVectorICSlot::Invalid()),
+ slot_(FeedbackVectorSlot::Invalid()),
expression_(expression),
arguments_(arguments),
bit_field_(IsUninitializedField::encode(false)) {
@@ -1854,9 +1926,8 @@ class Call final : public Expression {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- // We store this as an integer because we don't know if we have a slot or
- // an ic slot until scoping time.
- int ic_slot_or_slot_;
+ FeedbackVectorICSlot ic_slot_;
+ FeedbackVectorSlot slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@@ -1949,9 +2020,7 @@ class CallRuntime final : public Expression {
bool is_jsruntime() const { return function_ == NULL; }
// Type feedback information.
- bool HasCallRuntimeFeedbackSlot() const {
- return FLAG_vector_ics && is_jsruntime();
- }
+ bool HasCallRuntimeFeedbackSlot() const { return is_jsruntime(); }
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) override {
return FeedbackVectorRequirements(0, HasCallRuntimeFeedbackSlot() ? 1 : 0);
@@ -1969,9 +2038,7 @@ class CallRuntime final : public Expression {
}
static int num_ids() { return parent_num_ids() + 1; }
- TypeFeedbackId CallRuntimeFeedbackId() const {
- return TypeFeedbackId(local_id(0));
- }
+ BailoutId CallId() { return BailoutId(local_id(0)); }
protected:
CallRuntime(Zone* zone, const AstRawString* name,
@@ -2122,16 +2189,25 @@ class CountOperation final : public Expression {
return TypeFeedbackId(local_id(3));
}
+ FeedbackVectorRequirements ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) override;
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) override {
+ slot_ = slot;
+ }
+ Code::Kind FeedbackICSlotKind(int index) override;
+ FeedbackVectorICSlot CountSlot() const { return slot_; }
+
protected:
CountOperation(Zone* zone, Token::Value op, bool is_prefix, Expression* expr,
int pos)
: Expression(zone, pos),
- bit_field_(IsPrefixField::encode(is_prefix) |
- KeyTypeField::encode(ELEMENT) |
- StoreModeField::encode(STANDARD_STORE) |
- TokenField::encode(op)),
+ bit_field_(
+ IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
+ StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
type_(NULL),
- expression_(expr) {}
+ expression_(expr),
+ slot_(FeedbackVectorICSlot::Invalid()) {}
static int parent_num_ids() { return Expression::num_ids(); }
private:
@@ -2148,6 +2224,7 @@ class CountOperation final : public Expression {
Type* type_;
Expression* expression_;
SmallMapList receiver_types_;
+ FeedbackVectorICSlot slot_;
};
@@ -2290,6 +2367,15 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
+ FeedbackVectorRequirements ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) override;
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) override {
+ slot_ = slot;
+ }
+ Code::Kind FeedbackICSlotKind(int index) override;
+ FeedbackVectorICSlot AssignmentSlot() const { return slot_; }
+
protected:
Assignment(Zone* zone, Token::Value op, Expression* target, Expression* value,
int pos);
@@ -2310,6 +2396,7 @@ class Assignment final : public Expression {
Expression* value_;
BinaryOperation* binary_operation_;
SmallMapList receiver_types_;
+ FeedbackVectorICSlot slot_;
};
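
CountOperation and Assignment now take part in ic-slot allocation through the same three-step contract. A minimal sketch, assuming an allocator that walks the AST (variable names are illustrative):

    // 1. Ask the node how many slots and ic slots it needs.
    FeedbackVectorRequirements reqs =
        expr->ComputeFeedbackRequirements(isolate, &cache);
    // 2. Hand it the first allocated ic slot; the node stores it in slot_.
    if (reqs.ic_slots() > 0) {
      expr->SetFirstFeedbackICSlot(first_ic_slot, &cache);
    }
    // 3. Code generation later reads the slot back, e.g. via
    //    assignment->AssignmentSlot() or count_operation->CountSlot().
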
@@ -2328,22 +2415,8 @@ class Yield final : public Expression {
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
- // Delegating yield surrounds the "yield" in a "try/catch". This index
- // locates the catch handler in the handler table, and is equivalent to
- // TryCatchStatement::index().
- int index() const {
- DCHECK_EQ(kDelegating, yield_kind());
- return index_;
- }
- void set_index(int index) {
- DCHECK_EQ(kDelegating, yield_kind());
- index_ = index;
- }
-
// Type feedback information.
- bool HasFeedbackSlots() const {
- return FLAG_vector_ics && (yield_kind() == kDelegating);
- }
+ bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) override {
return FeedbackVectorRequirements(0, HasFeedbackSlots() ? 3 : 0);
@@ -2374,14 +2447,12 @@ class Yield final : public Expression {
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
- index_(-1),
yield_first_feedback_slot_(FeedbackVectorICSlot::Invalid()) {}
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
- int index_;
FeedbackVectorICSlot yield_first_feedback_slot_;
};
@@ -2443,23 +2514,16 @@ class FunctionLiteral final : public Expression {
bool is_expression() const { return IsExpression::decode(bitfield_); }
bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
LanguageMode language_mode() const;
- bool uses_super_property() const;
- static bool NeedsHomeObject(Expression* literal) {
- return literal != NULL && literal->IsFunctionLiteral() &&
- literal->AsFunctionLiteral()->uses_super_property();
- }
+ static bool NeedsHomeObject(Expression* expr);
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
- int handler_count() { return handler_count_; }
int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
bool AllowsLazyCompilationWithoutContext();
- void InitializeSharedInfo(Handle<Code> code);
-
Handle<String> debug_name() const {
if (raw_name_ != NULL && !raw_name_->IsEmpty()) {
return raw_name_->string();
@@ -2494,9 +2558,6 @@ class FunctionLiteral final : public Expression {
inferred_name_ = Handle<String>();
}
- // shared_info may be null if it's not cached in full code.
- Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
-
bool pretenure() { return Pretenure::decode(bitfield_); }
void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
@@ -2527,7 +2588,7 @@ class FunctionLiteral final : public Expression {
bitfield_ = ShouldBeUsedOnceHintBit::update(bitfield_, kShouldBeUsedOnce);
}
- FunctionKind kind() { return FunctionKindBits::decode(bitfield_); }
+ FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
@@ -2547,8 +2608,8 @@ class FunctionLiteral final : public Expression {
FunctionLiteral(Zone* zone, const AstRawString* name,
AstValueFactory* ast_value_factory, Scope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int handler_count,
- int parameter_count, FunctionType function_type,
+ int expected_property_count, int parameter_count,
+ FunctionType function_type,
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
EagerCompileHint eager_compile_hint, FunctionKind kind,
@@ -2562,7 +2623,6 @@ class FunctionLiteral final : public Expression {
dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
- handler_count_(handler_count),
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
bitfield_ = IsExpression::encode(function_type != DECLARATION) |
@@ -2579,7 +2639,6 @@ class FunctionLiteral final : public Expression {
private:
const AstRawString* raw_name_;
Handle<String> name_;
- Handle<SharedFunctionInfo> shared_info_;
Scope* scope_;
ZoneList<Statement*>* body_;
const AstString* raw_inferred_name_;
@@ -2589,7 +2648,6 @@ class FunctionLiteral final : public Expression {
int materialized_literal_count_;
int expected_property_count_;
- int handler_count_;
int parameter_count_;
int function_token_position_;
@@ -2634,6 +2692,28 @@ class ClassLiteral final : public Expression {
// ClassLiteral can vary, so num_ids() is not a static method.
int num_ids() const { return parent_num_ids() + 4 + properties()->length(); }
+ // Class literals need one feedback slot for each non-trivial value, as well
+ // as some slots for home objects.
+ FeedbackVectorRequirements ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) override;
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) override {
+ slot_ = slot;
+ }
+ Code::Kind FeedbackICSlotKind(int index) override { return Code::STORE_IC; }
+ FeedbackVectorICSlot GetNthSlot(int n) const {
+ return FeedbackVectorICSlot(slot_.ToInt() + n);
+ }
+
+ // If value needs a home object, returns the valid feedback vector ic slot
+ // at *slot_index and increments *slot_index; otherwise the returned slot
+ // is invalid.
+ FeedbackVectorICSlot SlotForHomeObject(Expression* value,
+ int* slot_index) const;
+
+#ifdef DEBUG
+ int slot_count() const { return slot_count_; }
+#endif
+
protected:
ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
VariableProxy* class_variable_proxy, Expression* extends,
@@ -2646,7 +2726,13 @@ class ClassLiteral final : public Expression {
extends_(extends),
constructor_(constructor),
properties_(properties),
- end_position_(end_position) {}
+ end_position_(end_position),
+#ifdef DEBUG
+ slot_count_(0),
+#endif
+ slot_(FeedbackVectorICSlot::Invalid()) {
+ }
+
static int parent_num_ids() { return Expression::num_ids(); }
private:
@@ -2659,6 +2745,12 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
int end_position_;
+#ifdef DEBUG
+ // slot_count_ helps validate that the logic to allocate ic slots and the
+ // logic to use them are in sync.
+ int slot_count_;
+#endif
+ FeedbackVectorICSlot slot_;
};
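
A hedged sketch of how SlotForHomeObject is meant to be consumed, per its comment above; the loop shape and variable names are assumptions:

    int slot_index = 0;
    for (int i = 0; i < lit->properties()->length(); i++) {
      ClassLiteral::Property* property = lit->properties()->at(i);
      FeedbackVectorICSlot slot =
          lit->SlotForHomeObject(property->value(), &slot_index);
      if (!slot.IsInvalid()) {
        // Emit the home-object store with this slot. In debug builds,
        // slot_count() lets callers check that slot_index stays in range.
      }
    }
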
@@ -2689,45 +2781,52 @@ class ThisFunction final : public Expression {
};
-class SuperReference final : public Expression {
+class SuperPropertyReference final : public Expression {
public:
- DECLARE_NODE_TYPE(SuperReference)
+ DECLARE_NODE_TYPE(SuperPropertyReference)
VariableProxy* this_var() const { return this_var_; }
+ Expression* home_object() const { return home_object_; }
- static int num_ids() { return parent_num_ids() + 1; }
- TypeFeedbackId HomeObjectFeedbackId() { return TypeFeedbackId(local_id(0)); }
-
- // Type feedback information.
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
- }
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- homeobject_feedback_slot_ = slot;
+ protected:
+ SuperPropertyReference(Zone* zone, VariableProxy* this_var,
+ Expression* home_object, int pos)
+ : Expression(zone, pos), this_var_(this_var), home_object_(home_object) {
+ DCHECK(this_var->is_this());
+ DCHECK(home_object->IsProperty());
}
- Code::Kind FeedbackICSlotKind(int index) override { return Code::LOAD_IC; }
- FeedbackVectorICSlot HomeObjectFeedbackSlot() {
- DCHECK(!FLAG_vector_ics || !homeobject_feedback_slot_.IsInvalid());
- return homeobject_feedback_slot_;
- }
+ private:
+ VariableProxy* this_var_;
+ Expression* home_object_;
+};
+
+
+class SuperCallReference final : public Expression {
+ public:
+ DECLARE_NODE_TYPE(SuperCallReference)
+
+ VariableProxy* this_var() const { return this_var_; }
+ VariableProxy* new_target_var() const { return new_target_var_; }
+ VariableProxy* this_function_var() const { return this_function_var_; }
protected:
- SuperReference(Zone* zone, VariableProxy* this_var, int pos)
+ SuperCallReference(Zone* zone, VariableProxy* this_var,
+ VariableProxy* new_target_var,
+ VariableProxy* this_function_var, int pos)
: Expression(zone, pos),
this_var_(this_var),
- homeobject_feedback_slot_(FeedbackVectorICSlot::Invalid()) {
+ new_target_var_(new_target_var),
+ this_function_var_(this_function_var) {
DCHECK(this_var->is_this());
+ DCHECK(new_target_var->raw_name()->IsOneByteEqualTo("new.target"));
+ DCHECK(this_function_var->raw_name()->IsOneByteEqualTo(".this_function"));
}
- static int parent_num_ids() { return Expression::num_ids(); }
private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
VariableProxy* this_var_;
- FeedbackVectorICSlot homeobject_feedback_slot_;
+ VariableProxy* new_target_var_;
+ VariableProxy* this_function_var_;
};
@@ -2788,6 +2887,9 @@ class RegExpDisjunction final : public RegExpTree {
int max_match() override { return max_match_; }
ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
private:
+ bool SortConsecutiveAtoms(RegExpCompiler* compiler);
+ void RationalizeConsecutiveAtoms(RegExpCompiler* compiler);
+ void FixSingleCharacterDisjunctions(RegExpCompiler* compiler);
ZoneList<RegExpTree*>* alternatives_;
int min_match_;
int max_match_;
@@ -3184,12 +3286,10 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) ExportDeclaration(zone_, proxy, scope, pos);
}
- Block* NewBlock(ZoneList<const AstRawString*>* labels,
- int capacity,
- bool is_initializer_block,
- int pos) {
+ Block* NewBlock(ZoneList<const AstRawString*>* labels, int capacity,
+ bool ignore_completion_value, int pos) {
return new (zone_)
- Block(zone_, labels, capacity, is_initializer_block, pos);
+ Block(zone_, labels, capacity, ignore_completion_value, pos);
}
#define STATEMENT_WITH_LABELS(NodeType) \
@@ -3248,22 +3348,17 @@ class AstNodeFactory final BASE_EMBEDDED {
IfStatement(zone_, condition, then_statement, else_statement, pos);
}
- TryCatchStatement* NewTryCatchStatement(int index,
- Block* try_block,
- Scope* scope,
+ TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
Variable* variable,
- Block* catch_block,
- int pos) {
- return new (zone_) TryCatchStatement(zone_, index, try_block, scope,
- variable, catch_block, pos);
+ Block* catch_block, int pos) {
+ return new (zone_)
+ TryCatchStatement(zone_, try_block, scope, variable, catch_block, pos);
}
- TryFinallyStatement* NewTryFinallyStatement(int index,
- Block* try_block,
- Block* finally_block,
- int pos) {
+ TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
+ Block* finally_block, int pos) {
return new (zone_)
- TryFinallyStatement(zone_, index, try_block, finally_block, pos);
+ TryFinallyStatement(zone_, try_block, finally_block, pos);
}
DebuggerStatement* NewDebuggerStatement(int pos) {
@@ -3289,9 +3384,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) Literal(zone_, ast_value_factory_->NewSymbol(name), pos);
}
- Literal* NewNumberLiteral(double number, int pos) {
+ Literal* NewNumberLiteral(double number, int pos, bool with_dot = false) {
return new (zone_)
- Literal(zone_, ast_value_factory_->NewNumber(number), pos);
+ Literal(zone_, ast_value_factory_->NewNumber(number, with_dot), pos);
}
Literal* NewSmiLiteral(int number, int pos) {
@@ -3319,9 +3414,11 @@ class AstNodeFactory final BASE_EMBEDDED {
int literal_index,
int boilerplate_properties,
bool has_function,
+ bool is_strong,
int pos) {
return new (zone_) ObjectLiteral(zone_, properties, literal_index,
- boilerplate_properties, has_function, pos);
+ boilerplate_properties, has_function,
+ is_strong, pos);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3342,14 +3439,18 @@ class AstNodeFactory final BASE_EMBEDDED {
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern,
const AstRawString* flags,
int literal_index,
+ bool is_strong,
int pos) {
- return new (zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
+ return new (zone_) RegExpLiteral(zone_, pattern, flags, literal_index,
+ is_strong, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
+ bool is_strong,
int pos) {
- return new (zone_) ArrayLiteral(zone_, values, literal_index, pos);
+ return new (zone_) ArrayLiteral(zone_, values, literal_index, is_strong,
+ pos);
}
VariableProxy* NewVariableProxy(Variable* var,
@@ -3362,6 +3463,7 @@ class AstNodeFactory final BASE_EMBEDDED {
Variable::Kind variable_kind,
int start_position = RelocInfo::kNoPosition,
int end_position = RelocInfo::kNoPosition) {
+ DCHECK_NOT_NULL(name);
return new (zone_)
VariableProxy(zone_, name, variable_kind, start_position, end_position);
}
@@ -3458,7 +3560,7 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, AstValueFactory* ast_value_factory,
Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int handler_count, int parameter_count,
+ int expected_property_count, int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
@@ -3466,7 +3568,7 @@ class AstNodeFactory final BASE_EMBEDDED {
int position) {
return new (zone_) FunctionLiteral(
zone_, name, ast_value_factory, scope, body, materialized_literal_count,
- expected_property_count, handler_count, parameter_count, function_type,
+ expected_property_count, parameter_count, function_type,
has_duplicate_parameters, is_function, eager_compile_hint, kind,
position);
}
@@ -3491,8 +3593,19 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) ThisFunction(zone_, pos);
}
- SuperReference* NewSuperReference(VariableProxy* this_var, int pos) {
- return new (zone_) SuperReference(zone_, this_var, pos);
+ SuperPropertyReference* NewSuperPropertyReference(VariableProxy* this_var,
+ Expression* home_object,
+ int pos) {
+ return new (zone_)
+ SuperPropertyReference(zone_, this_var, home_object, pos);
+ }
+
+ SuperCallReference* NewSuperCallReference(VariableProxy* this_var,
+ VariableProxy* new_target_var,
+ VariableProxy* this_function_var,
+ int pos) {
+ return new (zone_) SuperCallReference(zone_, this_var, new_target_var,
+ this_function_var, pos);
}
private:
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index dcc5eb4e6c..d7df9b4717 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -71,5 +71,5 @@ void BackgroundParsingTask::Run() {
delete script_data;
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/bailout-reason.cc
index 93d43dde26..cd01851380 100644
--- a/deps/v8/src/bailout-reason.cc
+++ b/deps/v8/src/bailout-reason.cc
@@ -16,5 +16,5 @@ const char* GetBailoutReason(BailoutReason reason) {
#undef ERROR_MESSAGES_TEXTS
return error_messages_[reason];
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index cd73674383..16816348c6 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -179,6 +179,7 @@ namespace internal {
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
+ V(kOperandIsNotADate, "Operand is not a date") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
V(kOperandIsNotASmi, "Operand is not a smi") \
@@ -210,6 +211,7 @@ namespace internal {
V(kScriptContext, "Allocation of script context") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kSpread, "Spread in array literal") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSuperReference, "Super reference") \
diff --git a/deps/v8/src/base/atomicops_internals_arm_gcc.h b/deps/v8/src/base/atomicops_internals_arm_gcc.h
index 069b1ffa88..e399657e13 100644
--- a/deps/v8/src/base/atomicops_internals_arm_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_arm_gcc.h
@@ -59,11 +59,11 @@ inline void MemoryBarrier() {
// variant of the target architecture is being used. This tests against
// any known ARMv6 or ARMv7 variant, where it is possible to directly
// use ldrex/strex instructions to implement fast atomic operations.
-#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
- defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
+ defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 5c7cd74c61..abcfd9a9ed 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -44,6 +44,17 @@ inline unsigned CountPopulation64(uint64_t value) {
}
+// Overloaded versions of CountPopulation32/64.
+inline unsigned CountPopulation(uint32_t value) {
+ return CountPopulation32(value);
+}
+
+
+inline unsigned CountPopulation(uint64_t value) {
+ return CountPopulation64(value);
+}
+
+
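
The unsuffixed overloads exist so width-generic code can dispatch on the argument type instead of spelling CountPopulation32/64 explicitly. A minimal sketch (HasSingleBit is hypothetical):

    template <typename T>  // T is uint32_t or uint64_t
    bool HasSingleBit(T value) {
      // Exactly one set bit means a nonzero power of two.
      return v8::base::bits::CountPopulation(value) == 1;
    }
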
// CountLeadingZeros32(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
inline unsigned CountLeadingZeros32(uint32_t value) {
@@ -225,6 +236,19 @@ int32_t SignedDiv32(int32_t lhs, int32_t rhs);
int32_t SignedMod32(int32_t lhs, int32_t rhs);
+// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
+// and |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the unsigned summation resulted in an overflow.
+inline bool UnsignedAddOverflow32(uint32_t lhs, uint32_t rhs, uint32_t* val) {
+#if V8_HAS_BUILTIN_SADD_OVERFLOW
+ return __builtin_uadd_overflow(lhs, rhs, val);
+#else
+ *val = lhs + rhs;
+ return *val < (lhs | rhs);
+#endif
+}
+
+
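
The portable branch is correct because, without wraparound, lhs + rhs >= (lhs | rhs), while after wraparound the result is smaller than both operands; note also that the guard checks the signed-add builtin macro while calling the unsigned builtin, which holds on compilers that provide both. A usage sketch:

    uint32_t sum;
    bool overflowed =
        v8::base::bits::UnsignedAddOverflow32(0xFFFFFFFFu, 1u, &sum);
    // overflowed == true and sum == 0 here.
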
// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index e98b4f569f..6dc96f4f3b 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -15,12 +15,9 @@
#include "src/base/logging.h"
-// The expression OFFSET_OF(type, field) computes the byte-offset
-// of the specified field relative to the containing type. This
-// corresponds to 'offsetof' (in stddef.h), except that it doesn't
-// use 0 or NULL, which causes a problem with the compiler warnings
-// we have enabled (which is also why 'offsetof' doesn't seem to work).
-// Here we simply use the aligned, non-zero value 16.
+// TODO(all) Replace all uses of this macro with C++'s offsetof. To do that, we
+// have to make sure that only standard-layout types and simple field
+// designators are used.
#define OFFSET_OF(type, field) \
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(16)->field)) - 16)
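
For a standard-layout type the macro and offsetof agree, which is what the TODO relies on; a small check sketch (Point is hypothetical). Because OFFSET_OF uses reinterpret_cast it is not a constant expression, so the comparison has to be done at run time:

    #include <cassert>
    #include <cstddef>

    struct Point { int x; int y; };  // standard-layout example type

    void CheckOffsets() {
      assert(OFFSET_OF(Point, y) == offsetof(Point, y));
    }
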
diff --git a/deps/v8/src/base/safe_math_impl.h b/deps/v8/src/base/safe_math_impl.h
index 055e2a0275..eb2a151191 100644
--- a/deps/v8/src/base/safe_math_impl.h
+++ b/deps/v8/src/base/safe_math_impl.h
@@ -208,8 +208,8 @@ typename enable_if<std::numeric_limits<T>::is_integer &&
(sizeof(T) * 2 > sizeof(uintmax_t)),
T>::type
CheckedMul(T x, T y, RangeConstraint* validity) {
- // if either side is zero then the result will be zero.
- if (!(x || y)) {
+ // If either side is zero then the result will be zero.
+ if (!x || !y) {
return RANGE_VALID;
} else if (x > 0) {
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 617a88ea81..a2dc6ab27f 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -44,7 +44,7 @@ int SysInfo::NumberOfProcessors() {
}
return static_cast<int>(result);
#elif V8_OS_WIN
- SYSTEM_INFO system_info = {0};
+ SYSTEM_INFO system_info = {};
::GetNativeSystemInfo(&system_info);
return static_cast<int>(system_info.dwNumberOfProcessors);
#endif
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h
index 99fa3b172d..0f7b15533d 100644
--- a/deps/v8/src/basic-block-profiler.h
+++ b/deps/v8/src/basic-block-profiler.h
@@ -14,9 +14,6 @@
namespace v8 {
namespace internal {
-class Schedule;
-class Graph;
-
class BasicBlockProfiler {
public:
class Data {
diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc
index f9a0c95ef5..ace9e37193 100644
--- a/deps/v8/src/bignum-dtoa.cc
+++ b/deps/v8/src/bignum-dtoa.cc
@@ -632,4 +632,5 @@ static void FixupMultiply10(int estimated_power, bool is_even,
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 254cb012b6..e70987a82d 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -750,4 +750,5 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index c56c429937..43fc0eb835 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -148,9 +148,8 @@ void Bootstrapper::TearDown() {
class Genesis BASE_EMBEDDED {
public:
- Genesis(Isolate* isolate,
- MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- v8::Handle<v8::ObjectTemplate> global_proxy_template,
+ Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions);
~Genesis() { }
@@ -185,7 +184,7 @@ class Genesis BASE_EMBEDDED {
// we have to use the deserialized ones that are linked together with the
// rest of the context snapshot.
Handle<GlobalObject> CreateNewGlobals(
- v8::Handle<v8::ObjectTemplate> global_proxy_template,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy);
// Hooks the given global proxy into the context. If the context was created
// by deserialization then this will unhook the global proxy that was
@@ -197,6 +196,11 @@ class Genesis BASE_EMBEDDED {
// other objects in the snapshot.
void HookUpGlobalObject(Handle<GlobalObject> global_object,
Handle<FixedArray> outdated_contexts);
+ // The native context has a ScriptContextTable that stores declarative bindings
+ // made in script scopes. Add a "this" binding to that table pointing to the
+ // global proxy.
+ void InstallGlobalThisBinding();
+ void HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts);
// New context initialization. Used for creating a context from scratch.
void InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSFunction> empty_function);
@@ -217,7 +221,7 @@ class Genesis BASE_EMBEDDED {
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
- Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
+ Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
bool InstallNatives();
@@ -230,6 +234,7 @@ class Genesis BASE_EMBEDDED {
bool InstallExperimentalNatives();
bool InstallExtraNatives();
void InstallBuiltinFunctionIds();
+ void InstallExperimentalBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
@@ -268,7 +273,7 @@ class Genesis BASE_EMBEDDED {
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
bool ConfigureGlobalObjects(
- v8::Handle<v8::ObjectTemplate> global_proxy_template);
+ v8::Local<v8::ObjectTemplate> global_proxy_template);
// Migrates all properties from the 'from' object to the 'to'
// object and overrides the prototype in 'to' with the one from
@@ -310,16 +315,13 @@ class Genesis BASE_EMBEDDED {
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
- static bool CompileNative(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source);
- static bool CompileScriptCached(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source,
- SourceCodeCache* cache,
- v8::Extension* extension,
- Handle<Context> top_context,
- bool use_runtime_context);
+ static bool CompileNative(Isolate* isolate, Vector<const char> name,
+ Handle<String> source, int argc,
+ Handle<Object> argv[]);
+
+ static bool CallUtilsFunction(Isolate* isolate, const char* name);
+
+ static bool CompileExtension(Isolate* isolate, v8::Extension* extension);
Isolate* isolate_;
Handle<Context> result_;
@@ -347,7 +349,7 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- v8::Handle<v8::ObjectTemplate> global_proxy_template,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions) {
HandleScope scope(isolate_);
Genesis genesis(
@@ -534,6 +536,12 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_initial_array_prototype(*object_function_prototype);
Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
.Assert();
+
+ // Allocate initial strong object map.
+ Handle<Map> strong_object_map =
+ Map::Copy(Handle<Map>(object_fun->initial_map()), "EmptyStrongObject");
+ strong_object_map->set_is_strong();
+ native_context()->set_js_object_strong_map(*strong_object_map);
}
// Allocate the empty function as the prototype for function - ES6 19.2.3
@@ -554,10 +562,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<String> source = factory->NewStringFromStaticChars("() {}");
Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- empty_function->shared()->set_script(*script);
empty_function->shared()->set_start_position(0);
empty_function->shared()->set_end_position(source->length());
empty_function->shared()->DontAdaptArguments();
+ SharedFunctionInfo::SetScript(handle(empty_function->shared()), script);
// Set prototypes for the function maps.
Handle<Map> sloppy_function_map(native_context()->sloppy_function_map(),
@@ -590,7 +598,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
// Add length.
if (function_mode == BOUND_FUNCTION) {
Handle<String> length_string = isolate()->factory()->length_string();
- DataDescriptor d(length_string, 0, ro_attribs, Representation::Tagged());
+ DataDescriptor d(length_string, 0, roc_attribs, Representation::Tagged());
map->AppendDescriptor(&d);
} else {
DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
@@ -709,7 +717,7 @@ Handle<Map> Genesis::CreateStrongFunctionMap(
map->set_function_with_prototype(is_constructor);
Map::SetPrototype(map, empty_function);
map->set_is_extensible(is_constructor);
- // TODO(rossberg): mark strong
+ map->set_is_strong();
return map;
}
@@ -787,7 +795,8 @@ static void AddToWeakNativeContextList(Context* context) {
}
}
#endif
- context->set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list());
+ context->set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list(),
+ UPDATE_WEAK_WRITE_BARRIER);
heap->set_native_contexts_list(context);
}
@@ -809,8 +818,43 @@ void Genesis::CreateRoots() {
}
+void Genesis::InstallGlobalThisBinding() {
+ Handle<ScriptContextTable> script_contexts(
+ native_context()->script_context_table());
+ Handle<ScopeInfo> scope_info = ScopeInfo::CreateGlobalThisBinding(isolate());
+ Handle<JSFunction> closure(native_context()->closure());
+ Handle<Context> context = factory()->NewScriptContext(closure, scope_info);
+
+ // Go ahead and hook it up while we're at it.
+ int slot = scope_info->ReceiverContextSlotIndex();
+ DCHECK_EQ(slot, Context::MIN_CONTEXT_SLOTS);
+ context->set(slot, native_context()->global_proxy());
+
+ Handle<ScriptContextTable> new_script_contexts =
+ ScriptContextTable::Extend(script_contexts, context);
+ native_context()->set_script_context_table(*new_script_contexts);
+}
+
+
+void Genesis::HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts) {
+ // One of these contexts should be the one that declares the global "this"
+ // binding.
+ for (int i = 0; i < outdated_contexts->length(); ++i) {
+ Context* context = Context::cast(outdated_contexts->get(i));
+ if (context->IsScriptContext()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ int slot = scope_info->ReceiverContextSlotIndex();
+ if (slot >= 0) {
+ DCHECK_EQ(slot, Context::MIN_CONTEXT_SLOTS);
+ context->set(slot, native_context()->global_proxy());
+ }
+ }
+ }
+}
+
+
Handle<GlobalObject> Genesis::CreateNewGlobals(
- v8::Handle<v8::ObjectTemplate> global_proxy_template,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy) {
// The argument global_proxy_template aka data is an ObjectTemplateInfo.
// It has a constructor pointer that points at global_constructor which is a
@@ -935,8 +979,9 @@ void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Runtime::DefineObjectProperty(builtins_global, factory()->global_string(),
- global_object, attributes).Assert();
+ JSObject::SetOwnPropertyIgnoreAttributes(builtins_global,
+ factory()->global_string(),
+ global_object, attributes).Assert();
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(*global_object)->set_builtins(*builtins_global);
TransferNamedProperties(global_object_from_snapshot, global_object);
@@ -967,6 +1012,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
native_context()->set_script_context_table(*script_context_table);
+ InstallGlobalThisBinding();
Handle<String> object_name = factory->Object_string();
JSObject::AddProperty(
@@ -1020,6 +1066,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
ArrayConstructorStub array_constructor_stub(isolate);
Handle<Code> code = array_constructor_stub.GetCode();
array_function->shared()->set_construct_stub(*code);
+
+ Handle<Map> initial_strong_map =
+ Map::Copy(initial_map, "SetInstancePrototype");
+ initial_strong_map->set_is_strong();
+ CacheInitialJSArrayMaps(native_context(), initial_strong_map);
}
{ // --- N u m b e r ---
@@ -1186,7 +1237,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
isolate->initial_object_prototype(),
Builtins::kIllegal);
native_context()->set_array_buffer_fun(*array_buffer_fun);
- native_context()->set_array_buffer_map(array_buffer_fun->initial_map());
}
{ // -- T y p e d A r r a y s
@@ -1213,13 +1263,19 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
native_context()->set_data_view_fun(*data_view_fun);
}
- // -- M a p
- InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ { // -- M a p
+ Handle<JSFunction> js_map_fun = InstallFunction(
+ global, "Map", JS_MAP_TYPE, JSMap::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ native_context()->set_js_map_fun(*js_map_fun);
+ }
- // -- S e t
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ { // -- S e t
+ Handle<JSFunction> js_set_fun = InstallFunction(
+ global, "Set", JS_SET_TYPE, JSSet::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ native_context()->set_js_set_fun(*js_set_fun);
+ }
{ // Set up the iterator result object
STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
@@ -1300,12 +1356,17 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
DCHECK(IsFastObjectElementsKind(map->elements_kind()));
}
- { // --- aliased arguments map
- Handle<Map> map =
- Map::Copy(isolate->sloppy_arguments_map(), "AliasedArguments");
- map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
+ { // --- fast and slow aliased arguments maps
+ Handle<Map> map = isolate->sloppy_arguments_map();
+ map = Map::Copy(map, "FastAliasedArguments");
+ map->set_elements_kind(FAST_SLOPPY_ARGUMENTS_ELEMENTS);
DCHECK_EQ(2, map->pre_allocated_property_fields());
- native_context()->set_aliased_arguments_map(*map);
+ native_context()->set_fast_aliased_arguments_map(*map);
+
+ map = Map::Copy(map, "SlowAliasedArguments");
+ map->set_elements_kind(SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
+ DCHECK_EQ(2, map->pre_allocated_property_fields());
+ native_context()->set_slow_aliased_arguments_map(*map);
}
{ // --- strict mode arguments map
@@ -1445,109 +1506,130 @@ bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->SourceLookup<Natives>(index);
- return CompileNative(isolate, name, source_code);
+ Handle<Object> global = isolate->global_object();
+ Handle<Object> utils = isolate->natives_utils_object();
+ Handle<Object> args[] = {global, utils};
+ return CompileNative(isolate, name, source_code, arraysize(args), args);
}
bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
+ HandleScope scope(isolate);
Vector<const char> name = ExperimentalNatives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->SourceLookup<ExperimentalNatives>(index);
- return CompileNative(isolate, name, source_code);
+ Handle<Object> global = isolate->global_object();
+ Handle<Object> utils = isolate->natives_utils_object();
+ Handle<Object> args[] = {global, utils};
+ return CompileNative(isolate, name, source_code, arraysize(args), args);
}
bool Genesis::CompileExtraBuiltin(Isolate* isolate, int index) {
+ HandleScope scope(isolate);
Vector<const char> name = ExtraNatives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->SourceLookup<ExtraNatives>(index);
- return CompileNative(isolate, name, source_code);
+ Handle<Object> global = isolate->global_object();
+ Handle<Object> exports = isolate->extras_exports_object();
+ Handle<Object> args[] = {global, exports};
+ return CompileNative(isolate, name, source_code, arraysize(args), args);
}
-bool Genesis::CompileNative(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source) {
- HandleScope scope(isolate);
+bool Genesis::CompileNative(Isolate* isolate, Vector<const char> name,
+ Handle<String> source, int argc,
+ Handle<Object> argv[]) {
SuppressDebug compiling_natives(isolate->debug());
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return false;
-
- bool result = CompileScriptCached(isolate,
- name,
- source,
- NULL,
- NULL,
- Handle<Context>(isolate->context()),
- true);
- DCHECK(isolate->has_pending_exception() != result);
- if (!result) isolate->clear_pending_exception();
- return result;
+ if (check.JsHasOverflowed(1 * KB)) {
+ isolate->StackOverflow();
+ return false;
+ }
+
+ Handle<Context> context(isolate->context());
+
+ Handle<String> script_name =
+ isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
+ Handle<SharedFunctionInfo> function_info = Compiler::CompileScript(
+ source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
+ context, NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE,
+ false);
+ if (function_info.is_null()) return false;
+
+ DCHECK(context->IsNativeContext());
+
+ Handle<Context> runtime_context(context->runtime_context());
+ Handle<JSBuiltinsObject> receiver(context->builtins());
+ Handle<JSFunction> fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(function_info,
+ runtime_context);
+
+ // For non-extension scripts, run script to get the function wrapper.
+ Handle<Object> wrapper;
+ if (!Execution::Call(isolate, fun, receiver, 0, NULL).ToHandle(&wrapper)) {
+ return false;
+ }
+ // Then run the function wrapper.
+ return !Execution::Call(isolate, Handle<JSFunction>::cast(wrapper), receiver,
+ argc, argv).is_null();
+}
+
+
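A hedged sketch of the calling convention CompileNative now assumes, inferred from CompileBuiltin/CompileExtraBuiltin above; the JS shape is illustrative, not code from this patch:

    // Every native script must evaluate to a wrapper function, roughly:
    //
    //   (function (global, utils) { /* native script body */ })
    //
    // CompileNative first runs the script to obtain that wrapper, then calls
    // the wrapper with argc/argv: {global, utils} for ordinary and
    // experimental natives, {global, exports} for extra natives.
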
+bool Genesis::CallUtilsFunction(Isolate* isolate, const char* name) {
+ Handle<JSObject> utils =
+ Handle<JSObject>::cast(isolate->natives_utils_object());
+ Handle<String> name_string =
+ isolate->factory()->NewStringFromAsciiChecked(name);
+ Handle<Object> fun = JSObject::GetDataProperty(utils, name_string);
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ Handle<Object> args[] = {utils};
+ return !Execution::Call(isolate, fun, receiver, 1, args).is_null();
}
-bool Genesis::CompileScriptCached(Isolate* isolate,
- Vector<const char> name,
- Handle<String> source,
- SourceCodeCache* cache,
- v8::Extension* extension,
- Handle<Context> top_context,
- bool use_runtime_context) {
+bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
Handle<SharedFunctionInfo> function_info;
+ Handle<String> source =
+ isolate->factory()
+ ->NewExternalStringFromOneByte(extension->source())
+ .ToHandleChecked();
+ DCHECK(source->IsOneByteRepresentation());
+
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
- if (cache == NULL || !cache->Lookup(name, &function_info)) {
- DCHECK(source->IsOneByteRepresentation());
+ Vector<const char> name = CStrVector(extension->name());
+ SourceCodeCache* cache = isolate->bootstrapper()->extensions_cache();
+ Handle<Context> context(isolate->context());
+ DCHECK(context->IsNativeContext());
+
+ if (!cache->Lookup(name, &function_info)) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
function_info = Compiler::CompileScript(
- source, script_name, 0, 0, false, false, Handle<Object>(), top_context,
- extension, NULL, ScriptCompiler::kNoCompileOptions,
- use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE, false);
+ source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
+ context, extension, NULL, ScriptCompiler::kNoCompileOptions,
+ NOT_NATIVES_CODE, false);
if (function_info.is_null()) return false;
- if (cache != NULL) cache->Add(name, function_info);
+ cache->Add(name, function_info);
}
// Set up the function context. Conceptually, we should clone the
// function before overwriting the context but since we're in a
// single-threaded environment it is not strictly necessary.
- DCHECK(top_context->IsNativeContext());
- Handle<Context> context =
- Handle<Context>(use_runtime_context
- ? Handle<Context>(top_context->runtime_context())
- : top_context);
Handle<JSFunction> fun =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
// Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
- Handle<Object> receiver =
- Handle<Object>(use_runtime_context
- ? top_context->builtins()
- : top_context->global_object(),
- isolate);
- MaybeHandle<Object> result;
- if (extension == NULL) {
- // For non-extension scripts, run script to get the function wrapper.
- Handle<Object> wrapper;
- if (!Execution::Call(isolate, fun, receiver, 0, NULL).ToHandle(&wrapper)) {
- return false;
- }
- // Then run the function wrapper.
- Handle<Object> global_obj(top_context->global_object(), isolate);
- Handle<Object> args[] = {global_obj};
- result = Execution::Call(isolate, Handle<JSFunction>::cast(wrapper),
- receiver, arraysize(args), args);
- } else {
- result = Execution::Call(isolate, fun, receiver, 0, NULL);
- }
- return !result.is_null();
+ Handle<Object> receiver = isolate->global_object();
+ return !Execution::Call(isolate, fun, receiver, 0, NULL).is_null();
}
@@ -1611,6 +1693,7 @@ void Genesis::InstallNativeFunctions() {
to_complete_property_descriptor);
INSTALL_NATIVE(Symbol, "$promiseStatus", promise_status);
+ INSTALL_NATIVE(Symbol, "$promiseValue", promise_value);
INSTALL_NATIVE(JSFunction, "$promiseCreate", promise_create);
INSTALL_NATIVE(JSFunction, "$promiseResolve", promise_resolve);
INSTALL_NATIVE(JSFunction, "$promiseReject", promise_reject);
@@ -1632,6 +1715,15 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "$observeNativeObjectNotifierPerformChange",
native_object_notifier_perform_change);
INSTALL_NATIVE(JSFunction, "$arrayValues", array_values_iterator);
+ INSTALL_NATIVE(JSFunction, "$mapGet", map_get);
+ INSTALL_NATIVE(JSFunction, "$mapSet", map_set);
+ INSTALL_NATIVE(JSFunction, "$mapHas", map_has);
+ INSTALL_NATIVE(JSFunction, "$mapDelete", map_delete);
+ INSTALL_NATIVE(JSFunction, "$setAdd", set_add);
+ INSTALL_NATIVE(JSFunction, "$setHas", set_has);
+ INSTALL_NATIVE(JSFunction, "$setDelete", set_delete);
+ INSTALL_NATIVE(JSFunction, "$mapFromArray", map_from_array);
+ INSTALL_NATIVE(JSFunction, "$setFromArray", set_from_array);
}
@@ -1710,10 +1802,7 @@ void Genesis::InitializeBuiltinTypedArrays() {
void Genesis::InstallNativeFunctions_##id() {}
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrays)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_classes)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
@@ -1726,6 +1815,11 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_reflect)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_spreadcalls)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_destructuring)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_spread_arrays)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sharedarraybuffer)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_atomics)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_new_target)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_concat_spreadable)
void Genesis::InstallNativeFunctions_harmony_proxies() {
@@ -1737,16 +1831,14 @@ void Genesis::InstallNativeFunctions_harmony_proxies() {
}
}
+
#undef INSTALL_NATIVE
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrays)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_classes)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
@@ -1756,6 +1848,10 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spreadcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_arrays)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_atomics)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_new_target)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_concat_spreadable)
void Genesis::InitializeGlobal_harmony_regexps() {
Handle<JSObject> builtins(native_context()->builtins());
@@ -1818,10 +1914,23 @@ void Genesis::InitializeGlobal_harmony_tostring() {
}
-Handle<JSFunction> Genesis::InstallInternalArray(
- Handle<JSBuiltinsObject> builtins,
- const char* name,
- ElementsKind elements_kind) {
+void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
+ if (!FLAG_harmony_sharedarraybuffer) return;
+
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context()->global_object()));
+
+ Handle<JSFunction> shared_array_buffer_fun = InstallFunction(
+ global, "SharedArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields,
+ isolate()->initial_object_prototype(), Builtins::kIllegal);
+ native_context()->set_shared_array_buffer_fun(*shared_array_buffer_fun);
+}
+
+
+Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
+ const char* name,
+ ElementsKind elements_kind) {
// --- I n t e r n a l A r r a y ---
// An array constructor on the builtins object that works like
// the public Array constructor, except that its prototype
@@ -1830,9 +1939,9 @@ Handle<JSFunction> Genesis::InstallInternalArray(
// must not be leaked to user code.
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSFunction> array_function = InstallFunction(
- builtins, name, JS_ARRAY_TYPE, JSArray::kSize,
- prototype, Builtins::kInternalArrayCode);
+ Handle<JSFunction> array_function =
+ InstallFunction(target, name, JS_ARRAY_TYPE, JSArray::kSize, prototype,
+ Builtins::kInternalArrayCode);
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode();
@@ -1910,6 +2019,23 @@ bool Genesis::InstallNatives() {
native_context()->set_runtime_context(*context);
+ // Set up the utils object as a shared container between native scripts.
+ Handle<JSObject> utils = factory()->NewJSObject(isolate()->object_function());
+ JSObject::NormalizeProperties(utils, CLEAR_INOBJECT_PROPERTIES, 16,
+ "utils container for native scripts");
+ native_context()->set_natives_utils_object(*utils);
+
+ Handle<JSObject> extras_exports =
+ factory()->NewJSObject(isolate()->object_function());
+ JSObject::NormalizeProperties(extras_exports, CLEAR_INOBJECT_PROPERTIES, 2,
+ "container to export to extra natives");
+ native_context()->set_extras_exports_object(*extras_exports);
+
+ if (FLAG_expose_natives_as != NULL) {
+ Handle<String> utils_key = factory()->NewStringFromAsciiChecked("utils");
+ JSObject::AddProperty(builtins, utils_key, utils, NONE);
+ }
+
{ // -- S c r i p t
// Builtin functions for Script.
Handle<JSFunction> script_fun = InstallFunction(
@@ -2082,13 +2208,13 @@ bool Genesis::InstallNatives() {
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
{
+ HandleScope scope(isolate());
+ Handle<JSObject> utils =
+ Handle<JSObject>::cast(isolate()->natives_utils_object());
Handle<JSFunction> array_function =
- InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS);
+ InstallInternalArray(utils, "InternalArray", FAST_HOLEY_ELEMENTS);
native_context()->set_internal_array_function(*array_function);
- }
-
- {
- InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
+ InstallInternalArray(utils, "InternalPackedArray", FAST_ELEMENTS);
}
{ // -- S e t I t e r a t o r
@@ -2110,10 +2236,17 @@ bool Genesis::InstallNatives() {
{
// Create generator meta-objects and install them on the builtins object.
Handle<JSObject> builtins(native_context()->builtins());
+ Handle<JSObject> iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSObject> generator_object_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSObject> generator_function_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SetObjectPrototype(generator_object_prototype, iterator_prototype);
+ JSObject::AddProperty(
+ builtins, factory()->InternalizeUtf8String("$iteratorPrototype"),
+ iterator_prototype,
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
JSObject::AddProperty(
builtins,
factory()->InternalizeUtf8String("GeneratorFunctionPrototype"),
@@ -2132,9 +2265,11 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, kUseStrictFunctionMap);
// Create maps for generator functions and their prototypes. Store those
- // maps in the native context. Generator functions do not have writable
- // prototypes, nor do they have "caller" or "arguments" accessors.
- Handle<Map> strict_function_map(native_context()->strict_function_map());
+ // maps in the native context. The "prototype" property descriptor is
+ // writable, non-enumerable, and non-configurable (as per ES6 draft
+ // 04-14-15, section 25.2.4.3).
+ Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
+ // Generator functions do not have "caller" or "arguments" accessors.
Handle<Map> sloppy_generator_function_map =
Map::Copy(strict_function_map, "SloppyGeneratorFunction");
Map::SetPrototype(sloppy_generator_function_map,
@@ -2181,7 +2316,6 @@ bool Genesis::InstallNatives() {
#undef INSTALL_PUBLIC_SYMBOL
}
- // Install natives.
int i = Natives::GetDebuggerCount();
if (!CompileBuiltin(isolate(), i)) return false;
if (!InstallJSBuiltins(builtins)) return false;
@@ -2190,6 +2324,8 @@ bool Genesis::InstallNatives() {
if (!CompileBuiltin(isolate(), i)) return false;
}
+ if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
+
InstallNativeFunctions();
auto function_cache =
@@ -2308,7 +2444,14 @@ bool Genesis::InstallNatives() {
{
AccessorConstantDescriptor d(factory()->iterator_symbol(),
arguments_iterator, attribs);
- Handle<Map> map(native_context()->aliased_arguments_map());
+ Handle<Map> map(native_context()->fast_aliased_arguments_map());
+ Map::EnsureDescriptorSlack(map, 1);
+ map->AppendDescriptor(&d);
+ }
+ {
+ AccessorConstantDescriptor d(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
+ Handle<Map> map(native_context()->slow_aliased_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
@@ -2332,14 +2475,10 @@ bool Genesis::InstallNatives() {
bool Genesis::InstallExperimentalNatives() {
- static const char* harmony_arrays_natives[] = {
- "native harmony-array.js", "native harmony-typedarray.js", nullptr};
static const char* harmony_array_includes_natives[] = {
"native harmony-array-includes.js", nullptr};
static const char* harmony_proxies_natives[] = {"native proxy.js", nullptr};
- static const char* harmony_classes_natives[] = {nullptr};
static const char* harmony_modules_natives[] = {nullptr};
- static const char* harmony_object_literals_natives[] = {nullptr};
static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
nullptr};
static const char* harmony_arrow_functions_natives[] = {nullptr};
@@ -2357,6 +2496,14 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_destructuring_natives[] = {nullptr};
static const char* harmony_object_natives[] = {"native harmony-object.js",
NULL};
+ static const char* harmony_spread_arrays_natives[] = {nullptr};
+ static const char* harmony_sharedarraybuffer_natives[] = {
+ "native harmony-sharedarraybuffer.js", NULL};
+ static const char* harmony_atomics_natives[] = {"native harmony-atomics.js",
+ nullptr};
+ static const char* harmony_new_target_natives[] = {nullptr};
+ static const char* harmony_concat_spreadable_natives[] = {
+ "native harmony-concat-spreadable.js", nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2376,7 +2523,10 @@ bool Genesis::InstallExperimentalNatives() {
#undef INSTALL_EXPERIMENTAL_NATIVES
}
+ if (!CallUtilsFunction(isolate(), "PostExperimentals")) return false;
+
InstallExperimentalNativeFunctions();
+ InstallExperimentalBuiltinFunctionIds();
return true;
}
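Illustration (not part of the patch): among the flagged natives added above, harmony-concat-spreadable.js implements Symbol.isConcatSpreadable; a minimal sketch of the (then flag-guarded) semantics:

    var arrayLike = { length: 2, 0: "a", 1: "b" };
    [].concat(arrayLike);                          // [arrayLike], not spread
    arrayLike[Symbol.isConcatSpreadable] = true;
    [].concat(arrayLike);                          // ["a", "b"]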
@@ -2402,6 +2552,11 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
}
+#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
+ { #holder_expr, #fun_name, k##name } \
+ ,
+
+
void Genesis::InstallBuiltinFunctionIds() {
HandleScope scope(isolate());
struct BuiltinFunctionIds {
@@ -2410,12 +2565,8 @@ void Genesis::InstallBuiltinFunctionIds() {
BuiltinFunctionId id;
};
-#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
- { #holder_expr, #fun_name, k##name } \
- ,
const BuiltinFunctionIds builtins[] = {
FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)};
-#undef INSTALL_BUILTIN_ID
for (const BuiltinFunctionIds& builtin : builtins) {
Handle<JSObject> holder =
@@ -2425,6 +2576,29 @@ void Genesis::InstallBuiltinFunctionIds() {
}
+void Genesis::InstallExperimentalBuiltinFunctionIds() {
+ if (FLAG_harmony_atomics) {
+ struct BuiltinFunctionIds {
+ const char* holder_expr;
+ const char* fun_name;
+ BuiltinFunctionId id;
+ };
+
+ const BuiltinFunctionIds atomic_builtins[] = {
+ ATOMIC_FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)};
+
+ for (const BuiltinFunctionIds& builtin : atomic_builtins) {
+ Handle<JSObject> holder =
+ ResolveBuiltinIdHolder(native_context(), builtin.holder_expr);
+ InstallBuiltinFunctionId(holder, builtin.fun_name, builtin.id);
+ }
+ }
+}
+
+
+#undef INSTALL_BUILTIN_ID
+
+
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
@@ -2502,14 +2676,17 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
+ // By now the utils object is no longer needed and can be removed.
+ native_context->set_natives_utils_object(*factory->undefined_value());
+
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives =
+ Handle<String> natives_key =
factory->InternalizeUtf8String(FLAG_expose_natives_as);
uint32_t dummy_index;
- if (natives->AsArrayIndex(&dummy_index)) return true;
- JSObject::AddProperty(global, natives, handle(global->builtins()),
- DONT_ENUM);
+ if (natives_key->AsArrayIndex(&dummy_index)) return true;
+ Handle<JSBuiltinsObject> natives(global->builtins());
+ JSObject::AddProperty(global, natives_key, natives, DONT_ENUM);
}
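Illustration (not part of the patch): a hedged sketch of the flag handled above, assuming a d8-style shell; the builtins object becomes a property of the global object under the chosen name, unless that name parses as an array index.

    // run with: --expose_natives_as=natives
    typeof natives;      // "object": the builtins object exposed on the global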
// Expose the stack trace symbol to native JS.
@@ -2653,17 +2830,7 @@ bool Genesis::InstallExtension(Isolate* isolate,
}
}
// We do not expect this to throw an exception. Change this if it does.
- Handle<String> source_code =
- isolate->factory()
- ->NewExternalStringFromOneByte(extension->source())
- .ToHandleChecked();
- bool result = CompileScriptCached(isolate,
- CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
+ bool result = CompileExtension(isolate, extension);
DCHECK(isolate->has_pending_exception() != result);
if (!result) {
// We print out the name of the extension that failed to install.
@@ -2694,7 +2861,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
bool Genesis::ConfigureGlobalObjects(
- v8::Handle<v8::ObjectTemplate> global_proxy_template) {
+ v8::Local<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
JSObject::cast(native_context()->global_proxy()));
Handle<JSObject> global_object(
@@ -2720,6 +2887,12 @@ bool Genesis::ConfigureGlobalObjects(
native_context()->set_initial_array_prototype(
JSArray::cast(native_context()->array_function()->prototype()));
+ native_context()->set_array_buffer_map(
+ native_context()->array_buffer_fun()->initial_map());
+ native_context()->set_js_map_map(
+ native_context()->js_map_fun()->initial_map());
+ native_context()->set_js_set_map(
+ native_context()->js_set_fun()->initial_map());
return true;
}
@@ -2793,6 +2966,29 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
}
+ } else if (from->IsGlobalObject()) {
+ Handle<GlobalDictionary> properties =
+ Handle<GlobalDictionary>(from->global_dictionary());
+ int capacity = properties->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* raw_key(properties->KeyAt(i));
+ if (properties->IsKey(raw_key)) {
+ DCHECK(raw_key->IsName());
+ // If the property is already there we skip it.
+ Handle<Name> key(Name::cast(raw_key));
+ LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+ if (it.IsFound()) continue;
+ // Set the property.
+ DCHECK(properties->ValueAt(i)->IsPropertyCell());
+ Handle<PropertyCell> cell(PropertyCell::cast(properties->ValueAt(i)));
+ Handle<Object> value(cell->value(), isolate());
+ if (value->IsTheHole()) continue;
+ PropertyDetails details = cell->property_details();
+ DCHECK_EQ(kData, details.kind());
+ JSObject::AddProperty(to, key, value, details.attributes());
+ }
+ }
} else {
Handle<NameDictionary> properties =
Handle<NameDictionary>(from->property_dictionary());
@@ -2810,10 +3006,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Object> value = Handle<Object>(properties->ValueAt(i),
isolate());
DCHECK(!value->IsCell());
- if (value->IsPropertyCell()) {
- value = handle(PropertyCell::cast(*value)->value(), isolate());
- }
- if (value->IsTheHole()) continue;
+ DCHECK(!value->IsTheHole());
PropertyDetails details = properties->DetailsAt(i);
DCHECK_EQ(kData, details.kind());
JSObject::AddProperty(to, key, value, details.attributes());
@@ -2889,10 +3082,9 @@ class NoTrackDoubleFieldsForSerializerScope {
Genesis::Genesis(Isolate* isolate,
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- v8::Handle<v8::ObjectTemplate> global_proxy_template,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions)
- : isolate_(isolate),
- active_(isolate->bootstrapper()) {
+ : isolate_(isolate), active_(isolate->bootstrapper()) {
NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
// Before creating the roots we must save the context and restore it
@@ -2903,7 +3095,10 @@ Genesis::Genesis(Isolate* isolate,
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return;
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return;
+ }
// The deserializer needs to hook up references to the global proxy.
// Create an uninitialized global proxy now if we don't have one
@@ -2915,6 +3110,7 @@ Genesis::Genesis(Isolate* isolate,
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
+ // Also create a context from scratch to expose natives, if required by the flag.
Handle<FixedArray> outdated_contexts;
if (!isolate->initialized_from_snapshot() ||
!Snapshot::NewContextFromSnapshot(isolate, global_proxy,
@@ -2943,6 +3139,7 @@ Genesis::Genesis(Isolate* isolate,
HookUpGlobalObject(global_object, outdated_contexts);
native_context()->builtins()->set_global_proxy(
native_context()->global_proxy());
+ HookUpGlobalThisBinding(outdated_contexts);
if (!ConfigureGlobalObjects(global_proxy_template)) return;
} else {
@@ -3010,4 +3207,5 @@ void Bootstrapper::FreeThreadResources() {
DCHECK(!IsActive());
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 1247337cd3..cb0d918b17 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -77,7 +77,7 @@ class Bootstrapper final {
// The returned value is a global handle casted to V8Environment*.
Handle<Context> CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- v8::Handle<v8::ObjectTemplate> global_object_template,
+ v8::Local<v8::ObjectTemplate> global_object_template,
v8::ExtensionConfiguration* extensions);
// Detach the environment from its outer global object.
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 5b1eeed154..869ad2cc00 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -11,11 +11,13 @@
#include "src/bootstrapper.h"
#include "src/builtins.h"
#include "src/cpu-profiler.h"
+#include "src/elements.h"
#include "src/gdb-jit.h"
#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/messages.h"
#include "src/prototype.h"
#include "src/vm-state-inl.h"
@@ -183,38 +185,34 @@ static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index,
}
-static bool ArrayPrototypeHasNoElements(Heap* heap, PrototypeIterator* iter) {
+static bool ArrayPrototypeHasNoElements(PrototypeIterator* iter) {
DisallowHeapAllocation no_gc;
for (; !iter->IsAtEnd(); iter->Advance()) {
if (iter->GetCurrent()->IsJSProxy()) return false;
- if (JSObject::cast(iter->GetCurrent())->elements() !=
- heap->empty_fixed_array()) {
- return false;
- }
+ JSObject* current = JSObject::cast(iter->GetCurrent());
+ if (current->IsAccessCheckNeeded()) return false;
+ if (current->HasIndexedInterceptor()) return false;
+ if (current->elements()->length() != 0) return false;
}
return true;
}
-static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
+static inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
JSArray* receiver) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = heap->isolate();
- if (!isolate->IsFastArrayConstructorPrototypeChainIntact()) {
- return false;
- }
-
// If the array prototype chain is intact (and free of elements), and if the
// receiver's prototype is the array prototype, then we are done.
Object* prototype = receiver->map()->prototype();
if (prototype->IsJSArray() &&
- isolate->is_initial_array_prototype(JSArray::cast(prototype))) {
+ isolate->is_initial_array_prototype(JSArray::cast(prototype)) &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact()) {
return true;
}
// Slow case.
PrototypeIterator iter(isolate, receiver);
- return ArrayPrototypeHasNoElements(heap, &iter);
+ return ArrayPrototypeHasNoElements(&iter);
}
@@ -230,7 +228,7 @@ static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
// If there may be elements accessors in the prototype chain, the fast path
// cannot be used if there are arguments to add to the array.
Heap* heap = isolate->heap();
- if (args != NULL && !IsJSArrayFastElementMovingAllowed(heap, *array)) {
+ if (args != NULL && !IsJSArrayFastElementMovingAllowed(isolate, *array)) {
return MaybeHandle<FixedArrayBase>();
}
if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
@@ -437,23 +435,19 @@ BUILTIN(ArrayPop) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
- int len = Smi::cast(array->length())->value();
+ uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
if (len == 0) return isolate->heap()->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
return CallJsBuiltin(isolate, "$arrayPop", args);
}
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = len - 1;
- Handle<Object> element =
- accessor->Get(array, array, new_length, elms_obj).ToHandleChecked();
- if (element->IsTheHole()) {
- return CallJsBuiltin(isolate, "$arrayPop", args);
- }
- RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- accessor->SetLength(array, handle(Smi::FromInt(new_length), isolate)));
+ uint32_t new_length = len - 1;
+ Handle<Object> element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, element, Object::GetElement(isolate, array, new_length));
+
+ JSArray::SetLength(array, new_length);
return *element;
}
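Illustration (not part of the patch): the rewrite above routes element reads through Object::GetElement, which walks the prototype chain itself, so the old hole-check bailout goes away; arrays with a read-only length still defer to the $arrayPop fallback.

    var a = [1, 2, 3];
    Object.defineProperty(a, "length", { writable: false });
    a.pop();             // throws TypeError (length is read-only)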
@@ -466,7 +460,7 @@ BUILTIN(ArrayShift) {
EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj) ||
- !IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(*receiver))) {
+ !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
return CallJsBuiltin(isolate, "$arrayShift", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -480,12 +474,9 @@ BUILTIN(ArrayShift) {
}
// Get first element
- ElementsAccessor* accessor = array->GetElementsAccessor();
- Handle<Object> first =
- accessor->Get(array, array, 0, elms_obj).ToHandleChecked();
- if (first->IsTheHole()) {
- return CallJsBuiltin(isolate, "$arrayShift", args);
- }
+ Handle<Object> first;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, first,
+ Object::GetElement(isolate, array, 0));
if (heap->CanMoveObjectStart(*elms_obj)) {
array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
@@ -572,7 +563,6 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
HandleScope scope(isolate);
- Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
int len = -1;
int relative_start = 0;
@@ -581,7 +571,7 @@ BUILTIN(ArraySlice) {
DisallowHeapAllocation no_gc;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(*receiver);
- if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+ if (!IsJSArrayFastElementMovingAllowed(isolate, array)) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "$arraySlice", args);
}
@@ -940,12 +930,11 @@ BUILTIN(ArrayConcat) {
bool has_double = false;
{
DisallowHeapAllocation no_gc;
- Heap* heap = isolate->heap();
Context* native_context = isolate->context()->native_context();
Object* array_proto = native_context->array_function()->prototype();
PrototypeIterator iter(isolate, array_proto,
PrototypeIterator::START_AT_RECEIVER);
- if (!ArrayPrototypeHasNoElements(heap, &iter)) {
+ if (!ArrayPrototypeHasNoElements(&iter)) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "$arrayConcat", args);
}
@@ -991,11 +980,8 @@ BUILTIN(ArrayConcat) {
ArrayStorageAllocationMode mode =
has_double && IsFastObjectElementsKind(elements_kind)
? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- Handle<JSArray> result_array =
- isolate->factory()->NewJSArray(elements_kind,
- result_len,
- result_len,
- mode);
+ Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+ elements_kind, result_len, result_len, Strength::WEAK, mode);
if (result_len == 0) return *result_array;
int j = 0;
@@ -1026,17 +1012,15 @@ BUILTIN(ArrayConcat) {
BUILTIN(RestrictedFunctionPropertiesThrower) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError("restricted_function_properties",
- HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kRestrictedFunctionProperties));
}
BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError("strict_poison_pill", HandleVector<Object>(NULL, 0)));
+ isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
}
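Illustration (not part of the patch): the two throwers above now use message templates; they back the poisoned accessors observable as:

    function f() { "use strict"; return f.caller; }
    f();   // TypeError (restricted function properties)
    (function() { "use strict"; return arguments.callee; })();
           // TypeError (strict poison pill)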
@@ -1080,9 +1064,8 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
- THROW_NEW_ERROR(
- isolate, NewTypeError("illegal_invocation", HandleVector(&function, 1)),
- Object);
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIllegalInvocation),
+ Object);
}
Object* raw_call_data = fun_data->call_code();
@@ -1106,7 +1089,7 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
args.length() - 1,
is_construct);
- v8::Handle<v8::Value> value = custom.Call(callback);
+ v8::Local<v8::Value> value = custom.Call(callback);
Handle<Object> result;
if (value.IsEmpty()) {
result = isolate->factory()->undefined_value();
@@ -1243,7 +1226,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
&args[0] - 1,
args.length() - 1,
is_construct_call);
- v8::Handle<v8::Value> value = custom.Call(callback);
+ v8::Local<v8::Value> value = custom.Call(callback);
if (value.IsEmpty()) {
result = heap->undefined_value();
} else {
@@ -1277,7 +1260,12 @@ static void Generate_LoadIC_Miss(MacroAssembler* masm) {
static void Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm);
+ LoadIC::GenerateNormal(masm, SLOPPY);
+}
+
+
+static void Generate_LoadIC_Normal_Strong(MacroAssembler* masm) {
+ LoadIC::GenerateNormal(masm, STRONG);
}
@@ -1287,17 +1275,22 @@ static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
static void Generate_LoadIC_Slow(MacroAssembler* masm) {
- LoadIC::GenerateRuntimeGetProperty(masm);
+ LoadIC::GenerateRuntimeGetProperty(masm, SLOPPY);
}
-static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
- KeyedLoadIC::GenerateInitialize(masm);
+static void Generate_LoadIC_Slow_Strong(MacroAssembler* masm) {
+ LoadIC::GenerateRuntimeGetProperty(masm, STRONG);
}
static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm, SLOPPY);
+}
+
+
+static void Generate_KeyedLoadIC_Slow_Strong(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm, STRONG);
}
@@ -1307,12 +1300,12 @@ static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
static void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMegamorphic(masm);
+ KeyedLoadIC::GenerateMegamorphic(masm, SLOPPY);
}
-static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
- KeyedLoadIC::GeneratePreMonomorphic(masm);
+static void Generate_KeyedLoadIC_Megamorphic_Strong(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateMegamorphic(masm, STRONG);
}
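Illustration (not part of the patch): the new *_Strong IC variants implement the experimental strong mode (flag-guarded at this stage, and later removed from V8), under which a failed property load throws instead of yielding undefined; a sketch assuming a build with the flag:

    "use strong";
    var o = {};
    o.missing;   // TypeError in strong mode; sloppy/strict would give undefined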
@@ -1376,41 +1369,11 @@ static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
- KeyedStoreIC::GenerateSloppyArguments(masm);
-}
-
-
static void Generate_CallICStub_DebugBreak(MacroAssembler* masm) {
DebugCodegen::GenerateCallICStubDebugBreak(masm);
}
-static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateLoadICDebugBreak(masm);
-}
-
-
-static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateStoreICDebugBreak(masm);
-}
-
-
-static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateKeyedLoadICDebugBreak(masm);
-}
-
-
-static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateKeyedStoreICDebugBreak(masm);
-}
-
-
-static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateCompareNilICDebugBreak(masm);
-}
-
-
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
DebugCodegen::GenerateReturnDebugBreak(masm);
}
@@ -1678,4 +1641,5 @@ BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_A
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index fd00e595bc..e1e202b3f7 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -64,78 +64,80 @@ enum BuiltinExtraArguments {
V(RestrictedStrictArgumentsPropertiesThrower, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
- V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, kNoExtraICState) \
- V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- StoreIC::kStrictModeState) \
- V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- StoreIC::kStrictModeState) \
- V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- StoreIC::kStrictModeState) \
- V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
- kNoExtraICState) \
- \
- /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
- V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(StringConstructCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+#define BUILTIN_LIST_A(V) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubNewTarget, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
+ V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
+ LoadICState::kStrongModeState) \
+ \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kNoExtraICState) \
+ V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(StringConstructCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
-#define BUILTIN_LIST_H(V) \
- V(LoadIC_Slow, LOAD_IC) \
- V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
- V(StoreIC_Slow, STORE_IC) \
- V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
- V(LoadIC_Normal, LOAD_IC) \
- V(StoreIC_Normal, STORE_IC)
+#define BUILTIN_LIST_H(V) \
+ V(LoadIC_Slow, LOAD_IC) \
+ V(LoadIC_Slow_Strong, LOAD_IC) \
+ V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
+ V(KeyedLoadIC_Slow_Strong, KEYED_LOAD_IC) \
+ V(StoreIC_Slow, STORE_IC) \
+ V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
+ V(LoadIC_Normal, LOAD_IC) \
+ V(LoadIC_Normal_Strong, LOAD_IC) \
+ V(StoreIC_Normal, STORE_IC)
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
@@ -149,16 +151,6 @@ enum BuiltinExtraArguments {
DEBUG_BREAK) \
V(CallICStub_DebugBreak, CALL_IC, DEBUG_STUB, \
DEBUG_BREAK) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CompareNilIC_DebugBreak, COMPARE_NIL_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
@@ -171,6 +163,7 @@ enum BuiltinExtraArguments {
V(EQUALS, 1) \
V(STRICT_EQUALS, 1) \
V(COMPARE, 2) \
+ V(COMPARE_STRONG, 2) \
V(ADD, 1) \
V(ADD_STRONG, 1) \
V(SUB, 1) \
@@ -196,7 +189,6 @@ enum BuiltinExtraArguments {
V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
- V(FILTER_KEY, 1) \
V(CALL_NON_FUNCTION, 0) \
V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
V(CALL_FUNCTION_PROXY, 1) \
@@ -212,6 +204,7 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(REFLECT_APPLY_PREPARE, 1) \
V(REFLECT_CONSTRUCT_PREPARE, 2) \
+ V(CONCAT_ITERABLE_TO_ARRAY, 1) \
V(STACK_OVERFLOW, 1)
class BuiltinFunctionTable;
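Illustration (not part of the patch): the CONCAT_ITERABLE_TO_ARRAY entry added above presumably backs the flagged spread-in-array-literal desugaring (harmony_spread_arrays); a sketch of the ES6 semantics:

    var tail = [3, 4];
    [1, 2, ...tail];   // [1, 2, 3, 4]
    [..."ab"];         // any iterable spreads: ["a", "b"]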
@@ -324,6 +317,7 @@ class Builtins {
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubForDerived(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
+ static void Generate_JSConstructStubNewTarget(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index ccf8882e76..5bd0884eba 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -156,4 +156,5 @@ void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
DCHECK(requested_exponent < *found_exponent + kDecimalExponentDistance);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/char-predicates.cc b/deps/v8/src/char-predicates.cc
index ab551d808e..66508375a1 100644
--- a/deps/v8/src/char-predicates.cc
+++ b/deps/v8/src/char-predicates.cc
@@ -38,5 +38,5 @@ bool SupplementaryPlanes::IsIDPart(uc32 c) {
return false;
#endif // V8_I18N_SUPPORT
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 30448d8733..bdceb572a1 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -11,50 +11,48 @@
namespace v8 {
namespace internal {
-// static
-Callable CodeFactory::LoadGlobalIC(Isolate* isolate,
- Handle<GlobalObject> global,
- Handle<String> name) {
- return Callable(LoadIC::load_global(isolate, global, name),
- LoadDescriptor(isolate));
-}
-
// static
-Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
+Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode,
+ LanguageMode language_mode) {
return Callable(
- LoadIC::initialize_stub(isolate, LoadICState(mode).GetExtraICState()),
+ LoadIC::initialize_stub(
+ isolate, LoadICState(mode, language_mode).GetExtraICState()),
LoadDescriptor(isolate));
}
// static
Callable CodeFactory::LoadICInOptimizedCode(
- Isolate* isolate, ContextualMode mode,
+ Isolate* isolate, ContextualMode mode, LanguageMode language_mode,
InlineCacheState initialization_state) {
auto code = LoadIC::initialize_stub_in_optimized_code(
- isolate, LoadICState(mode).GetExtraICState(), initialization_state);
- if (FLAG_vector_ics) {
- return Callable(code, VectorLoadICDescriptor(isolate));
- }
- return Callable(code, LoadDescriptor(isolate));
+ isolate, LoadICState(mode, language_mode).GetExtraICState(),
+ initialization_state);
+ return Callable(code, LoadWithVectorDescriptor(isolate));
}
// static
-Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
- return Callable(KeyedLoadIC::initialize_stub(isolate),
+Callable CodeFactory::KeyedLoadIC(Isolate* isolate,
+ LanguageMode language_mode) {
+ ExtraICState state = is_strong(language_mode) ? LoadICState::kStrongModeState
+ : kNoExtraICState;
+ return Callable(KeyedLoadIC::initialize_stub(isolate, state),
LoadDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedLoadICInOptimizedCode(
- Isolate* isolate, InlineCacheState initialization_state) {
+ Isolate* isolate, LanguageMode language_mode,
+ InlineCacheState initialization_state) {
+ ExtraICState state = is_strong(language_mode) ? LoadICState::kStrongModeState
+ : kNoExtraICState;
auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
- isolate, initialization_state);
- if (FLAG_vector_ics && initialization_state != MEGAMORPHIC) {
- return Callable(code, VectorLoadICDescriptor(isolate));
+ isolate, initialization_state, state);
+ if (initialization_state != MEGAMORPHIC) {
+ return Callable(code, LoadWithVectorDescriptor(isolate));
}
return Callable(code, LoadDescriptor(isolate));
}
@@ -81,7 +79,22 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
return Callable(
StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- StoreDescriptor(isolate));
+ FLAG_vector_stores ? VectorStoreICTrampolineDescriptor(isolate)
+ : StoreDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::StoreICInOptimizedCode(
+ Isolate* isolate, LanguageMode language_mode,
+ InlineCacheState initialization_state) {
+ CallInterfaceDescriptor descriptor =
+ FLAG_vector_stores && initialization_state != MEGAMORPHIC
+ ? VectorStoreICDescriptor(isolate)
+ : StoreDescriptor(isolate);
+ return Callable(StoreIC::initialize_stub_in_optimized_code(
+ isolate, language_mode, initialization_state),
+ descriptor);
}
@@ -90,7 +103,8 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
return Callable(
KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- StoreDescriptor(isolate));
+ FLAG_vector_stores ? VectorStoreICTrampolineDescriptor(isolate)
+ : StoreDescriptor(isolate));
}
@@ -98,23 +112,36 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
Callable CodeFactory::KeyedStoreICInOptimizedCode(
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state) {
- return Callable(KeyedStoreIC::initialize_stub(isolate, language_mode,
- initialization_state),
- StoreDescriptor(isolate));
+ CallInterfaceDescriptor descriptor =
+ FLAG_vector_stores && initialization_state != MEGAMORPHIC
+ ? VectorStoreICDescriptor(isolate)
+ : StoreDescriptor(isolate);
+ return Callable(KeyedStoreIC::initialize_stub_in_optimized_code(
+ isolate, language_mode, initialization_state),
+ descriptor);
}
// static
-Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
- Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
+Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op,
+ Strength strength) {
+ Handle<Code> code = CompareIC::GetUninitialized(isolate, op, strength);
return Callable(code, CompareDescriptor(isolate));
}
// static
Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
- LanguageMode language_mode) {
- BinaryOpICStub stub(isolate, op, language_mode);
+ Strength strength) {
+ BinaryOpICStub stub(isolate, op, strength);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::Instanceof(Isolate* isolate,
+ InstanceofStub::Flags flags) {
+ InstanceofStub stub(isolate, flags);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 1007be85ac..947770db42 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -32,31 +32,37 @@ class Callable final BASE_EMBEDDED {
class CodeFactory final {
public:
// Initial states for ICs.
- static Callable LoadGlobalIC(Isolate* isolate, Handle<GlobalObject> global,
- Handle<String> name);
- static Callable LoadIC(Isolate* isolate, ContextualMode mode);
+ static Callable LoadIC(Isolate* isolate, ContextualMode mode,
+ LanguageMode language_mode);
static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode,
+ LanguageMode language_mode,
InlineCacheState initialization_state);
- static Callable KeyedLoadIC(Isolate* isolate);
+ static Callable KeyedLoadIC(Isolate* isolate, LanguageMode language_mode);
static Callable KeyedLoadICInOptimizedCode(
- Isolate* isolate, InlineCacheState initialization_state);
+ Isolate* isolate, LanguageMode language_mode,
+ InlineCacheState initialization_state);
static Callable CallIC(Isolate* isolate, int argc,
CallICState::CallType call_type);
static Callable CallICInOptimizedCode(Isolate* isolate, int argc,
CallICState::CallType call_type);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
+ static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
+ InlineCacheState initialization_state);
static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreICInOptimizedCode(
Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
- static Callable CompareIC(Isolate* isolate, Token::Value op);
+ static Callable CompareIC(Isolate* isolate, Token::Value op,
+ Strength strength);
static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
- LanguageMode language_mode);
+ Strength strength);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
+ static Callable Instanceof(Isolate* isolate, InstanceofStub::Flags flags);
+
static Callable ToBoolean(
Isolate* isolate, ToBooleanStub::ResultMode mode,
ToBooleanStub::Types types = ToBooleanStub::Types());
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 70f6e6150c..d1cabde1bd 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -41,17 +41,27 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
info_(info),
descriptor_(info->code_stub()),
context_(NULL) {
- int parameter_count = descriptor_.GetEnvironmentParameterCount();
+ int parameter_count = GetParameterCount();
parameters_.Reset(new HParameter*[parameter_count]);
}
virtual bool BuildGraph();
protected:
virtual HValue* BuildCodeStub() = 0;
+ int GetParameterCount() const {
+ return descriptor_.GetRegisterParameterCount();
+ }
HParameter* GetParameter(int parameter) {
- DCHECK(parameter < descriptor_.GetEnvironmentParameterCount());
+ DCHECK(parameter < GetParameterCount());
return parameters_[parameter];
}
+ Representation GetParameterRepresentation(int parameter) {
+ return RepresentationFromType(descriptor_.GetParameterType(parameter));
+ }
+ bool IsParameterCountRegister(int index) const {
+ return descriptor_.GetRegisterParameter(index)
+ .is(descriptor_.stack_parameter_count());
+ }
HValue* GetArgumentsLength() {
// This is initialized in BuildGraph()
DCHECK(arguments_length_ != NULL);
@@ -62,8 +72,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_->isolate(); }
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- FieldIndex index);
+ HLoadNamedField* BuildLoadNamedField(HValue* object, FieldIndex index);
void BuildStoreNamedField(HValue* object, HValue* value, FieldIndex index,
Representation representation,
bool transition_to_field);
@@ -74,7 +83,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
MULTIPLE
};
- HValue* UnmappedCase(HValue* elements, HValue* key);
+ HValue* UnmappedCase(HValue* elements, HValue* key, HValue* value);
+ HValue* EmitKeyedSloppyArguments(HValue* receiver, HValue* key,
+ HValue* value);
HValue* BuildArrayConstructor(ElementsKind kind,
AllocationSiteOverrideMode override_mode,
@@ -91,6 +102,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
IfBuilder* builder,
HValue* optimized_map,
HValue* map_index);
+ void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
+ HValue* code_object, HValue* literals);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
@@ -124,7 +137,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
isolate()->GetHTracer()->TraceCompilation(info());
}
- int param_count = descriptor_.GetEnvironmentParameterCount();
+ int param_count = GetParameterCount();
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
Goto(next_block);
@@ -134,12 +147,12 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
bool runtime_stack_params = descriptor_.stack_parameter_count().is_valid();
HInstruction* stack_parameter_count = NULL;
for (int i = 0; i < param_count; ++i) {
- Representation r = descriptor_.GetEnvironmentParameterRepresentation(i);
+ Representation r = GetParameterRepresentation(i);
HParameter* param = Add<HParameter>(i,
HParameter::REGISTER_PARAMETER, r);
start_environment->Bind(i, param);
parameters_[i] = param;
- if (descriptor_.IsEnvironmentParameterCountRegister(i)) {
+ if (IsParameterCountRegister(i)) {
param->set_type(HType::Smi());
stack_parameter_count = param;
arguments_length_ = stack_parameter_count;
@@ -639,7 +652,7 @@ HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
HInstruction* value = GetParameter(CreateWeakCellDescriptor::kValueIndex);
Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellValue(), value);
Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellNext(),
- graph()->GetConstantUndefined());
+ graph()->GetConstantHole());
HInstruction* feedback_vector =
GetParameter(CreateWeakCellDescriptor::kVectorIndex);
@@ -696,10 +709,11 @@ HValue* CodeStubGraphBuilder<GrowArrayElementsStub>::BuildCodeStub() {
HValue* object = GetParameter(GrowArrayElementsDescriptor::kObjectIndex);
HValue* key = GetParameter(GrowArrayElementsDescriptor::kKeyIndex);
- HValue* current_capacity =
- GetParameter(GrowArrayElementsDescriptor::kCapacityIndex);
HValue* elements = AddLoadElements(object);
+ HValue* current_capacity = Add<HLoadNamedField>(
+ elements, nullptr, HObjectAccess::ForFixedArrayLength());
+
HValue* length =
casted_stub()->is_js_array()
? Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
@@ -795,8 +809,9 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
-HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
- HValue* result;
+HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
+ HValue* value) {
+ HValue* result = NULL;
HInstruction* backing_store =
Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, FAST_ELEMENTS,
ALLOW_RETURN_HOLE);
@@ -808,8 +823,12 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
Token::LT);
in_unmapped_range.Then();
{
- result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
- NEVER_RETURN_HOLE);
+ if (value == NULL) {
+ result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
+ NEVER_RETURN_HOLE);
+ } else {
+ Add<HStoreKeyed>(backing_store, key, value, FAST_HOLEY_ELEMENTS);
+ }
}
in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
in_unmapped_range.End();
@@ -817,11 +836,9 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
}
-template <>
-HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
- HValue* key = GetParameter(LoadDescriptor::kNameIndex);
-
+HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
+ HValue* key,
+ HValue* value) {
// Mapped arguments are actual arguments. Unmapped arguments are values added
// to the arguments object after it was created for the call. Mapped arguments
// are stored in the context at indexes given by elements[key + 2]. Unmapped
@@ -848,6 +865,8 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
// index into the context array given at elements[0]. Return the value at
// context[t].
+ bool is_load = value == NULL;
+
key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
IfBuilder positive_smi(this);
positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
@@ -879,21 +898,27 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
nullptr, FAST_ELEMENTS);
STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
- HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- environment()->Push(result);
+ if (is_load) {
+ HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ environment()->Push(result);
+ } else {
+ DCHECK(value != NULL);
+ Add<HStoreKeyed>(the_context, mapped_index, value, FAST_ELEMENTS);
+ environment()->Push(value);
+ }
}
is_valid.Else();
{
- HValue* result = UnmappedCase(elements, key);
- environment()->Push(result);
+ HValue* result = UnmappedCase(elements, key, value);
+ environment()->Push(is_load ? result : value);
}
is_valid.End();
}
in_range.Else();
{
- HValue* result = UnmappedCase(elements, key);
- environment()->Push(result);
+ HValue* result = UnmappedCase(elements, key, value);
+ environment()->Push(is_load ? result : value);
}
in_range.End();
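Illustration (not part of the patch): a JS sketch of the mapped/unmapped split described in the comment block above; indexes below the formal-parameter count alias the context slots, while higher ones live in the unmapped backing store.

    function f(a, b) {       // sloppy mode: mapped (aliased) arguments
      arguments[0] = 42;     // mapped slot: also updates the formal "a"
      b = "y";               // aliasing works both ways: arguments[1] is now "y"
      arguments[2] = "z";    // index >= formal count: unmapped backing store
      return [a, arguments[1], arguments[2]];
    }
    f(1, 2, 3);              // [42, "y", "z"]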
@@ -901,11 +926,35 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
}
+template <>
+HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+
+ return EmitKeyedSloppyArguments(receiver, key, NULL);
+}
+
+
Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
return DoGenerateCode(this);
}
+template <>
+HValue* CodeStubGraphBuilder<KeyedStoreSloppyArgumentsStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(StoreDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(StoreDescriptor::kNameIndex);
+ HValue* value = GetParameter(StoreDescriptor::kValueIndex);
+
+ return EmitKeyedSloppyArguments(receiver, key, value);
+}
+
+
+Handle<Code> KeyedStoreSloppyArgumentsStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
void CodeStubGraphBuilderBase::BuildStoreNamedField(
HValue* object, HValue* value, FieldIndex index,
Representation representation, bool transition_to_field) {
@@ -1323,19 +1372,16 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
if_leftisstring.If<HIsStringAndBranch>(left);
if_leftisstring.Then();
{
- Push(BuildBinaryOperation(
- state.op(), left, right,
- Type::String(zone()), right_type,
- result_type, state.fixed_right_arg(),
- allocation_mode, state.language_mode()));
+ Push(BuildBinaryOperation(state.op(), left, right, Type::String(zone()),
+ right_type, result_type,
+ state.fixed_right_arg(), allocation_mode,
+ state.strength()));
}
if_leftisstring.Else();
{
Push(BuildBinaryOperation(
- state.op(), left, right,
- left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode,
- state.language_mode()));
+ state.op(), left, right, left_type, right_type, result_type,
+ state.fixed_right_arg(), allocation_mode, state.strength()));
}
if_leftisstring.End();
result = Pop();
@@ -1344,28 +1390,24 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
if_rightisstring.If<HIsStringAndBranch>(right);
if_rightisstring.Then();
{
- Push(BuildBinaryOperation(
- state.op(), left, right,
- left_type, Type::String(zone()),
- result_type, state.fixed_right_arg(),
- allocation_mode, state.language_mode()));
+ Push(BuildBinaryOperation(state.op(), left, right, left_type,
+ Type::String(zone()), result_type,
+ state.fixed_right_arg(), allocation_mode,
+ state.strength()));
}
if_rightisstring.Else();
{
Push(BuildBinaryOperation(
- state.op(), left, right,
- left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode,
- state.language_mode()));
+ state.op(), left, right, left_type, right_type, result_type,
+ state.fixed_right_arg(), allocation_mode, state.strength()));
}
if_rightisstring.End();
result = Pop();
}
} else {
result = BuildBinaryOperation(
- state.op(), left, right,
- left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode, state.language_mode());
+ state.op(), left, right, left_type, right_type, result_type,
+ state.fixed_right_arg(), allocation_mode, state.strength());
}
// If we encounter a generic argument, the number conversion is
@@ -1397,10 +1439,9 @@ HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
Type* result_type = state.GetResultType(zone());
HAllocationMode allocation_mode(allocation_site);
- return BuildBinaryOperation(state.op(), left, right,
- left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode,
- state.language_mode());
+ return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
+ result_type, state.fixed_right_arg(),
+ allocation_mode, state.strength());
}
@@ -1620,6 +1661,16 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
+ BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
+
+ // The builder continues in the "then" after this function.
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(HValue* js_function,
+ HValue* native_context,
+ HValue* code_object,
+ HValue* literals) {
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized());
@@ -1642,8 +1693,6 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
-
- // The builder continues in the "then" after this function.
}
@@ -1681,6 +1730,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* shared_info,
HValue* native_context) {
Counters* counters = isolate()->counters();
+ Factory* factory = isolate()->factory();
IfBuilder is_optimized(this);
HInstruction* optimized_map = Add<HLoadNamedField>(
shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
@@ -1733,15 +1783,31 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
loop_builder.EndBody();
- // If slot_iterator equals first entry index, then we failed to find and
- // install optimized code
+ // If slot_iterator equals the first entry index, then we failed to find
+ // context-dependent code; try context-independent code next.
IfBuilder no_optimized_code_check(this);
no_optimized_code_check.If<HCompareNumericAndBranch>(
slot_iterator, first_entry_index, Token::EQ);
no_optimized_code_check.Then();
{
- // Store the unoptimized code
- BuildInstallCode(js_function, shared_info);
+ IfBuilder shared_code_check(this);
+ HValue* shared_code = Add<HLoadNamedField>(
+ optimized_map, nullptr,
+ HObjectAccess::ForOptimizedCodeMapSharedCode());
+ shared_code_check.IfNot<HCompareObjectEqAndBranch>(
+ shared_code, graph()->GetConstantUndefined());
+ shared_code_check.Then();
+ {
+ // Store the context-independent optimized code.
+ HValue* literals = Add<HConstant>(factory->empty_fixed_array());
+ BuildInstallOptimizedCode(js_function, native_context, shared_code,
+ literals);
+ }
+ shared_code_check.Else();
+ {
+ // Store the unoptimized code.
+ BuildInstallCode(js_function, shared_info);
+ }
}
}
}
@@ -1787,14 +1853,10 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
context());
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- if (FLAG_cache_optimized_code) {
- BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
- } else {
- BuildInstallCode(js_function, shared_info);
- }
+ // Initialize the code pointer in the function to be the one found in the
+ // shared function info object. But first check if there is an optimized
+ // version for our context.
+ BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
return js_function;
}
@@ -1871,7 +1933,8 @@ HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
HValue* hash = BuildElementIndexHash(key);
- return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash);
+ return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash,
+ casted_stub()->language_mode());
}
@@ -1985,7 +2048,6 @@ void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildExternalElementLoad(
HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
HValue* key = GetParameter(LoadDescriptor::kNameIndex);
-
// Split into a smi/integer case and unique string case.
HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
graph()->CreateBasicBlock());
@@ -2029,13 +2091,16 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* hash = BuildElementIndexHash(key);
- Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
+ Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash,
+ casted_stub()->language_mode()));
}
kind_if.Else();
- // The SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then"
+ // The SLOW_SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then"
+ STATIC_ASSERT(FAST_SLOPPY_ARGUMENTS_ELEMENTS <
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
BuildElementsKindLimitCheck(&kind_if, bit_field2,
- SLOPPY_ARGUMENTS_ELEMENTS);
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
// Non-strict elements are not handled.
Add<HDeoptimize>(Deoptimizer::kNonStrictElementsInKeyedLoadGenericStub,
Deoptimizer::EAGER);
@@ -2107,10 +2172,8 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
- HValue* value = BuildUncheckedDictionaryElementLoad(receiver,
- properties,
- key,
- hash);
+ HValue* value = BuildUncheckedDictionaryElementLoad(
+ receiver, properties, key, hash, casted_stub()->language_mode());
Push(value);
}
if_dict_properties.Else();
@@ -2188,7 +2251,10 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
+ Runtime::FunctionForId(is_strong(casted_stub()->language_mode())
+ ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty),
+ 2));
}
inline_or_runtime.End();
}
@@ -2204,29 +2270,5 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
return DoGenerateCode(this);
}
-
-Handle<Code> MegamorphicLoadStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
- HValue* name = GetParameter(LoadDescriptor::kNameIndex);
-
- // We shouldn't generate this when FLAG_vector_ics is true because the
- // megamorphic case is handled as part of the default stub.
- DCHECK(!FLAG_vector_ics);
-
- // This stub tail calls, and an erected frame presents complications we don't
- // need.
- info()->MarkMustNotHaveEagerFrame();
-
- // Probe the stub cache.
- Add<HTailCallThroughMegamorphicCache>(receiver, name);
-
- // We never continue.
- return graph()->GetConstant0();
-}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 21d11a93d0..bcc1fe8801 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -13,6 +13,7 @@
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/parser.h"
namespace v8 {
namespace internal {
@@ -122,7 +123,6 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
-
// Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(
GetCodeKind(),
@@ -268,8 +268,7 @@ MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
// Generate the uninitialized versions of the stub.
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
- BinaryOpICStub stub(isolate, static_cast<Token::Value>(op),
- LanguageMode::SLOPPY);
+ BinaryOpICStub stub(isolate, static_cast<Token::Value>(op), Strength::WEAK);
stub.GetCode();
}
@@ -314,18 +313,29 @@ void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
}
-void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
- os << "StringAddStub";
- if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- os << "_CheckBoth";
- } else if ((flags() & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- os << "_CheckLeft";
- } else if ((flags() & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- os << "_CheckRight";
- }
- if (pretenure_flag() == TENURED) {
- os << "_Tenured";
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
+ switch (flags) {
+ case STRING_ADD_CHECK_NONE:
+ return os << "CheckNone";
+ case STRING_ADD_CHECK_LEFT:
+ return os << "CheckLeft";
+ case STRING_ADD_CHECK_RIGHT:
+ return os << "CheckRight";
+ case STRING_ADD_CHECK_BOTH:
+ return os << "CheckBoth";
}
+ UNREACHABLE();
+ return os;
+}
+
+
+void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
+ os << "StringAddStub_" << flags() << "_" << pretenure_flag();
+}
+
+
+void StringAddTFStub::PrintBaseName(std::ostream& os) const { // NOLINT
+ os << "StringAddTFStub_" << flags() << "_" << pretenure_flag();
}
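
A minimal standalone sketch of the streamable-flags pattern introduced above; the
enum values mirror the StringAddFlags declaration that appears later in this patch,
while the surrounding program is purely illustrative:

#include <iostream>

enum StringAddFlags {
  STRING_ADD_CHECK_NONE = 0,
  STRING_ADD_CHECK_LEFT = 1 << 0,
  STRING_ADD_CHECK_RIGHT = 1 << 1,
  STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};

std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
  switch (flags) {
    case STRING_ADD_CHECK_NONE:  return os << "CheckNone";
    case STRING_ADD_CHECK_LEFT:  return os << "CheckLeft";
    case STRING_ADD_CHECK_RIGHT: return os << "CheckRight";
    case STRING_ADD_CHECK_BOTH:  return os << "CheckBoth";
  }
  return os;  // Unreachable for valid enum values.
}

int main() {
  // Prints "StringAddStub_CheckBoth", in the spirit of the new PrintBaseName.
  std::cout << "StringAddStub_" << STRING_ADD_CHECK_BOTH << "\n";
}
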
@@ -453,6 +463,50 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) {
}
+namespace {
+
+Handle<JSFunction> GetFunction(Isolate* isolate, const char* name) {
+ v8::ExtensionConfiguration no_extensions;
+ Handle<Context> ctx = isolate->bootstrapper()->CreateEnvironment(
+ MaybeHandle<JSGlobalProxy>(), v8::Local<v8::ObjectTemplate>(),
+ &no_extensions);
+ Handle<JSBuiltinsObject> builtins = handle(ctx->builtins());
+ MaybeHandle<Object> fun = Object::GetProperty(isolate, builtins, name);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(fun.ToHandleChecked());
+ DCHECK(!function->IsUndefined() &&
+ "JavaScript implementation of stub not found");
+ return function;
+}
+} // namespace
+
+
+Handle<Code> TurboFanCodeStub::GenerateCode() {
+ // Get the outer ("stub generator") function.
+ const char* name = CodeStub::MajorName(MajorKey(), false);
+ Handle<JSFunction> outer = GetFunction(isolate(), name);
+ DCHECK_EQ(2, outer->shared()->length());
+
+ // Invoke the outer function to get the stub itself.
+ Factory* factory = isolate()->factory();
+ Handle<Object> call_conv = factory->InternalizeUtf8String(name);
+ Handle<Object> minor_key = factory->NewNumber(MinorKey());
+ Handle<Object> args[] = {call_conv, minor_key};
+ MaybeHandle<Object> result = Execution::Call(
+ isolate(), outer, factory->undefined_value(), 2, args, false);
+ Handle<JSFunction> inner = Handle<JSFunction>::cast(result.ToHandleChecked());
+ // Just to make sure nobody calls this...
+ inner->set_code(isolate()->builtins()->builtin(Builtins::kIllegal));
+
+ Zone zone;
+ // Build a "hybrid" CompilationInfo for a JSFunction/CodeStub pair.
+ ParseInfo parse_info(&zone, inner);
+ CompilationInfo info(&parse_info);
+ info.SetFunctionType(GetCallInterfaceDescriptor().GetFunctionType());
+ info.SetStub(this);
+ return info.GenerateCodeStub();
+}
+
+
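
The flow above bootstraps a TurboFan stub from JavaScript: an outer "stub
generator" function is looked up by the stub's major name, invoked with the name
and minor key, and returns the inner closure that actually gets compiled. A
hedged model of that two-phase lookup (the registry, names, and types below are
illustrative, not V8 API):

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>

using StubGenerator = std::function<std::string(uint32_t minor_key)>;

std::map<std::string, StubGenerator>& Registry() {
  static std::map<std::string, StubGenerator> registry;
  return registry;
}

std::string GenerateCodeFor(const std::string& major_name, uint32_t minor_key) {
  auto it = Registry().find(major_name);
  if (it == Registry().end()) return "<missing stub generator>";
  // The returned value stands in for the inner function handed to the pipeline.
  return it->second(minor_key);
}

int main() {
  Registry()["StringAddTF"] = [](uint32_t key) {
    return "inner stub specialized for minor key " + std::to_string(key);
  };
  std::cout << GenerateCodeFor("StringAddTF", 3) << "\n";
}
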
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
@@ -564,11 +618,6 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void LoadFastElementStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
-}
-
-
void LoadDictionaryElementStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
@@ -578,7 +627,9 @@ void LoadDictionaryElementStub::InitializeDescriptor(
void KeyedLoadGenericStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
+ Runtime::FunctionForId(is_strong(language_mode())
+ ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty)->entry);
}
@@ -587,18 +638,17 @@ void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
} else if (kind() == Code::KEYED_LOAD_IC) {
descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+ } else if (kind() == Code::KEYED_STORE_IC) {
+ descriptor->Initialize(FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
}
}
-CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
+CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
- if (FLAG_vector_ics) {
- return VectorLoadICDescriptor(isolate());
- }
- return LoadDescriptor(isolate());
+ return LoadWithVectorDescriptor(isolate());
} else {
- DCHECK_EQ(Code::STORE_IC, kind());
+ DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
return StoreDescriptor(isolate());
}
}
@@ -616,14 +666,12 @@ void ElementsTransitionAndStoreStub::InitializeDescriptor(
}
-CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor() {
+CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor()
+ const {
return StoreTransitionDescriptor(isolate());
}
-void MegamorphicLoadStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
@@ -762,7 +810,8 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
case DICTIONARY_ELEMENTS:
ElementHandlerCompiler::GenerateStoreSlow(masm);
break;
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -879,7 +928,7 @@ bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
Types old_types = new_types;
bool to_boolean_value = new_types.UpdateStatus(object);
TraceTransition(old_types, new_types);
- set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToByte()));
+ set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToIntegral()));
return to_boolean_value;
}
@@ -1001,4 +1050,21 @@ InternalArrayConstructorStub::InternalArrayConstructorStub(
}
-} } // namespace v8::internal
+Representation RepresentationFromType(Type* type) {
+ if (type->Is(Type::UntaggedSigned()) || type->Is(Type::UntaggedUnsigned())) {
+ return Representation::Integer32();
+ }
+
+ if (type->Is(Type::TaggedSigned())) {
+ return Representation::Smi();
+ }
+
+ if (type->Is(Type::UntaggedPointer())) {
+ return Representation::External();
+ }
+
+ DCHECK(!type->Is(Type::Untagged()));
+ return Representation::Tagged();
+}
+} // namespace internal
+} // namespace v8
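
RepresentationFromType above tests the most specific type predicates first and
falls back to Tagged. A simplified standalone model, treating types as bitsets
and Is() as subset inclusion (this tiny lattice is an assumption for
illustration, not V8's Type system):

#include <cstdint>
#include <iostream>

enum : uint32_t {
  kTaggedSigned     = 1u << 0,
  kTaggedPointer    = 1u << 1,
  kUntaggedSigned   = 1u << 2,
  kUntaggedUnsigned = 1u << 3,
  kUntaggedPointer  = 1u << 4,
};

// True when every bit of |type| lies inside |super|.
bool Is(uint32_t type, uint32_t super) { return (type & ~super) == 0; }

const char* RepresentationFromType(uint32_t type) {
  if (Is(type, kUntaggedSigned | kUntaggedUnsigned)) return "Integer32";
  if (Is(type, kTaggedSigned)) return "Smi";
  if (Is(type, kUntaggedPointer)) return "External";
  return "Tagged";  // The catch-all, as in the patch's final branch.
}

int main() {
  std::cout << RepresentationFromType(kUntaggedUnsigned) << "\n";  // Integer32
  std::cout << RepresentationFromType(kTaggedSigned) << "\n";      // Smi
  std::cout << RepresentationFromType(kTaggedPointer) << "\n";     // Tagged
}
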
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 040bf10f77..c06c6c1fe4 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -54,6 +54,10 @@ namespace internal {
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
+ V(VectorStoreICTrampoline) \
+ V(VectorKeyedStoreICTrampoline) \
+ V(VectorStoreIC) \
+ V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(AllocateHeapNumber) \
V(ArrayNArgumentsConstructor) \
@@ -76,8 +80,6 @@ namespace internal {
V(KeyedLoadGeneric) \
V(LoadScriptContextField) \
V(LoadDictionaryElement) \
- V(LoadFastElement) \
- V(MegamorphicLoad) \
V(NameDictionaryLookup) \
V(NumberToString) \
V(Typeof) \
@@ -87,13 +89,19 @@ namespace internal {
V(StringAdd) \
V(ToBoolean) \
V(TransitionElementsKind) \
- V(VectorRawKeyedLoad) \
- V(VectorRawLoad) \
+ V(KeyedLoadIC) \
+ V(LoadIC) \
+ /* TurboFanCodeStubs */ \
+ V(StringLengthTF) \
+ V(StringAddTF) \
+ V(MathFloor) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
V(LoadConstant) \
+ V(LoadFastElement) \
V(LoadField) \
V(KeyedLoadSloppyArguments) \
+ V(KeyedStoreSloppyArguments) \
V(StoreField) \
V(StoreGlobal) \
V(StoreTransition) \
@@ -152,6 +160,8 @@ namespace internal {
CODE_STUB_LIST_PPC(V) \
CODE_STUB_LIST_MIPS(V)
+static const int kHasReturnedMinusZeroSentinel = 1;
+
// CodeStub is the base class of all stubs.
class CodeStub BASE_EMBEDDED {
public:
@@ -204,7 +214,9 @@ class CodeStub BASE_EMBEDDED {
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out);
- virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() = 0;
+ virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() const = 0;
+
+ virtual int GetStackParameterCount() const { return 0; }
virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) {}
@@ -310,7 +322,7 @@ struct FakeStubForTesting : public CodeStub {
// Only used by pipeline.cc's GetDebugName in DEBUG mode.
Major MajorKey() const override { return CodeStub::NoCache; }
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
UNREACHABLE();
return CallInterfaceDescriptor();
}
@@ -353,20 +365,20 @@ struct FakeStubForTesting : public CodeStub {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override { \
- return NAME##Descriptor(isolate()); \
+#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
+ public: \
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
+ return NAME##Descriptor(isolate()); \
}
// There are some code stubs we just can't describe right now with a
// CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
// An attempt to retrieve a descriptor will fail.
-#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR() \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override { \
- UNREACHABLE(); \
- return CallInterfaceDescriptor(); \
+#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR() \
+ public: \
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
+ UNREACHABLE(); \
+ return CallInterfaceDescriptor(); \
}
@@ -375,8 +387,6 @@ class PlatformCodeStub : public CodeStub {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GenerateCode() override;
- Code::Kind GetCodeKind() const override { return Code::STUB; }
-
protected:
explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) {}
@@ -417,12 +427,16 @@ class CodeStubDescriptor {
void set_call_descriptor(CallInterfaceDescriptor d) { call_descriptor_ = d; }
CallInterfaceDescriptor call_descriptor() const { return call_descriptor_; }
- int GetEnvironmentParameterCount() const {
- return call_descriptor().GetEnvironmentParameterCount();
+ int GetRegisterParameterCount() const {
+ return call_descriptor().GetRegisterParameterCount();
+ }
+
+ Register GetRegisterParameter(int index) const {
+ return call_descriptor().GetRegisterParameter(index);
}
- Representation GetEnvironmentParameterRepresentation(int index) const {
- return call_descriptor().GetEnvironmentParameterRepresentation(index);
+ Type* GetParameterType(int index) const {
+ return call_descriptor().GetParameterType(index);
}
ExternalReference miss_handler() const {
@@ -434,13 +448,8 @@ class CodeStubDescriptor {
return has_miss_handler_;
}
- bool IsEnvironmentParameterCountRegister(int index) const {
- return call_descriptor().GetEnvironmentParameterRegister(index).is(
- stack_parameter_count_);
- }
-
int GetHandlerParameterCount() const {
- int params = call_descriptor().GetEnvironmentParameterCount();
+ int params = GetRegisterParameterCount();
if (handler_arguments_mode_ == PASS_ARGUMENTS) {
params += 1;
}
@@ -475,8 +484,6 @@ class HydrogenCodeStub : public CodeStub {
INITIALIZED
};
- Code::Kind GetCodeKind() const override { return Code::STUB; }
-
template<class SubClass>
static Handle<Code> GetUninitialized(Isolate* isolate) {
SubClass::GenerateAheadOfTime(isolate);
@@ -518,6 +525,25 @@ class HydrogenCodeStub : public CodeStub {
};
+class TurboFanCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ Handle<Code> GenerateCode() override;
+
+  int GetStackParameterCount() const override {
+ return GetCallInterfaceDescriptor().GetStackParameterCount();
+ }
+
+ Code::StubType GetStubType() const override { return Code::FAST; }
+
+ protected:
+ explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
+
+ private:
+ DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
+};
+
+
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
@@ -584,6 +610,72 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
+class MathFloorStub : public TurboFanCodeStub {
+ public:
+ explicit MathFloorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+ int GetStackParameterCount() const override { return 1; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(MathRoundVariant);
+ DEFINE_CODE_STUB(MathFloor, TurboFanCodeStub);
+};
+
+
+class StringLengthTFStub : public TurboFanCodeStub {
+ public:
+ explicit StringLengthTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ InlineCacheState GetICState() const override { return MONOMORPHIC; }
+ ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+ DEFINE_CODE_STUB(StringLengthTF, TurboFanCodeStub);
+};
+
+
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE = 0,
+ // Check left parameter.
+ STRING_ADD_CHECK_LEFT = 1 << 0,
+ // Check right parameter.
+ STRING_ADD_CHECK_RIGHT = 1 << 1,
+ // Check both parameters.
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
+};
+
+
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
+
+
+class StringAddTFStub : public TurboFanCodeStub {
+ public:
+ StringAddTFStub(Isolate* isolate, StringAddFlags flags,
+ PretenureFlag pretenure_flag)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = StringAddFlagsBits::encode(flags) |
+ PretenureFlagBits::encode(pretenure_flag);
+ }
+
+ StringAddFlags flags() const {
+ return StringAddFlagsBits::decode(MinorKey());
+ }
+
+ PretenureFlag pretenure_flag() const {
+ return PretenureFlagBits::decode(MinorKey());
+ }
+
+ private:
+ class StringAddFlagsBits : public BitField<StringAddFlags, 0, 2> {};
+ class PretenureFlagBits : public BitField<PretenureFlag, 2, 1> {};
+
+ void PrintBaseName(std::ostream& os) const override; // NOLINT
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
+ DEFINE_CODE_STUB(StringAddTF, TurboFanCodeStub);
+};
+
+
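
The minor key packs the two StringAddFlags bits and one PretenureFlag bit with
V8-style BitFields. A minimal stand-in for BitField showing the same
encode/decode round-trip (the BitField replica is simplified; only the bit
layout matches the class above):

#include <cassert>
#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << kShift; }
  static T decode(uint32_t bits) { return static_cast<T>((bits & kMask) >> kShift); }
};

enum StringAddFlags { CHECK_NONE = 0, CHECK_LEFT = 1, CHECK_RIGHT = 2, CHECK_BOTH = 3 };
enum PretenureFlag { NOT_TENURED = 0, TENURED = 1 };

using StringAddFlagsBits = BitField<StringAddFlags, 0, 2>;  // bits 0..1
using PretenureFlagBits = BitField<PretenureFlag, 2, 1>;    // bit 2

int main() {
  uint32_t minor_key =
      StringAddFlagsBits::encode(CHECK_BOTH) | PretenureFlagBits::encode(TENURED);
  assert(StringAddFlagsBits::decode(minor_key) == CHECK_BOTH);
  assert(PretenureFlagBits::decode(minor_key) == TENURED);
  return 0;
}
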
class NumberToStringStub final : public HydrogenCodeStub {
public:
explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -643,7 +735,7 @@ class FastNewContextStub final : public HydrogenCodeStub {
static const int kMaximumSlots = 64;
FastNewContextStub(Isolate* isolate, int slots) : HydrogenCodeStub(isolate) {
- DCHECK(slots > 0 && slots <= kMaximumSlots);
+ DCHECK(slots >= 0 && slots <= kMaximumSlots);
set_sub_minor_key(SlotsBits::encode(slots));
}
@@ -763,7 +855,7 @@ class InstanceofStub: public PlatformCodeStub {
static Register left() { return InstanceofDescriptor::left(); }
static Register right() { return InstanceofDescriptor::right(); }
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
if (HasArgsInRegisters()) {
return InstanceofDescriptor(isolate());
}
@@ -844,7 +936,7 @@ class MathPowStub: public PlatformCodeStub {
minor_key_ = ExponentTypeBits::encode(exponent_type);
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
if (exponent_type() == TAGGED) {
return MathPowTaggedDescriptor(isolate());
} else if (exponent_type() == INTEGER) {
@@ -932,11 +1024,8 @@ class FunctionPrototypeStub : public PlatformCodeStub {
// TODO(mvstanton): only the receiver register is accessed. When this is
// translated to a hydrogen code stub, a new CallInterfaceDescriptor
// should be created that just uses that register for more efficient code.
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
- if (FLAG_vector_ics) {
- return VectorLoadICDescriptor(isolate());
- }
- return LoadDescriptor(isolate());
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+ return LoadWithVectorDescriptor(isolate());
}
DEFINE_PLATFORM_CODE_STUB(FunctionPrototype, PlatformCodeStub);
@@ -978,7 +1067,7 @@ class HandlerStub : public HydrogenCodeStub {
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override;
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
protected:
explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -1052,6 +1141,20 @@ class KeyedLoadSloppyArgumentsStub : public HandlerStub {
};
+class KeyedStoreSloppyArgumentsStub : public HandlerStub {
+ public:
+ explicit KeyedStoreSloppyArgumentsStub(Isolate* isolate)
+ : HandlerStub(isolate) {}
+
+ protected:
+ Code::Kind kind() const override { return Code::KEYED_STORE_IC; }
+ Code::StubType GetStubType() const override { return Code::FAST; }
+
+ private:
+ DEFINE_HANDLER_CODE_STUB(KeyedStoreSloppyArguments, HandlerStub);
+};
+
+
class LoadConstantStub : public HandlerStub {
public:
LoadConstantStub(Isolate* isolate, int constant_index)
@@ -1158,7 +1261,7 @@ class StoreTransitionStub : public HandlerStub {
return StoreModeBits::decode(sub_minor_key());
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override;
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
protected:
Code::Kind kind() const override { return Code::STORE_IC; }
@@ -1321,9 +1424,9 @@ class CallApiGetterStub : public PlatformCodeStub {
class BinaryOpICStub : public HydrogenCodeStub {
public:
- BinaryOpICStub(Isolate* isolate, Token::Value op, LanguageMode language_mode)
+ BinaryOpICStub(Isolate* isolate, Token::Value op, Strength strength)
: HydrogenCodeStub(isolate, UNINITIALIZED) {
- BinaryOpICState state(isolate, op, language_mode);
+ BinaryOpICState state(isolate, op, strength);
set_sub_minor_key(state.GetExtraICState());
}
@@ -1405,8 +1508,8 @@ class BinaryOpICWithAllocationSiteStub final : public PlatformCodeStub {
class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
public:
BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op,
- LanguageMode language_mode)
- : BinaryOpICStub(isolate, op, language_mode) {}
+ Strength strength)
+ : BinaryOpICStub(isolate, op, strength) {}
BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
: BinaryOpICStub(isolate, state) {}
@@ -1423,18 +1526,6 @@ class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
};
-enum StringAddFlags {
- // Omit both parameter checks.
- STRING_ADD_CHECK_NONE = 0,
- // Check left parameter.
- STRING_ADD_CHECK_LEFT = 1 << 0,
- // Check right parameter.
- STRING_ADD_CHECK_RIGHT = 1 << 1,
- // Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
-};
-
-
class StringAddStub final : public HydrogenCodeStub {
public:
StringAddStub(Isolate* isolate, StringAddFlags flags,
@@ -1469,12 +1560,15 @@ class StringAddStub final : public HydrogenCodeStub {
class CompareICStub : public PlatformCodeStub {
public:
- CompareICStub(Isolate* isolate, Token::Value op, CompareICState::State left,
- CompareICState::State right, CompareICState::State state)
+ CompareICStub(Isolate* isolate, Token::Value op, Strength strength,
+ CompareICState::State left, CompareICState::State right,
+ CompareICState::State state)
: PlatformCodeStub(isolate) {
DCHECK(Token::IsCompareOp(op));
- minor_key_ = OpBits::encode(op - Token::EQ) | LeftStateBits::encode(left) |
- RightStateBits::encode(right) | StateBits::encode(state);
+ minor_key_ = OpBits::encode(op - Token::EQ) |
+ StrengthBits::encode(is_strong(strength)) |
+ LeftStateBits::encode(left) | RightStateBits::encode(right) |
+ StateBits::encode(state);
}
void set_known_map(Handle<Map> map) { known_map_ = map; }
@@ -1485,6 +1579,10 @@ class CompareICStub : public PlatformCodeStub {
return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
}
+ Strength strength() const {
+ return StrengthBits::decode(minor_key_) ? Strength::STRONG : Strength::WEAK;
+ }
+
CompareICState::State left() const {
return LeftStateBits::decode(minor_key_);
}
@@ -1516,9 +1614,10 @@ class CompareICStub : public PlatformCodeStub {
}
class OpBits : public BitField<int, 0, 3> {};
- class LeftStateBits : public BitField<CompareICState::State, 3, 4> {};
- class RightStateBits : public BitField<CompareICState::State, 7, 4> {};
- class StateBits : public BitField<CompareICState::State, 11, 4> {};
+ class StrengthBits : public BitField<bool, 3, 1> {};
+ class LeftStateBits : public BitField<CompareICState::State, 4, 4> {};
+ class RightStateBits : public BitField<CompareICState::State, 8, 4> {};
+ class StateBits : public BitField<CompareICState::State, 12, 4> {};
Handle<Map> known_map_;
@@ -1689,16 +1788,11 @@ class ArgumentsAccessStub: public PlatformCodeStub {
NEW_STRICT
};
- enum HasNewTarget { NO_NEW_TARGET, HAS_NEW_TARGET };
-
- ArgumentsAccessStub(Isolate* isolate, Type type,
- HasNewTarget has_new_target = NO_NEW_TARGET)
- : PlatformCodeStub(isolate) {
- minor_key_ =
- TypeBits::encode(type) | HasNewTargetBits::encode(has_new_target);
+ ArgumentsAccessStub(Isolate* isolate, Type type) : PlatformCodeStub(isolate) {
+ minor_key_ = TypeBits::encode(type);
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
if (type() == READ_ELEMENT) {
return ArgumentsAccessReadDescriptor(isolate());
}
@@ -1707,9 +1801,6 @@ class ArgumentsAccessStub: public PlatformCodeStub {
private:
Type type() const { return TypeBits::decode(minor_key_); }
- bool has_new_target() const {
- return HasNewTargetBits::decode(minor_key_) == HAS_NEW_TARGET;
- }
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
@@ -1719,7 +1810,6 @@ class ArgumentsAccessStub: public PlatformCodeStub {
void PrintName(std::ostream& os) const override; // NOLINT
class TypeBits : public BitField<Type, 0, 2> {};
- class HasNewTargetBits : public BitField<HasNewTarget, 2, 1> {};
DEFINE_PLATFORM_CODE_STUB(ArgumentsAccess, PlatformCodeStub);
};
@@ -1729,14 +1819,14 @@ class RestParamAccessStub: public PlatformCodeStub {
public:
explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
return ContextOnlyDescriptor(isolate());
}
private:
void GenerateNew(MacroAssembler* masm);
- virtual void PrintName(std::ostream& os) const override; // NOLINT
+ void PrintName(std::ostream& os) const override; // NOLINT
DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
};
@@ -2018,14 +2108,17 @@ class StringCharAtGenerator {
class LoadDictionaryElementStub : public HydrogenCodeStub {
public:
- explicit LoadDictionaryElementStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) {}
+ explicit LoadDictionaryElementStub(Isolate* isolate, const LoadICState& state)
+ : HydrogenCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
- if (FLAG_vector_ics) {
- return VectorLoadICDescriptor(isolate());
- }
- return LoadDescriptor(isolate());
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+ return LoadWithVectorDescriptor(isolate());
+ }
+
+ LanguageMode language_mode() const {
+ return LoadICState::GetLanguageMode(MinorKey());
}
DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
@@ -2034,14 +2127,18 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
class KeyedLoadGenericStub : public HydrogenCodeStub {
public:
- explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+ explicit KeyedLoadGenericStub(Isolate* isolate, const LoadICState& state)
+ : HydrogenCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
InlineCacheState GetICState() const override { return GENERIC; }
- // Since KeyedLoadGeneric stub doesn't miss (simply calls runtime), it
- // doesn't need to use the VectorLoadICDescriptor for the case when
- // flag --vector-ics is true.
+ LanguageMode language_mode() const {
+ return LoadICState::GetLanguageMode(MinorKey());
+ }
+
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
@@ -2063,20 +2160,20 @@ class LoadICTrampolineStub : public PlatformCodeStub {
return static_cast<ExtraICState>(minor_key_);
}
- private:
+ protected:
LoadICState state() const {
return LoadICState(static_cast<ExtraICState>(minor_key_));
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadICTrampoline);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
};
class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
public:
- explicit KeyedLoadICTrampolineStub(Isolate* isolate)
- : LoadICTrampolineStub(isolate, LoadICState(0)) {}
+ explicit KeyedLoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
+ : LoadICTrampolineStub(isolate, state) {}
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
@@ -2084,6 +2181,44 @@ class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
};
+class VectorStoreICTrampolineStub : public PlatformCodeStub {
+ public:
+ VectorStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
+
+ Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
+
+ InlineCacheState GetICState() const final { return DEFAULT; }
+
+ ExtraICState GetExtraICState() const final {
+ return static_cast<ExtraICState>(minor_key_);
+ }
+
+ protected:
+ StoreICState state() const {
+ return StoreICState(static_cast<ExtraICState>(minor_key_));
+ }
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreICTrampoline);
+ DEFINE_PLATFORM_CODE_STUB(VectorStoreICTrampoline, PlatformCodeStub);
+};
+
+
+class VectorKeyedStoreICTrampolineStub : public VectorStoreICTrampolineStub {
+ public:
+ VectorKeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+ : VectorStoreICTrampolineStub(isolate, state) {}
+
+ Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
+
+ DEFINE_PLATFORM_CODE_STUB(VectorKeyedStoreICTrampoline,
+ VectorStoreICTrampolineStub);
+};
+
+
class CallICTrampolineStub : public PlatformCodeStub {
public:
CallICTrampolineStub(Isolate* isolate, const CallICState& state)
@@ -2119,72 +2254,92 @@ class CallIC_ArrayTrampolineStub : public CallICTrampolineStub {
};
-class MegamorphicLoadStub : public HydrogenCodeStub {
+class LoadICStub : public PlatformCodeStub {
public:
- MegamorphicLoadStub(Isolate* isolate, const LoadICState& state)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(state.GetExtraICState());
+ explicit LoadICStub(Isolate* isolate, const LoadICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
}
- Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
- InlineCacheState GetICState() const final { return MEGAMORPHIC; }
+ void GenerateForTrampoline(MacroAssembler* masm);
+ Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+ InlineCacheState GetICState() const final { return DEFAULT; }
ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(sub_minor_key());
+ return static_cast<ExtraICState>(minor_key_);
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
- if (FLAG_vector_ics) {
- return VectorLoadICDescriptor(isolate());
- }
- return LoadDescriptor(isolate());
- }
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+ DEFINE_PLATFORM_CODE_STUB(LoadIC, PlatformCodeStub);
- DEFINE_HYDROGEN_CODE_STUB(MegamorphicLoad, HydrogenCodeStub);
+ protected:
+ void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
-class VectorRawLoadStub : public PlatformCodeStub {
+class KeyedLoadICStub : public PlatformCodeStub {
public:
- explicit VectorRawLoadStub(Isolate* isolate, const LoadICState& state)
+ explicit KeyedLoadICStub(Isolate* isolate, const LoadICState& state)
: PlatformCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
void GenerateForTrampoline(MacroAssembler* masm);
- virtual Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+ Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
+ InlineCacheState GetICState() const final { return DEFAULT; }
+ ExtraICState GetExtraICState() const final {
+ return static_cast<ExtraICState>(minor_key_);
+ }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+ DEFINE_PLATFORM_CODE_STUB(KeyedLoadIC, PlatformCodeStub);
+
+ protected:
+ void GenerateImpl(MacroAssembler* masm, bool in_frame);
+};
- virtual InlineCacheState GetICState() const final override { return DEFAULT; }
- virtual ExtraICState GetExtraICState() const final override {
+class VectorStoreICStub : public PlatformCodeStub {
+ public:
+ VectorStoreICStub(Isolate* isolate, const StoreICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
+
+ void GenerateForTrampoline(MacroAssembler* masm);
+
+ Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
+ InlineCacheState GetICState() const final { return DEFAULT; }
+ ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
- DEFINE_PLATFORM_CODE_STUB(VectorRawLoad, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreIC);
+ DEFINE_PLATFORM_CODE_STUB(VectorStoreIC, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
-class VectorRawKeyedLoadStub : public PlatformCodeStub {
+class VectorKeyedStoreICStub : public PlatformCodeStub {
public:
- explicit VectorRawKeyedLoadStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
+ VectorKeyedStoreICStub(Isolate* isolate, const StoreICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
void GenerateForTrampoline(MacroAssembler* masm);
- virtual Code::Kind GetCodeKind() const override {
- return Code::KEYED_LOAD_IC;
+ Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
+ InlineCacheState GetICState() const final { return DEFAULT; }
+  ExtraICState GetExtraICState() const final {
+ return static_cast<ExtraICState>(minor_key_);
}
- virtual InlineCacheState GetICState() const final override { return DEFAULT; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
- DEFINE_PLATFORM_CODE_STUB(VectorRawKeyedLoad, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreIC);
+ DEFINE_PLATFORM_CODE_STUB(VectorKeyedStoreIC, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
@@ -2298,18 +2453,20 @@ class StoreScriptContextFieldStub : public ScriptContextFieldStub {
};
-class LoadFastElementStub : public HydrogenCodeStub {
+class LoadFastElementStub : public HandlerStub {
public:
LoadFastElementStub(Isolate* isolate, bool is_js_array,
ElementsKind elements_kind,
bool convert_hole_to_undefined = false)
- : HydrogenCodeStub(isolate) {
+ : HandlerStub(isolate) {
set_sub_minor_key(
ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array) |
CanConvertHoleToUndefined::encode(convert_hole_to_undefined));
}
+ Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
+
bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
bool convert_hole_to_undefined() const {
return CanConvertHoleToUndefined::decode(sub_minor_key());
@@ -2324,14 +2481,7 @@ class LoadFastElementStub : public HydrogenCodeStub {
class IsJSArrayBits: public BitField<bool, 8, 1> {};
class CanConvertHoleToUndefined : public BitField<bool, 9, 1> {};
- CallInterfaceDescriptor GetCallInterfaceDescriptor() override {
- if (FLAG_vector_ics) {
- return VectorLoadICDescriptor(isolate());
- }
- return LoadDescriptor(isolate());
- }
-
- DEFINE_HYDROGEN_CODE_STUB(LoadFastElement, HydrogenCodeStub);
+ DEFINE_HANDLER_CODE_STUB(LoadFastElement, HandlerStub);
};
@@ -2612,16 +2762,15 @@ class ToBooleanStub: public HydrogenCodeStub {
RESULT_AS_INVERSE_ODDBALL // For {false} on truthy value, {true} otherwise.
};
- // At most 8 different types can be distinguished, because the Code object
- // only has room for a single byte to hold a set of these types. :-P
- STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
+ // At most 16 different types can be distinguished, because the Code object
+ // only has room for two bytes to hold a set of these types. :-P
+ STATIC_ASSERT(NUMBER_OF_TYPES <= 16);
- class Types : public EnumSet<Type, byte> {
+ class Types : public EnumSet<Type, uint16_t> {
public:
- Types() : EnumSet<Type, byte>(0) {}
- explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}
+ Types() : EnumSet<Type, uint16_t>(0) {}
+ explicit Types(uint16_t bits) : EnumSet<Type, uint16_t>(bits) {}
- byte ToByte() const { return ToIntegral(); }
bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;
@@ -2632,13 +2781,13 @@ class ToBooleanStub: public HydrogenCodeStub {
ToBooleanStub(Isolate* isolate, ResultMode mode, Types types = Types())
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(types.ToByte()) |
+ set_sub_minor_key(TypesBits::encode(types.ToIntegral()) |
ResultModeBits::encode(mode));
}
ToBooleanStub(Isolate* isolate, ExtraICState state)
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(static_cast<byte>(state)) |
+ set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)) |
ResultModeBits::encode(RESULT_AS_SMI));
}
@@ -2671,7 +2820,7 @@ class ToBooleanStub: public HydrogenCodeStub {
set_sub_minor_key(ResultModeBits::encode(RESULT_AS_SMI));
}
- class TypesBits : public BitField<byte, 0, NUMBER_OF_TYPES> {};
+ class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
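
The widening from byte to uint16_t follows directly from the new STATIC_ASSERT:
with up to 16 distinguishable ToBoolean types, the bitset no longer fits in 8
bits. A toy EnumSet sketch (type names abbreviated and illustrative):

#include <cstdint>
#include <iostream>

template <class E, class Carrier>
class EnumSet {
 public:
  explicit EnumSet(Carrier bits = 0) : bits_(bits) {}
  void Add(E element) { bits_ |= static_cast<Carrier>(1u << element); }
  bool Contains(E element) const { return (bits_ & (1u << element)) != 0; }
  Carrier ToIntegral() const { return bits_; }

 private:
  Carrier bits_;
};

enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING, SYMBOL,
            HEAP_NUMBER, SIMD_VALUE /* ninth entry: a byte is now too small */ };

int main() {
  EnumSet<Type, uint16_t> types;
  types.Add(SMI);
  types.Add(HEAP_NUMBER);
  std::cout << std::hex << types.ToIntegral() << "\n";  // 88 (bits 3 and 7)
}
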
@@ -2839,6 +2988,8 @@ class StringCompareStub : public PlatformCodeStub {
#undef DEFINE_HYDROGEN_CODE_STUB
#undef DEFINE_CODE_STUB
#undef DEFINE_CODE_STUB_BASE
+
+extern Representation RepresentationFromType(Type* type);
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 796e39a50c..f25ce4df22 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -239,18 +239,5 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#endif // ENABLE_DISASSEMBLER
}
-
-bool CodeGenerator::RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here) {
- if (pos != RelocInfo::kNoPosition) {
- masm->positions_recorder()->RecordStatementPosition(pos);
- masm->positions_recorder()->RecordPosition(pos);
- if (right_here) {
- return masm->positions_recorder()->WriteRecordedPositions();
- }
- }
- return false;
-}
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 0e0cf1d294..79c2c33696 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -32,7 +32,6 @@
// ~CodeGenerator
// Generate
// ComputeLazyCompile
-// BuildFunctionInfo
// ProcessDeclarations
// DeclareGlobals
// CheckForInlineRuntimeCall
@@ -85,10 +84,6 @@ class CodeGenerator {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/collection-iterator.js b/deps/v8/src/collection-iterator.js
index 7aa5208cea..8cae6060d0 100644
--- a/deps/v8/src/collection-iterator.js
+++ b/deps/v8/src/collection-iterator.js
@@ -7,14 +7,13 @@ var $mapIteratorNext;
var $setIteratorNext;
var $setValues;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
var GlobalMap = global.Map;
-var GlobalObject = global.Object;
var GlobalSet = global.Set;
// -------------------------------------------------------------------
@@ -49,11 +48,6 @@ function SetIteratorNextJS() {
}
-function SetIteratorSymbolIterator() {
- return this;
-}
-
-
function SetEntries() {
if (!IS_SET(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -74,19 +68,16 @@ function SetValues() {
// -------------------------------------------------------------------
%SetCode(SetIterator, SetIteratorConstructor);
-%FunctionSetPrototype(SetIterator, new GlobalObject());
+%FunctionSetPrototype(SetIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(SetIterator, 'Set Iterator');
-$installFunctions(SetIterator.prototype, DONT_ENUM, [
+utils.InstallFunctions(SetIterator.prototype, DONT_ENUM, [
'next', SetIteratorNextJS
]);
-$setFunctionName(SetIteratorSymbolIterator, symbolIterator);
-%AddNamedProperty(SetIterator.prototype, symbolIterator,
- SetIteratorSymbolIterator, DONT_ENUM);
%AddNamedProperty(SetIterator.prototype, symbolToStringTag,
"Set Iterator", READ_ONLY | DONT_ENUM);
-$installFunctions(GlobalSet.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
'entries', SetEntries,
'keys', SetValues,
'values', SetValues
@@ -104,11 +95,6 @@ function MapIteratorConstructor(map, kind) {
}
-function MapIteratorSymbolIterator() {
- return this;
-}
-
-
function MapIteratorNextJS() {
if (!IS_MAP_ITERATOR(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -164,20 +150,17 @@ function MapValues() {
// -------------------------------------------------------------------
%SetCode(MapIterator, MapIteratorConstructor);
-%FunctionSetPrototype(MapIterator, new GlobalObject());
+%FunctionSetPrototype(MapIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(MapIterator, 'Map Iterator');
-$installFunctions(MapIterator.prototype, DONT_ENUM, [
+utils.InstallFunctions(MapIterator.prototype, DONT_ENUM, [
'next', MapIteratorNextJS
]);
-$setFunctionName(MapIteratorSymbolIterator, symbolIterator);
-%AddNamedProperty(MapIterator.prototype, symbolIterator,
- MapIteratorSymbolIterator, DONT_ENUM);
%AddNamedProperty(MapIterator.prototype, symbolToStringTag,
"Map Iterator", READ_ONLY | DONT_ENUM);
-$installFunctions(GlobalMap.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
'entries', MapEntries,
'keys', MapKeys,
'values', MapValues
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index db30546165..ceab1642c5 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -2,15 +2,39 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
-
+var $getHash;
+var $getExistingHash;
+var $mapGet;
+var $mapSet;
+var $mapHas;
+var $mapDelete;
+var $setAdd;
+var $setHas;
+var $setDelete;
+var $mapFromArray;
+var $setFromArray;
+
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalMap = global.Map;
var GlobalObject = global.Object;
var GlobalSet = global.Set;
+var IntRandom;
+var NumberIsNaN;
+
+utils.Import(function(from) {
+  IntRandom = from.IntRandom;
+  NumberIsNaN = from.NumberIsNaN;
+});
// -------------------------------------------------------------------
@@ -18,43 +42,47 @@ function HashToEntry(table, hash, numBuckets) {
var bucket = ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets);
return ORDERED_HASH_TABLE_BUCKET_AT(table, bucket);
}
-%SetInlineBuiltinFlag(HashToEntry);
+%SetForceInlineFlag(HashToEntry);
function SetFindEntry(table, numBuckets, key, hash) {
- var keyIsNaN = $numberIsNaN(key);
- for (var entry = HashToEntry(table, hash, numBuckets);
- entry !== NOT_FOUND;
- entry = ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets)) {
- var candidate = ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets);
- if (key === candidate) {
- return entry;
- }
- if (keyIsNaN && $numberIsNaN(candidate)) {
+ var entry = HashToEntry(table, hash, numBuckets);
+ if (entry === NOT_FOUND) return entry;
+ var candidate = ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets);
+ if (key === candidate) return entry;
+ var keyIsNaN = NumberIsNaN(key);
+ while (true) {
+ if (keyIsNaN && NumberIsNaN(candidate)) {
return entry;
}
+ entry = ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets);
+ if (entry === NOT_FOUND) return entry;
+ candidate = ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets);
+ if (key === candidate) return entry;
}
return NOT_FOUND;
}
-%SetInlineBuiltinFlag(SetFindEntry);
+%SetForceInlineFlag(SetFindEntry);
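
The rewritten SetFindEntry peels the first bucket probe and key comparison out
of the loop, so the common first-hit path never evaluates NumberIsNaN. The same
control flow over a deliberately simplified chained table (the layout and types
here are illustrative, not V8's ordered hash table):

#include <cmath>
#include <iostream>
#include <vector>

const int kNotFound = -1;

struct Table {
  std::vector<int> bucket_head;  // hash bucket -> first entry index
  std::vector<double> key;       // entry -> key
  std::vector<int> chain;        // entry -> next entry in the same bucket
};

int SetFindEntry(const Table& t, double k, size_t hash) {
  int entry = t.bucket_head[hash % t.bucket_head.size()];
  if (entry == kNotFound) return entry;
  double candidate = t.key[entry];
  if (k == candidate) return entry;  // Fast path: first probe hits.
  bool key_is_nan = std::isnan(k);   // Only computed after the first miss.
  while (true) {
    if (key_is_nan && std::isnan(candidate)) return entry;
    entry = t.chain[entry];
    if (entry == kNotFound) return entry;
    candidate = t.key[entry];
    if (k == candidate) return entry;
  }
}

int main() {
  Table t{{0, kNotFound}, {2.0, 4.0}, {1, kNotFound}};  // bucket 0: 2.0 -> 4.0
  std::cout << SetFindEntry(t, 4.0, 0) << "\n";  // 1
  std::cout << SetFindEntry(t, 8.0, 0) << "\n";  // -1
}
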
function MapFindEntry(table, numBuckets, key, hash) {
- var keyIsNaN = $numberIsNaN(key);
- for (var entry = HashToEntry(table, hash, numBuckets);
- entry !== NOT_FOUND;
- entry = ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets)) {
- var candidate = ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets);
- if (key === candidate) {
- return entry;
- }
- if (keyIsNaN && $numberIsNaN(candidate)) {
+ var entry = HashToEntry(table, hash, numBuckets);
+ if (entry === NOT_FOUND) return entry;
+ var candidate = ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets);
+ if (key === candidate) return entry;
+ var keyIsNaN = NumberIsNaN(key);
+ while (true) {
+ if (keyIsNaN && NumberIsNaN(candidate)) {
return entry;
}
+ entry = ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets);
+ if (entry === NOT_FOUND) return entry;
+ candidate = ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets);
+ if (key === candidate) return entry;
}
return NOT_FOUND;
}
-%SetInlineBuiltinFlag(MapFindEntry);
+%SetForceInlineFlag(MapFindEntry);
function ComputeIntegerHash(key, seed) {
@@ -66,12 +94,13 @@ function ComputeIntegerHash(key, seed) {
hash = hash ^ (hash >>> 4);
hash = (hash * 2057) | 0; // hash = (hash + (hash << 3)) + (hash << 11);
hash = hash ^ (hash >>> 16);
- return hash;
+ return hash & 0x3fffffff;
}
-%SetInlineBuiltinFlag(ComputeIntegerHash);
+%SetForceInlineFlag(ComputeIntegerHash);
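
The functional change to ComputeIntegerHash is the final `& 0x3fffffff`, which
keeps the result non-negative and within Smi range. The same recipe in C++,
assuming the standard mixing steps that precede the lines visible in this hunk:

#include <cstdint>
#include <iostream>

int32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return static_cast<int32_t>(hash & 0x3fffffff);  // Always in [0, 2^30).
}

int main() {
  std::cout << ComputeIntegerHash(42, 0) << "\n";
  std::cout << ComputeIntegerHash(0xffffffffu, 0) << "\n";
}
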
+var hashCodeSymbol = GLOBAL_PRIVATE("hash_code_symbol");
-function GetHash(key) {
+function GetExistingHash(key) {
if (%_IsSmi(key)) {
return ComputeIntegerHash(key, 0);
}
@@ -80,10 +109,25 @@ function GetHash(key) {
if ((field & 1 /* Name::kHashNotComputedMask */) === 0) {
return field >>> 2 /* Name::kHashShift */;
}
+ } else if (IS_SPEC_OBJECT(key) && !%_IsJSProxy(key) && !IS_GLOBAL(key)) {
+ var hash = GET_PRIVATE(key, hashCodeSymbol);
+ return hash;
}
return %GenericHash(key);
}
-%SetInlineBuiltinFlag(GetHash);
+%SetForceInlineFlag(GetExistingHash);
+
+
+function GetHash(key) {
+ var hash = GetExistingHash(key);
+ if (IS_UNDEFINED(hash)) {
+ hash = IntRandom() | 0;
+ if (hash === 0) hash = 1;
+ SET_PRIVATE(key, hashCodeSymbol, hash);
+ }
+ return hash;
+}
+%SetForceInlineFlag(GetHash);
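
GetHash now memoizes a random nonzero identity hash on first use, while
GetExistingHash lets read-only operations (has/get/delete) bail out early when
no hash was ever assigned. A sketch of that split, using a side table instead of
a private symbol:

#include <cstdlib>
#include <iostream>
#include <unordered_map>

std::unordered_map<const void*, int> hash_codes;  // stands in for hashCodeSymbol

bool GetExistingHash(const void* key, int* out) {
  auto it = hash_codes.find(key);
  if (it == hash_codes.end()) return false;  // Never hashed: caller can bail.
  *out = it->second;
  return true;
}

int GetHash(const void* key) {
  int hash;
  if (!GetExistingHash(key, &hash)) {
    hash = std::rand() & 0x3fffffff;
    if (hash == 0) hash = 1;  // 0 is reserved to mean "no hash yet".
    hash_codes[key] = hash;   // Memoize so the identity hash is stable.
  }
  return hash;
}

int main() {
  int object;
  int first = GetHash(&object);
  std::cout << (first == GetHash(&object)) << "\n";  // 1: memoized
}
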
// -------------------------------------------------------------------
@@ -155,7 +199,8 @@ function SetHas(key) {
}
var table = %_JSCollectionGetTable(this);
var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
+ var hash = GetExistingHash(key);
+ if (IS_UNDEFINED(hash)) return false;
return SetFindEntry(table, numBuckets, key, hash) !== NOT_FOUND;
}
@@ -167,7 +212,8 @@ function SetDelete(key) {
}
var table = %_JSCollectionGetTable(this);
var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
+ var hash = GetExistingHash(key);
+ if (IS_UNDEFINED(hash)) return false;
var entry = SetFindEntry(table, numBuckets, key, hash);
if (entry === NOT_FOUND) return false;
@@ -239,8 +285,8 @@ function SetForEach(f, receiver) {
%FunctionSetLength(SetForEach, 1);
// Set up the non-enumerable functions on the Set prototype object.
-$installGetter(GlobalSet.prototype, "size", SetGetSize);
-$installFunctions(GlobalSet.prototype, DONT_ENUM, [
+utils.InstallGetter(GlobalSet.prototype, "size", SetGetSize);
+utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
"add", SetAdd,
"has", SetHas,
"delete", SetDelete,
@@ -282,7 +328,8 @@ function MapGet(key) {
}
var table = %_JSCollectionGetTable(this);
var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
+ var hash = GetExistingHash(key);
+ if (IS_UNDEFINED(hash)) return UNDEFINED;
var entry = MapFindEntry(table, numBuckets, key, hash);
if (entry === NOT_FOUND) return UNDEFINED;
return ORDERED_HASH_MAP_VALUE_AT(table, entry, numBuckets);
@@ -427,8 +474,8 @@ function MapForEach(f, receiver) {
%FunctionSetLength(MapForEach, 1);
// Set up the non-enumerable functions on the Map prototype object.
-$installGetter(GlobalMap.prototype, "size", MapGetSize);
-$installFunctions(GlobalMap.prototype, DONT_ENUM, [
+utils.InstallGetter(GlobalMap.prototype, "size", MapGetSize);
+utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
"get", MapGet,
"set", MapSet,
"has", MapHas,
@@ -437,4 +484,35 @@ $installFunctions(GlobalMap.prototype, DONT_ENUM, [
"forEach", MapForEach
]);
+// Expose to the global scope.
+$getHash = GetHash;
+$getExistingHash = GetExistingHash;
+$mapGet = MapGet;
+$mapSet = MapSet;
+$mapHas = MapHas;
+$mapDelete = MapDelete;
+$setAdd = SetAdd;
+$setHas = SetHas;
+$setDelete = SetDelete;
+
+$mapFromArray = function(array) {
+ var map = new GlobalMap;
+ var length = array.length;
+ for (var i = 0; i < length; i += 2) {
+ var key = array[i];
+ var value = array[i + 1];
+ %_CallFunction(map, key, value, MapSet);
+ }
+ return map;
+};
+
+$setFromArray = function(array) {
+ var set = new GlobalSet;
+ var length = array.length;
+ for (var i = 0; i < length; ++i) {
+ %_CallFunction(set, array[i], SetAdd);
+ }
+ return set;
+};
+
})
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index f2cb4c9000..46d783866f 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -113,8 +113,7 @@ CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name, int line_offset,
int column_offset,
- bool is_embedder_debug_script,
- bool is_shared_cross_origin) {
+ ScriptOriginOptions resource_options) {
Handle<Script> script =
Handle<Script>(Script::cast(function_info->script()), isolate());
// If the script name isn't set, the boilerplate script should have
@@ -127,12 +126,9 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
if (column_offset != script->column_offset()->value()) return false;
// Check that both names are strings. If not, no match.
if (!name->IsString() || !script->name()->IsString()) return false;
- // Were both scripts tagged by the embedder as being internal script?
- if (is_embedder_debug_script != script->is_embedder_debug_script()) {
+  // Are the origin_options the same?
+ if (resource_options.Flags() != script->origin_options().Flags())
return false;
- }
- // Were both scripts tagged by the embedder as being shared cross-origin?
- if (is_shared_cross_origin != script->is_shared_cross_origin()) return false;
// Compare the two name strings for equality.
return String::Equals(Handle<String>::cast(name),
Handle<String>(String::cast(script->name())));
@@ -145,9 +141,8 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// won't.
Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<String> source, Handle<Object> name, int line_offset,
- int column_offset, bool is_embedder_debug_script,
- bool is_shared_cross_origin, Handle<Context> context,
- LanguageMode language_mode) {
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Context> context, LanguageMode language_mode) {
Object* result = NULL;
int generation;
@@ -163,7 +158,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
// Break when we've found a suitable shared function info that
// matches the origin.
if (HasOrigin(function_info, name, line_offset, column_offset,
- is_embedder_debug_script, is_shared_cross_origin)) {
+ resource_options)) {
result = *function_info;
break;
}
@@ -177,8 +172,8 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
if (result != NULL) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
isolate());
- DCHECK(HasOrigin(shared, name, line_offset, column_offset,
- is_embedder_debug_script, is_shared_cross_origin));
+ DCHECK(
+ HasOrigin(shared, name, line_offset, column_offset, resource_options));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
if (generation != 0) Put(source, context, language_mode, shared);
@@ -292,14 +287,12 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source, Handle<Object> name, int line_offset,
- int column_offset, bool is_embedder_debug_script,
- bool is_shared_cross_origin, Handle<Context> context,
- LanguageMode language_mode) {
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Context> context, LanguageMode language_mode) {
if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
- is_embedder_debug_script, is_shared_cross_origin,
- context, language_mode);
+ resource_options, context, language_mode);
}
@@ -407,4 +400,5 @@ void CompilationCache::Disable() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 1a2608e3aa..136bb97ea8 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -74,8 +74,7 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source, Handle<Object> name,
int line_offset, int column_offset,
- bool is_embedder_debug_script,
- bool is_shared_cross_origin,
+ ScriptOriginOptions resource_options,
Handle<Context> context,
LanguageMode language_mode);
void Put(Handle<String> source,
@@ -86,7 +85,7 @@ class CompilationCacheScript : public CompilationSubCache {
private:
bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name,
int line_offset, int column_offset,
- bool is_embedder_debug_script, bool is_shared_cross_origin);
+ ScriptOriginOptions resource_options);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
@@ -149,9 +148,8 @@ class CompilationCache {
// script for the given source string with the right origin.
MaybeHandle<SharedFunctionInfo> LookupScript(
Handle<String> source, Handle<Object> name, int line_offset,
- int column_offset, bool is_embedder_debug_script,
- bool is_shared_cross_origin, Handle<Context> context,
- LanguageMode language_mode);
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Context> context, LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index 9ea6a7a878..e20015ca2f 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -117,5 +117,5 @@ void CompilationDependencies::AssumeTransitionStable(
Insert(DependentCode::kAllocationSiteTransitionChangedGroup, site);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 165bf4f2e4..0f3ebe0e67 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -114,18 +114,19 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
if (isolate_->debug()->is_active()) MarkAsDebug();
if (FLAG_context_specialization) MarkAsContextSpecializing();
- if (FLAG_turbo_builtin_inlining) MarkAsBuiltinInliningEnabled();
- if (FLAG_turbo_deoptimization) MarkAsDeoptimizationEnabled();
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
if (FLAG_turbo_types) MarkAsTypingEnabled();
- if (has_shared_info() && shared_info()->is_compiled()) {
- // We should initialize the CompilationInfo feedback vector from the
- // passed in shared info, rather than creating a new one.
- feedback_vector_ = Handle<TypeFeedbackVector>(
- shared_info()->feedback_vector(), parse_info->isolate());
+ if (has_shared_info()) {
+ if (shared_info()->is_compiled()) {
+ // We should initialize the CompilationInfo feedback vector from the
+ // passed in shared info, rather than creating a new one.
+ feedback_vector_ = Handle<TypeFeedbackVector>(
+ shared_info()->feedback_vector(), parse_info->isolate());
+ }
+ if (shared_info()->never_compiled()) MarkAsFirstCompile();
}
}
@@ -155,7 +156,8 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
parameter_count_(0),
optimization_id_(-1),
- osr_expr_stack_height_(0) {}
+ osr_expr_stack_height_(0),
+ function_type_(nullptr) {}
CompilationInfo::~CompilationInfo() {
@@ -175,6 +177,14 @@ int CompilationInfo::num_parameters() const {
}
+int CompilationInfo::num_parameters_including_this() const {
+ return num_parameters() + (is_this_defined() ? 1 : 0);
+}
+
+
+bool CompilationInfo::is_this_defined() const { return !IsStub(); }
+
+
int CompilationInfo::num_heap_slots() const {
return has_scope() ? scope()->num_heap_slots() : 0;
}
@@ -215,8 +225,7 @@ bool CompilationInfo::is_simple_parameter_list() {
bool CompilationInfo::MayUseThis() const {
- return scope()->uses_this() || scope()->inner_uses_this() ||
- scope()->calls_sloppy_eval();
+ return scope()->has_this_declaration() && scope()->receiver()->is_used();
}
@@ -273,6 +282,14 @@ void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
}
+Handle<Code> CompilationInfo::GenerateCodeStub() {
+ // Run a "mini pipeline", extracted from compiler.cc.
+ CHECK(Parser::ParseStatic(parse_info()));
+ CHECK(Compiler::Analyze(parse_info()));
+ return compiler::Pipeline(this).GenerateCode();
+}
+
+
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -359,9 +376,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
DCHECK(info()->shared_info()->has_deoptimization_support());
+ DCHECK(!info()->is_first_compile());
// Check the enabling conditions for TurboFan.
+ bool dont_crankshaft = info()->shared_info()->dont_crankshaft();
if (((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
+ (dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0) ||
info()->closure()->PassesFilter(FLAG_turbo_filter)) &&
(FLAG_turbo_osr || !info()->is_osr())) {
// Use TurboFan for the compilation.
@@ -374,11 +394,16 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
if (info()->shared_info()->asm_function()) {
+ if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
info()->MarkAsContextSpecializing();
} else if (FLAG_turbo_type_feedback) {
info()->MarkAsTypeFeedbackEnabled();
info()->EnsureFeedbackVector();
}
+ if (!info()->shared_info()->asm_function() ||
+ FLAG_turbo_asm_deoptimization) {
+ info()->MarkAsDeoptimizationEnabled();
+ }
Timer t(this, &time_taken_to_create_graph_);
compiler::Pipeline pipeline(info());
@@ -388,7 +413,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
}
- if (!isolate()->use_crankshaft()) {
+ if (!isolate()->use_crankshaft() || dont_crankshaft) {
// Crankshaft is entirely disabled.
return SetLastStatus(FAILED);
}
@@ -488,7 +513,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
// TODO(turbofan): Currently everything is done in the first phase.
if (!info()->code().is_null()) {
info()->dependencies()->Commit(info()->code());
- if (FLAG_turbo_deoptimization) {
+ if (info()->is_deoptimization_enabled()) {
info()->parse_info()->context()->native_context()->AddOptimizedCode(
*info()->code());
}
@@ -660,7 +685,6 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
// Update the code and feedback vector for the shared function info.
shared->ReplaceCode(*info->code());
- if (shared->optimization_disabled()) info->code()->set_optimizable(false);
shared->set_feedback_vector(*info->feedback_vector());
return info->code();
@@ -669,26 +693,16 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
Handle<JSFunction> function, BailoutId osr_ast_id) {
- if (FLAG_cache_optimized_code) {
- Handle<SharedFunctionInfo> shared(function->shared());
- // Bound functions are not cached.
- if (shared->bound()) return MaybeHandle<Code>();
- DisallowHeapAllocation no_gc;
- int index = shared->SearchOptimizedCodeMap(
- function->context()->native_context(), osr_ast_id);
- if (index > 0) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- if (!osr_ast_id.IsNone()) {
- PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
- }
- PrintF("]\n");
- }
- FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
- if (literals != NULL) function->set_literals(literals);
- return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
- }
+ Handle<SharedFunctionInfo> shared(function->shared());
+ DisallowHeapAllocation no_gc;
+ CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+ function->context()->native_context(), osr_ast_id);
+ if (cached.code != nullptr) {
+ // Caching of optimized code is enabled and optimized code was found.
+ if (cached.literals != nullptr) function->set_literals(cached.literals);
+ DCHECK(!cached.code->marked_for_deoptimization());
+ DCHECK(function->shared()->is_compiled());
+ return Handle<Code>(cached.code);
}
return MaybeHandle<Code>();
}
@@ -699,19 +713,33 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
// Context specialization folds-in the context, so no sharing can occur.
- if (code->is_turbofanned() && info->is_context_specializing()) return;
+ if (info->is_context_specializing()) return;
+ // Frame specialization implies context specialization.
+ DCHECK(!info->is_frame_specializing());
- // Cache optimized code.
+ // Do not cache bound functions.
+ Handle<JSFunction> function = info->closure();
+ if (function->shared()->bound()) return;
+
+ // Cache optimized context-specific code.
if (FLAG_cache_optimized_code) {
- Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
- // Do not cache bound functions.
- if (shared->bound()) return;
Handle<FixedArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
literals, info->osr_ast_id());
}
+
+ // Do not cache context-independent code compiled for OSR.
+ if (code->is_turbofanned() && info->is_osr()) return;
+
+ // Cache optimized context-independent code.
+ if (FLAG_turbo_cache_shared_code && code->is_turbofanned()) {
+ DCHECK(!info->is_context_specializing());
+ DCHECK(info->osr_ast_id().IsNone());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(shared, code);
+ }
}
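Illustration (a standalone sketch, not part of the patch; CodeInfo and CachingDecision are hypothetical stand-ins for CompilationInfo/Code state, and the FLAG_cache_optimized_code / FLAG_turbo_cache_shared_code checks are elided): the caching policy above, in decision order.

#include <cstdio>

struct CodeInfo {
  bool context_specializing;  // context folded in; sharing impossible
  bool bound_function;        // bound functions are never cached
  bool turbofanned;           // produced by TurboFan
  bool osr;                   // compiled for on-stack replacement
};

// Mirrors InsertCodeIntoOptimizedCodeMap: context-specialized code is never
// cached, bound functions are skipped, context-specific code goes into the
// per-native-context map, and only non-OSR TurboFan code is additionally
// cached context-independently on the SharedFunctionInfo.
const char* CachingDecision(const CodeInfo& c) {
  if (c.context_specializing) return "no caching";
  if (c.bound_function) return "no caching";
  if (c.turbofanned && !c.osr) return "context map + shared code cache";
  return "context map only";
}

int main() {
  std::printf("%s\n", CachingDecision({false, false, true, false}));
  return 0;
}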
@@ -725,7 +753,7 @@ static bool Renumber(ParseInfo* parse_info) {
FunctionLiteral* lit = parse_info->function();
shared_info->set_ast_node_count(lit->ast_node_count());
MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
- shared_info->set_dont_cache(lit->flags()->Contains(kDontCache));
+ shared_info->set_dont_crankshaft(lit->flags()->Contains(kDontCrankshaft));
}
return true;
}
@@ -835,7 +863,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
// If the debugger is active, do not compile with turbofan unless we can
// deopt from turbofan code.
if (FLAG_turbo_asm && function->shared()->asm_function() &&
- (FLAG_turbo_deoptimization || !isolate->debug()->is_active()) &&
+ (FLAG_turbo_asm_deoptimization || !isolate->debug()->is_active()) &&
!FLAG_turbo_osr) {
CompilationInfoWithZone info(function);
@@ -932,6 +960,8 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
shared->EnableDeoptimizationSupport(*unoptimized.code());
shared->set_feedback_vector(*unoptimized.feedback_vector());
+ info->MarkAsCompiled();
+
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
@@ -994,6 +1024,8 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
PostponeInterruptsScope postpone(info.isolate());
VMState<COMPILER> state(info.isolate());
+ // Get rid of old list of shared function infos.
+ info.MarkAsFirstCompile();
info.parse_info()->set_global();
if (!Parser::ParseStatic(info.parse_info())) return;
@@ -1054,6 +1086,8 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
}
}
+ info->MarkAsFirstCompile();
+
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
@@ -1080,7 +1114,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
- result->set_script(*script);
+ SharedFunctionInfo::SetScript(result, script);
result->set_is_toplevel(true);
Handle<String> script_name = script->name()->IsString()
@@ -1153,10 +1187,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// If caller is strict mode, the result must be in strict mode as well.
DCHECK(is_sloppy(language_mode) ||
is_strict(shared_info->language_mode()));
- if (!shared_info->dont_cache()) {
- compilation_cache->PutEval(source, outer_info, context, shared_info,
- scope_position);
- }
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ scope_position);
}
} else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
@@ -1169,9 +1201,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<SharedFunctionInfo> Compiler::CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, bool is_embedder_debug_script,
- bool is_shared_cross_origin, Handle<Object> source_map_url,
- Handle<Context> context, v8::Extension* extension, ScriptData** cached_data,
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Object> source_map_url, Handle<Context> context,
+ v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
bool is_module) {
Isolate* isolate = source->GetIsolate();
@@ -1206,9 +1238,8 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
if (extension == NULL) {
// First check per-isolate compilation cache.
maybe_result = compilation_cache->LookupScript(
- source, script_name, line_offset, column_offset,
- is_embedder_debug_script, is_shared_cross_origin, context,
- language_mode);
+ source, script_name, line_offset, column_offset, resource_options,
+ context, language_mode);
if (maybe_result.is_null() && FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kConsumeCodeCache &&
!isolate->debug()->is_loaded()) {
@@ -1218,7 +1249,6 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
if (CodeSerializer::Deserialize(isolate, *cached_data, source)
.ToHandle(&result)) {
// Promote to per-isolate compilation cache.
- DCHECK(!result->dont_cache());
compilation_cache->PutScript(source, context, language_mode, result);
return result;
}
@@ -1245,8 +1275,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
script->set_line_offset(Smi::FromInt(line_offset));
script->set_column_offset(Smi::FromInt(column_offset));
}
- script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_is_embedder_debug_script(is_embedder_debug_script);
+ script->set_origin_options(resource_options);
if (!source_map_url.is_null()) {
script->set_source_mapping_url(*source_map_url);
}
@@ -1274,7 +1303,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
parse_info.set_language_mode(
static_cast<LanguageMode>(info.language_mode() | language_mode));
result = CompileToplevel(&info);
- if (extension == NULL && !result.is_null() && !result->dont_cache()) {
+ if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, context, language_mode, result);
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
@@ -1315,10 +1344,29 @@ Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
}
-Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script,
CompilationInfo* outer_info) {
// Precondition: code has been parsed and scopes have been analyzed.
+ Isolate* isolate = outer_info->isolate();
+ MaybeHandle<SharedFunctionInfo> maybe_existing;
+ if (outer_info->is_first_compile()) {
+ // On the first compile, there is no existing shared function info for
+ // inner functions yet, so do not try to find them. All bets are off for
+ // live edit, though.
+ DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
+ isolate->debug()->live_edit_enabled());
+ } else {
+ maybe_existing = script->FindSharedFunctionInfo(literal);
+ }
+ // If we found an existing shared function info and it is already
+ // compiled, simply return it. If it is not yet compiled, continue below
+ // to decide whether to compile it eagerly.
+ Handle<SharedFunctionInfo> existing;
+ if (maybe_existing.ToHandle(&existing) && existing->is_compiled()) {
+ return existing;
+ }
+
Zone zone;
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
@@ -1326,9 +1374,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
parse_info.set_scope(literal->scope());
parse_info.set_language_mode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
+ if (outer_info->is_first_compile()) info.MarkAsFirstCompile();
- Isolate* isolate = info.isolate();
- Factory* factory = isolate->factory();
LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
@@ -1351,9 +1398,11 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
DCHECK(allow_lazy);
}
+ bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
+
// Generate code
Handle<ScopeInfo> scope_info;
- if (FLAG_lazy && allow_lazy && !literal->should_eager_compile()) {
+ if (lazy) {
Handle<Code> code = isolate->builtins()->CompileLazy();
info.SetCode(code);
// There's no need in theory for a lazy-compiled function to have a type
@@ -1379,57 +1428,82 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
return Handle<SharedFunctionInfo>::null();
}
- // Create a shared function info object.
- Handle<SharedFunctionInfo> result = factory->NewSharedFunctionInfo(
- literal->name(), literal->materialized_literal_count(), literal->kind(),
- info.code(), scope_info, info.feedback_vector());
-
- SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
- result->set_script(*script);
- result->set_is_toplevel(false);
+ if (maybe_existing.is_null()) {
+ // Create a shared function info object.
+ Handle<SharedFunctionInfo> result =
+ isolate->factory()->NewSharedFunctionInfo(
+ literal->name(), literal->materialized_literal_count(),
+ literal->kind(), info.code(), scope_info, info.feedback_vector());
+
+ SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
+ SharedFunctionInfo::SetScript(result, script);
+ result->set_is_toplevel(false);
+ // If the outer function has been compiled before, the shared function
+ // info for this function literal may already exist and may even have
+ // been compiled previously.
+ result->set_never_compiled(outer_info->is_first_compile() && lazy);
+
+ if (literal->scope()->new_target_var() != nullptr) {
+ Handle<Code> stub(isolate->builtins()->JSConstructStubNewTarget());
+ result->set_construct_stub(*stub);
+ }
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
- result->set_allows_lazy_compilation(literal->AllowsLazyCompilation());
- result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
+ result->set_allows_lazy_compilation(literal->AllowsLazyCompilation());
+ result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(result,
- literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
- return result;
+ // Set the expected number of properties for instances and return
+ // the resulting function.
+ SetExpectedNofPropertiesFromEstimate(result,
+ literal->expected_property_count());
+ live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
+ return result;
+ } else if (!lazy) {
+ // We have additional data from compilation now.
+ DCHECK(!existing->is_compiled());
+ existing->ReplaceCode(*info.code());
+ existing->set_scope_info(*scope_info);
+ existing->set_feedback_vector(*info.feedback_vector());
+ }
+ return existing;
}
MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
Handle<Code> current_code,
ConcurrencyMode mode,
- BailoutId osr_ast_id) {
+ BailoutId osr_ast_id,
+ JavaScriptFrame* osr_frame) {
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeMap(
function, osr_ast_id).ToHandle(&cached_code)) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ if (!osr_ast_id.IsNone()) {
+ PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+ }
+ PrintF("]\n");
+ }
return cached_code;
}
- SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
- Isolate* isolate = info->isolate();
+ Isolate* isolate = function->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
- VMState<COMPILER> state(isolate);
- DCHECK(!isolate->has_pending_exception());
- PostponeInterruptsScope postpone(isolate);
- Handle<SharedFunctionInfo> shared = info->shared_info();
- if (shared->code()->kind() != Code::FUNCTION ||
- ScopeInfo::Empty(isolate) == shared->scope_info()) {
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (!shared->is_compiled() ||
+ shared->scope_info() == ScopeInfo::Empty(isolate)) {
// The function was never compiled. Compile it unoptimized first.
// TODO(titzer): reuse the AST and scope info from this compile.
- CompilationInfoWithZone nested(function);
- nested.EnableDeoptimizationSupport();
- if (!GetUnoptimizedCodeCommon(&nested).ToHandle(&current_code)) {
+ CompilationInfoWithZone unoptimized(function);
+ unoptimized.EnableDeoptimizationSupport();
+ if (!GetUnoptimizedCodeCommon(&unoptimized).ToHandle(&current_code)) {
return MaybeHandle<Code>();
}
shared->ReplaceCode(*current_code);
}
+
current_code->set_profiler_ticks(0);
// TODO(mstarzinger): We cannot properly deserialize a scope chain containing
@@ -1444,6 +1518,11 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
+ SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
+ VMState<COMPILER> state(isolate);
+ DCHECK(!isolate->has_pending_exception());
+ PostponeInterruptsScope postpone(isolate);
+
info->SetOptimizing(osr_ast_id, current_code);
if (mode == CONCURRENT) {
@@ -1452,6 +1531,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
return isolate->builtins()->InOptimizationQueue();
}
} else {
+ info->set_osr_frame(osr_frame);
if (GetOptimizedCodeNow(info.get())) return info->code();
}
@@ -1487,8 +1567,8 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
job->RetryOptimization(kDebuggerHasBreakPoints);
} else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
- if (info->shared_info()->SearchOptimizedCodeMap(
- info->context()->native_context(), info->osr_ast_id()) == -1) {
+ if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
+ info->osr_ast_id()).code == nullptr) {
InsertCodeIntoOptimizedCodeMap(info.get());
}
if (FLAG_trace_opt) {
@@ -1556,4 +1636,5 @@ void CompilationInfo::PrintAstForTesting() {
PrettyPrinter(isolate(), zone()).PrintProgram(function()));
}
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index ff3a86a10c..45863f6b28 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -9,6 +9,7 @@
#include "src/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
+#include "src/signature.h"
#include "src/zone.h"
namespace v8 {
@@ -16,6 +17,7 @@ namespace internal {
class AstValueFactory;
class HydrogenCodeStub;
+class JavaScriptFrame;
class ParseInfo;
class ScriptData;
@@ -121,14 +123,15 @@ class CompilationInfo {
kCompilingForDebugging = 1 << 7,
kSerializing = 1 << 8,
kContextSpecializing = 1 << 9,
- kInliningEnabled = 1 << 10,
- kTypingEnabled = 1 << 11,
- kDisableFutureOptimization = 1 << 12,
- kSplittingEnabled = 1 << 13,
- kBuiltinInliningEnabled = 1 << 14,
+ kFrameSpecializing = 1 << 10,
+ kInliningEnabled = 1 << 11,
+ kTypingEnabled = 1 << 12,
+ kDisableFutureOptimization = 1 << 13,
+ kSplittingEnabled = 1 << 14,
kTypeFeedbackEnabled = 1 << 15,
kDeoptimizationEnabled = 1 << 16,
- kSourcePositionsEnabled = 1 << 17
+ kSourcePositionsEnabled = 1 << 17,
+ kFirstCompile = 1 << 18,
};
explicit CompilationInfo(ParseInfo* parse_info);
@@ -165,6 +168,8 @@ class CompilationInfo {
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
+ int num_parameters_including_this() const;
+ bool is_this_defined() const;
int num_heap_slots() const;
Code::Flags flags() const;
bool has_scope() const { return scope() != nullptr; }
@@ -214,6 +219,10 @@ class CompilationInfo {
bool is_context_specializing() const { return GetFlag(kContextSpecializing); }
+ void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
+
+ bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
+
void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
bool is_type_feedback_enabled() const {
@@ -236,12 +245,6 @@ class CompilationInfo {
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
- void MarkAsBuiltinInliningEnabled() { SetFlag(kBuiltinInliningEnabled); }
-
- bool is_builtin_inlining_enabled() const {
- return GetFlag(kBuiltinInliningEnabled);
- }
-
void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
@@ -250,6 +253,12 @@ class CompilationInfo {
bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
+ void MarkAsFirstCompile() { SetFlag(kFirstCompile); }
+
+ void MarkAsCompiled() { SetFlag(kFirstCompile, false); }
+
+ bool is_first_compile() const { return GetFlag(kFirstCompile); }
+
bool IsCodePreAgingActive() const {
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
!is_debug();
@@ -293,6 +302,11 @@ class CompilationInfo {
optimization_id_ = isolate()->NextOptimizationId();
}
+ void SetFunctionType(Type::FunctionType* function_type) {
+ function_type_ = function_type;
+ }
+ Type::FunctionType* function_type() const { return function_type_; }
+
void SetStub(CodeStub* code_stub) {
SetMode(STUB);
code_stub_ = code_stub;
@@ -380,6 +394,8 @@ class CompilationInfo {
DCHECK(height >= 0);
osr_expr_stack_height_ = height;
}
+ JavaScriptFrame* osr_frame() const { return osr_frame_; }
+ void set_osr_frame(JavaScriptFrame* osr_frame) { osr_frame_ = osr_frame; }
#if DEBUG
void PrintAstForTesting();
@@ -387,6 +403,16 @@ class CompilationInfo {
bool is_simple_parameter_list();
+ Handle<Code> GenerateCodeStub();
+
+ typedef std::vector<Handle<SharedFunctionInfo>> InlinedFunctionList;
+ InlinedFunctionList const& inlined_functions() const {
+ return inlined_functions_;
+ }
+ void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function) {
+ inlined_functions_.push_back(inlined_function);
+ }
+
protected:
ParseInfo* parse_info_;
@@ -461,6 +487,8 @@ class CompilationInfo {
std::vector<InlinedFunctionInfo> inlined_function_infos_;
bool track_positions_;
+ InlinedFunctionList inlined_functions_;
+
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
int opt_count_;
@@ -472,6 +500,11 @@ class CompilationInfo {
int osr_expr_stack_height_;
+ // The current OSR frame for specialization or {nullptr}.
+ JavaScriptFrame* osr_frame_ = nullptr;
+
+ Type::FunctionType* function_type_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -620,7 +653,7 @@ class Compiler : public AllStatic {
// Compile a String source within a context.
static Handle<SharedFunctionInfo> CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, bool is_debugger_script, bool is_shared_cross_origin,
+ int column_offset, ScriptOriginOptions resource_options,
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
@@ -631,9 +664,8 @@ class Compiler : public AllStatic {
int source_length);
// Create a shared function info object (the code may be lazily compiled).
- static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
- Handle<Script> script,
- CompilationInfo* outer);
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
+ FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
@@ -641,10 +673,9 @@ class Compiler : public AllStatic {
// In the latter case, return the InOptimizationQueue builtin. On failure,
// return the empty handle.
MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
- Handle<JSFunction> function,
- Handle<Code> current_code,
- ConcurrencyMode mode,
- BailoutId osr_ast_id = BailoutId::None());
+ Handle<JSFunction> function, Handle<Code> current_code,
+ ConcurrencyMode mode, BailoutId osr_ast_id = BailoutId::None(),
+ JavaScriptFrame* osr_frame = nullptr);
// Generate and return code from previously queued optimization job.
// On failure, return the empty handle.
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 0b1769bc4a..5046fef593 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -38,6 +38,13 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
// static
+FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
+ return {kTaggedBase, JSFunction::kSharedFunctionInfoOffset, Handle<Name>(),
+ Type::Any(), kMachAnyTagged};
+}
+
+
+// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
Type::UntaggedPointer(), kMachPtr};
@@ -45,9 +52,21 @@ FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
// static
+FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
+ return {kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), Type::Number(), kMachAnyTagged};
+}
+
+
+// static
FieldAccess AccessBuilder::ForFixedArrayLength() {
+ // TODO(turbofan): 2^30 is a valid upper limit for the FixedArray::length
+ // field, although it's not the best. If we had a Zone we could create an
+ // appropriate range type instead.
+ STATIC_ASSERT(FixedArray::kMaxLength <= 1 << 30);
return {kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- Type::TaggedSigned(), kMachAnyTagged};
+ Type::Intersect(Type::Unsigned30(), Type::TaggedSigned()),
+ kMachAnyTagged};
}
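Illustration (simplified stand-in types and a hypothetical field; the real FieldAccess declared with the simplified operators is richer): each AccessBuilder helper returns an aggregate describing base taggedness, byte offset, an optional debug name, the value type, and the machine representation.

#include <cstddef>

enum BaseTaggedness { kUntaggedBase, kTaggedBase };
enum MachineType { kMachAnyTagged, kMachUint32 };

struct FieldAccess {
  BaseTaggedness base_is_tagged;  // is the base a tagged HeapObject pointer?
  size_t offset;                  // byte offset of the field from the base
  const char* name;               // optional debug name
  const char* type;               // value type known to the typer
  MachineType machine_type;       // representation used for loads/stores
};

// Shaped like the helpers above, for a hypothetical field at offset 8.
FieldAccess ForExampleField() {
  return {kTaggedBase, 8, "example", "Any", kMachAnyTagged};
}

int main() { return ForExampleField().offset == 8 ? 0 : 1; }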
@@ -59,6 +78,34 @@ FieldAccess AccessBuilder::ForExternalArrayPointer() {
// static
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
+ return {kTaggedBase, DescriptorArray::kEnumCacheOffset, Handle<Name>(),
+ Type::TaggedPointer(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
+ return {kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset,
+ Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapBitField3() {
+ return {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
+ Type::UntaggedUnsigned32(), kMachUint32};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapDescriptors() {
+ return {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
+ Type::TaggedPointer(), kMachAnyTagged};
+}
+
+
+// static
FieldAccess AccessBuilder::ForMapInstanceType() {
return {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
Type::UntaggedUnsigned8(), kMachUint8};
@@ -89,15 +136,16 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
// static
-FieldAccess AccessBuilder::ForStatsCounter() {
- return {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(), kMachInt32};
+FieldAccess AccessBuilder::ForPropertyCellValue() {
+ return {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
}
// static
-FieldAccess AccessBuilder::ForPropertyCellValue() {
- return {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
+ return {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
+ Handle<Name>(), Type::Any(), kMachAnyTagged};
}
@@ -150,6 +198,26 @@ ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
return {kUntaggedBase, 0, Type::None(), kMachNone};
}
+
+// static
+FieldAccess AccessBuilder::ForStatsCounter() {
+ return {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(), kMachInt32};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFrameCallerFramePtr() {
+ return {kUntaggedBase, StandardFrameConstants::kCallerFPOffset,
+ MaybeHandle<Name>(), Type::Internal(), kMachPtr};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFrameMarker() {
+ return {kUntaggedBase, StandardFrameConstants::kMarkerOffset,
+ MaybeHandle<Name>(), Type::Tagged(), kMachAnyTagged};
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 3939f83026..240ffdcb5d 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -16,6 +16,9 @@ namespace compiler {
// parameters to simplified load/store operators.
class AccessBuilder final : public AllStatic {
public:
+ // ===========================================================================
+ // Access to heap object fields and elements (based on tagged pointer).
+
// Provides access to HeapObject::map() field.
static FieldAccess ForMap();
@@ -28,15 +31,33 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSFunction::context() field.
static FieldAccess ForJSFunctionContext();
+ // Provides access to JSFunction::shared() field.
+ static FieldAccess ForJSFunctionSharedFunctionInfo();
+
// Provides access to JSArrayBuffer::backing_store() field.
static FieldAccess ForJSArrayBufferBackingStore();
+ // Provides access to JSDate fields.
+ static FieldAccess ForJSDateField(JSDate::FieldIndex index);
+
// Provides access to FixedArray::length() field.
static FieldAccess ForFixedArrayLength();
// Provides access to ExternalArray::external_pointer() field.
static FieldAccess ForExternalArrayPointer();
+ // Provides access to DescriptorArray::enum_cache() field.
+ static FieldAccess ForDescriptorArrayEnumCache();
+
+ // Provides access to DescriptorArray::enum_cache_bridge_cache() field.
+ static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
+
+ // Provides access to Map::bit_field3() field.
+ static FieldAccess ForMapBitField3();
+
+ // Provides access to Map::descriptors() field.
+ static FieldAccess ForMapDescriptors();
+
// Provides access to Map::instance_type() field.
static FieldAccess ForMapInstanceType();
@@ -49,12 +70,12 @@ class AccessBuilder final : public AllStatic {
// Provides access Context slots.
static FieldAccess ForContextSlot(size_t index);
- // Provides access to the backing store of a StatsCounter.
- static FieldAccess ForStatsCounter();
-
// Provides access to PropertyCell::value() field.
static FieldAccess ForPropertyCellValue();
+ // Provides access to SharedFunctionInfo::feedback_vector() field.
+ static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
+
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
@@ -62,9 +83,24 @@ class AccessBuilder final : public AllStatic {
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
- // Provides access to the charaters of sequential strings.
+ // Provides access to the characters of sequential strings.
static ElementAccess ForSeqStringChar(String::Encoding encoding);
+ // ===========================================================================
+ // Access to global per-isolate variables (based on external reference).
+
+ // Provides access to the backing store of a StatsCounter.
+ static FieldAccess ForStatsCounter();
+
+ // ===========================================================================
+ // Access to activation records on the stack (based on frame pointer).
+
+ // Provides access to the next frame pointer in a stack frame.
+ static FieldAccess ForFrameCallerFramePtr();
+
+ // Provides access to the marker in a stack frame.
+ static FieldAccess ForFrameMarker();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index cc93cf4451..0c97f846f0 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -304,10 +304,6 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ LeaveFrame(StackFrame::MANUAL);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
}
}
@@ -375,6 +371,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -405,6 +417,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), sp);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -804,6 +820,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Push(i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmPoke: {
+ int const slot = MiscField::decode(instr->opcode());
+ __ str(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmStoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -928,7 +950,7 @@ void CodeGenerator::AssemblePrologue() {
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
bool saved_pp;
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
__ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
@@ -938,23 +960,35 @@ void CodeGenerator::AssemblePrologue() {
__ mov(fp, sp);
saved_pp = false;
}
+ int register_save_area_size = saved_pp ? kPointerSize : 0;
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0 || saved_pp) {
// Save callee-saved registers.
- int register_save_area_size = saved_pp ? kPointerSize : 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
__ stm(db_w, sp, saves);
+ register_save_area_size +=
+ kPointerSize * base::bits::CountPopulation32(saves);
+ }
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ // Save callee-saved FP registers.
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+
+ __ vstm(db_w, sp, DwVfpRegister::from_code(first),
+ DwVfpRegister::from_code(last));
+ register_save_area_size += 2 * kPointerSize * (last - first + 1);
+ }
+ if (register_save_area_size > 0) {
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
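Illustration (standalone sketch; portable bit-count loops stand in for base::bits): the FP-save sequence in the prologue above derives one contiguous d-register range from the callee-saved FP RegList, so a single vstm/vldm can save and restore it. For the ARM ABI set d8..d15 (bits 8..15):

#include <cassert>
#include <cstdint>

static uint32_t clz32(uint32_t x) {  // assumes x != 0
  uint32_t n = 0;
  while ((x & 0x80000000u) == 0) { x <<= 1; ++n; }
  return n;
}
static uint32_t ctz32(uint32_t x) {  // assumes x != 0
  uint32_t n = 0;
  while ((x & 1u) == 0) { x >>= 1; ++n; }
  return n;
}

int main() {
  uint32_t saves_fp = 0x0000FF00u;       // d8..d15, as in linkage-arm.cc
  uint32_t first = ctz32(saves_fp);      // lowest register: 8
  uint32_t last = 31 - clz32(saves_fp);  // highest register: 15
  assert(first == 8 && last == 15);
  // With clz == 16 for this set, the patch's `CountLeadingZeros32(x) - 1`
  // also yields 15; the DCHECK on the popcount guards contiguity.
  assert(clz32(saves_fp) - 1 == last);
  // Each d-register occupies two pointer-sized (4-byte) save-area slots.
  assert(2 * 4 * (last - first + 1) == 64);
  return 0;
}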
@@ -991,6 +1025,15 @@ void CodeGenerator::AssembleReturn() {
if (stack_slots > 0) {
__ add(sp, sp, Operand(stack_slots * kPointerSize));
}
+ // Restore FP registers.
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
+ DwVfpRegister::from_code(last));
+ }
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
@@ -999,13 +1042,23 @@ void CodeGenerator::AssembleReturn() {
}
__ LeaveFrame(StackFrame::MANUAL);
__ Ret();
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
- __ Ret();
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ __ LeaveFrame(StackFrame::MANUAL);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count != 0) {
+ __ Drop(pop_count);
+ }
+ __ Ret();
+ }
} else {
__ Ret();
}
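Illustration (toy label/emitter model; the real MacroAssembler Label works analogously): canonicalizing return sites means the first return emits the epilogue and binds return_label_, and every later return becomes a single branch to it, shrinking code with many exits.

#include <cstdio>
#include <string>
#include <vector>

struct Label {
  int pos = -1;
  bool is_bound() const { return pos >= 0; }
};

struct ToyAssembler {
  std::vector<std::string> code;
  Label return_label_;
  void AssembleReturn() {
    if (return_label_.is_bound()) {
      code.push_back("b return_site");  // reuse the canonical epilogue
    } else {
      return_label_.pos = static_cast<int>(code.size());
      code.push_back("return_site: leave frame");
      code.push_back("drop <pop_count> args");
      code.push_back("ret");
    }
  }
};

int main() {
  ToyAssembler masm;
  masm.AssembleReturn();  // first return: emits the epilogue and binds
  masm.AssembleReturn();  // later returns: one branch each
  for (const std::string& line : masm.code) std::printf("%s\n", line.c_str());
  return 0;
}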
@@ -1215,7 +1268,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
}
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 377fd8672c..c210c171e4 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -93,6 +93,7 @@ namespace compiler {
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
+ V(ArmPoke) \
V(ArmStoreWriteBarrier)
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 56b155da45..8855388048 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -914,6 +914,17 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kArmVcvtS32F64, node);
+ }
+ UNREACHABLE();
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
@@ -1096,16 +1107,37 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// TODO(turbofan): on ARM it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value = g.UseRegister(node);
+ Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ }
}
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -1113,18 +1145,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
@@ -1142,9 +1177,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
@@ -1153,8 +1186,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
// heuristics in the register allocator for where to emit constants.
InitializeCallBuffer(node, &buffer, true, false);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
diff --git a/deps/v8/src/compiler/arm/linkage-arm.cc b/deps/v8/src/compiler/arm/linkage-arm.cc
index 2b1faa2aca..a923f1bf8d 100644
--- a/deps/v8/src/compiler/arm/linkage-arm.cc
+++ b/deps/v8/src/compiler/arm/linkage-arm.cc
@@ -23,11 +23,17 @@ struct ArmLinkageHelperTraits {
return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
r10.bit();
}
+ static RegList CCalleeSaveFPRegisters() {
+ return (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) |
+ (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) |
+ (1 << d14.code()) | (1 << d15.code());
+ }
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {r0, r1, r2, r3};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 4; }
+ static int CStackBackingStoreLength() { return 0; }
};
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 01de1b0735..c3e9af6a29 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -331,15 +331,17 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-#define ASSEMBLE_SHIFT(asm_instr, width) \
- do { \
- if (instr->InputAt(1)->IsRegister()) { \
- __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
- i.InputRegister##width(1)); \
- } else { \
- int64_t imm = i.InputOperand##width(1).immediate().value(); \
- __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
- } \
+#define ASSEMBLE_SHIFT(asm_instr, width) \
+ do { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
+ i.InputRegister##width(1)); \
+ } else { \
+ uint32_t imm = \
+ static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
+ imm % (width)); \
+ } \
} while (0)
@@ -349,10 +351,6 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ Mov(jssp, fp);
__ Pop(fp, lr);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
}
}
@@ -418,6 +416,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(x10);
break;
}
+ case kArchPrepareCallCFunction:
+ // We don't need kArchPrepareCallCFunction on arm64 as the instruction
+ // selector already performs a Claim to reserve space on the stack and
+ // guarantees correct alignment of the stack pointer.
+ UNREACHABLE();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters, 0);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters, 0);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
@@ -442,6 +457,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchStackPointer:
__ mov(i.OutputRegister(), masm()->StackPointer());
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -557,12 +575,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputRegister32(0));
break;
}
- // TODO(dcarney): use mvn instr??
case kArm64Not:
- __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+ __ Mvn(i.OutputRegister(), i.InputOperand(0));
break;
case kArm64Not32:
- __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
+ __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
break;
case kArm64Neg:
__ Neg(i.OutputRegister(), i.InputOperand(0));
@@ -654,6 +671,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
i.InputInt5(2));
break;
+ case kArm64Ubfiz32:
+ __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
+ i.InputInt5(2));
+ break;
case kArm64Bfi:
__ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
i.InputInt6(3));
@@ -686,7 +707,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Cmp(i.InputRegister(0), i.InputOperand(1));
break;
case kArm64Cmp32:
- __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+ __ Cmp(i.InputRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cmn:
__ Cmn(i.InputRegister(0), i.InputOperand(1));
@@ -1069,16 +1090,30 @@ void CodeGenerator::AssemblePrologue() {
__ SetStackPointer(csp);
__ Push(lr, fp);
__ Mov(fp, csp);
- // TODO(dcarney): correct callee saved registers.
- __ PushCalleeSavedRegisters();
- frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
+
+ // Save FP registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ int saved_count = saves_fp.Count();
+ __ PushCPURegList(saves_fp);
+ // Save registers.
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ // TODO(palfia): TF save list is not in sync with
+ // CPURegList::GetCalleeSaved(): x30 is missing.
+ // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
+ saved_count += saves.Count();
+ __ PushCPURegList(saves);
+
+ frame()->SetRegisterSaveAreaSize(saved_count * kPointerSize);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ SetStackPointer(jssp);
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ SetStackPointer(jssp);
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
@@ -1120,21 +1155,39 @@ void CodeGenerator::AssembleReturn() {
if (stack_slots > 0) {
__ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
}
+
// Restore registers.
- // TODO(dcarney): correct callee saved registers.
- __ PopCalleeSavedRegisters();
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ __ PopCPURegList(saves);
+
+ CPURegList saves_fp =
+ CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ __ PopCPURegList(saves_fp);
}
+
__ Mov(csp, fp);
__ Pop(fp, lr);
__ Ret();
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ Mov(jssp, fp);
- __ Pop(fp, lr);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
- __ Ret();
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+ } else {
+ __ Bind(&return_label_);
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count != 0) {
+ __ Drop(pop_count);
+ }
+ __ Ret();
+ }
} else {
__ Ret();
}
@@ -1321,7 +1374,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
}
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index d888c0b1c3..f76854611e 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -72,6 +72,7 @@ namespace compiler {
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
V(Arm64Bfi) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 26e9b8cad9..ca0ec4e400 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -37,15 +37,31 @@ class Arm64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+ // Use the provided node if it has the required value, or create a
+ // TempImmediate otherwise.
+ InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
+ if (GetIntegerConstantValue(node) == value) {
+ return UseImmediate(node);
+ }
+ return TempImmediate(value);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node);
+ }
+ DCHECK(node->opcode() == IrOpcode::kInt64Constant);
+ return OpParameter<int64_t>(node);
+ }
+
bool CanBeImmediate(Node* node, ImmediateMode mode) {
- int64_t value;
- if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
- else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
- else
- return false;
- return CanBeImmediate(value, mode);
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
}
bool CanBeImmediate(int64_t value, ImmediateMode mode) {
@@ -61,10 +77,6 @@ class Arm64OperandGenerator final : public OperandGenerator {
&ignored, &ignored, &ignored);
case kArithmeticImm:
return Assembler::IsImmAddSub(value);
- case kShift32Imm:
- return 0 <= value && value < 32;
- case kShift64Imm:
- return 0 <= value && value < 64;
case kLoadStoreImm8:
return IsLoadStoreImmediate(value, LSByte);
case kLoadStoreImm16:
@@ -75,6 +87,12 @@ class Arm64OperandGenerator final : public OperandGenerator {
return IsLoadStoreImmediate(value, LSDoubleWord);
case kNoImmediate:
return false;
+ case kShift32Imm: // Fall through.
+ case kShift64Imm:
+ // Shift operations only observe the bottom 5 or 6 bits of the value.
+ // All possible shifts can be encoded by discarding bits which have no
+ // effect.
+ return true;
}
return false;
}
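Illustration: kShift32Imm/kShift64Imm can now accept any integer constant because AArch64 shifts observe only the low 5 (or 6) bits of the amount, matching the `imm % (width)` applied in the code generator.

#include <cassert>
#include <cstdint>

// What the hardware does for a 32-bit shift: the amount is taken mod 32.
static uint32_t lsl32(uint32_t x, uint32_t amount) {
  return x << (amount & 31);
}

int main() {
  assert(lsl32(5, 33) == lsl32(5, 1));  // 33 % 32 == 1
  assert(lsl32(5, 64) == lsl32(5, 0));  // 64 % 32 == 0
  return 0;
}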
@@ -113,54 +131,46 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
}
-template <typename Matcher>
-bool TryMatchShift(InstructionSelector* selector, Node* node,
- InstructionCode* opcode, IrOpcode::Value shift_opcode,
- ImmediateMode imm_mode, AddressingMode addressing_mode) {
- if (node->opcode() != shift_opcode) return false;
+bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
+ Node* input_node, InstructionCode* opcode, bool try_ror) {
Arm64OperandGenerator g(selector);
- Matcher m(node);
- if (g.CanBeImmediate(m.right().node(), imm_mode)) {
- *opcode |= AddressingModeField::encode(addressing_mode);
- return true;
- }
- return false;
-}
+ if (!selector->CanCover(node, input_node)) return false;
+ if (input_node->InputCount() != 2) return false;
+ if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
-bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
- InstructionCode* opcode, bool try_ror) {
- return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord32Shl, kShift32Imm,
- kMode_Operand2_R_LSL_I) ||
- TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord32Shr, kShift32Imm,
- kMode_Operand2_R_LSR_I) ||
- TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord32Sar, kShift32Imm,
- kMode_Operand2_R_ASR_I) ||
- (try_ror && TryMatchShift<Int32BinopMatcher>(
- selector, node, opcode, IrOpcode::kWord32Ror,
- kShift32Imm, kMode_Operand2_R_ROR_I)) ||
- TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord64Shl, kShift64Imm,
- kMode_Operand2_R_LSL_I) ||
- TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord64Shr, kShift64Imm,
- kMode_Operand2_R_LSR_I) ||
- TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord64Sar, kShift64Imm,
- kMode_Operand2_R_ASR_I) ||
- (try_ror && TryMatchShift<Int64BinopMatcher>(
- selector, node, opcode, IrOpcode::kWord64Ror,
- kShift64Imm, kMode_Operand2_R_ROR_I));
+ switch (input_node->opcode()) {
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord64Shl:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+ return true;
+ case IrOpcode::kWord32Shr:
+ case IrOpcode::kWord64Shr:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+ return true;
+ case IrOpcode::kWord32Sar:
+ case IrOpcode::kWord64Sar:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ return true;
+ case IrOpcode::kWord32Ror:
+ case IrOpcode::kWord64Ror:
+ if (try_ror) {
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
}
bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
- Node* left_node, Node* right_node,
+ Node* node, Node* left_node, Node* right_node,
InstructionOperand* left_op,
InstructionOperand* right_op, InstructionCode* opcode) {
+ if (!selector->CanCover(node, right_node)) return false;
+
NodeMatcher nm(right_node);
if (nm.IsWord32And()) {
@@ -200,15 +210,20 @@ void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- InstructionOperand inputs[4];
+ InstructionOperand inputs[5];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- bool is_add_sub = false;
+ bool is_cmp = opcode == kArm64Cmp32;
- if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
- is_add_sub = true;
- }
+ // We can commute cmp by switching the inputs and commuting the flags
+ // continuation.
+ bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
+
+ // The cmp instruction is encoded as sub with zero output register, and
+ // therefore supports the same operand modes.
+ bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
+ m.IsInt64Sub() || is_cmp;
Node* left_node = m.left().node();
Node* right_node = m.right().node();
@@ -216,21 +231,28 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (g.CanBeImmediate(right_node, operand_mode)) {
inputs[input_count++] = g.UseRegister(left_node);
inputs[input_count++] = g.UseImmediate(right_node);
+ } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
+ cont->Commute();
+ inputs[input_count++] = g.UseRegister(right_node);
+ inputs[input_count++] = g.UseImmediate(left_node);
} else if (is_add_sub &&
- TryMatchAnyExtend(&g, selector, left_node, right_node, &inputs[0],
- &inputs[1], &opcode)) {
+ TryMatchAnyExtend(&g, selector, node, left_node, right_node,
+ &inputs[0], &inputs[1], &opcode)) {
input_count += 2;
- } else if (is_add_sub && m.HasProperty(Operator::kCommutative) &&
- TryMatchAnyExtend(&g, selector, right_node, left_node, &inputs[0],
- &inputs[1], &opcode)) {
+ } else if (is_add_sub && can_commute &&
+ TryMatchAnyExtend(&g, selector, node, right_node, left_node,
+ &inputs[0], &inputs[1], &opcode)) {
+ if (is_cmp) cont->Commute();
input_count += 2;
- } else if (TryMatchAnyShift(selector, right_node, &opcode, !is_add_sub)) {
+ } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
+ !is_add_sub)) {
Matcher m_shift(right_node);
inputs[input_count++] = g.UseRegister(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
- } else if (m.HasProperty(Operator::kCommutative) &&
- TryMatchAnyShift(selector, left_node, &opcode, !is_add_sub)) {
+ } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
+ !is_add_sub)) {
+ if (is_cmp) cont->Commute();
Matcher m_shift(left_node);
inputs[input_count++] = g.UseRegister(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
@@ -245,13 +267,16 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (!is_cmp) {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
+
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK((output_count != 0) || is_cmp);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
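Illustration: cmp is subs with the zero register as destination, so it inherits sub's operand modes, and swapping its operands is sound only if the flags continuation is commuted as well, e.g. "less than" turning into "greater than".

#include <cassert>
#include <cstdint>

int main() {
  int64_t a = 3, b = 7;
  // Commuting cmp(a, b) into cmp(b, a) must commute the condition too:
  assert((a < b) == (b > a));
  assert((a <= b) == (b >= a));
  assert((a == b) == (b == a));  // equality is symmetric, unchanged
  return 0;
}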
@@ -278,12 +303,28 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
- g.TempImmediate(-m.right().Value()));
+ g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
} else {
VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
}
}
+
+// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
+// return the value of k, otherwise return zero. This is used to reduce the
+// multiplication to addition with left shift: x + (x << k).
+template <typename Matcher>
+int32_t LeftShiftForReducedMultiply(Matcher* m) {
+ DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
+ if (m->right().HasValue() && m->right().Value() >= 3) {
+ uint64_t value_minus_one = m->right().Value() - 1;
+ if (base::bits::IsPowerOfTwo64(value_minus_one)) {
+ return WhichPowerOf2_64(value_minus_one);
+ }
+ }
+ return 0;
+}
+
} // namespace
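Illustration (a standalone re-statement of the helper above): multipliers of the form 2^k + 1 (3, 5, 9, 17, ...) let x * (2^k + 1) be emitted as an add with a shifted operand, x + (x << k).

#include <cassert>
#include <cstdint>

// Returns k if multiplier == 2^k + 1 with k > 0, else 0 (mirrors the helper).
static int LeftShiftForReducedMultiply(uint64_t multiplier) {
  if (multiplier >= 3) {
    uint64_t value_minus_one = multiplier - 1;
    if ((value_minus_one & (value_minus_one - 1)) == 0) {  // power of two?
      int k = 0;
      while (value_minus_one >>= 1) ++k;
      return k;
    }
  }
  return 0;
}

int main() {
  assert(LeftShiftForReducedMultiply(9) == 3);   // x * 9  -> x + (x << 3)
  assert(LeftShiftForReducedMultiply(17) == 4);  // x * 17 -> x + (x << 4)
  assert(LeftShiftForReducedMultiply(10) == 0);  // 10 is not 2^k + 1
  uint32_t x = 123;
  assert(x * 9 == x + (x << 3));
  return 0;
}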
@@ -548,17 +589,20 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 31)) {
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
// Ubfx cannot extract bits past the register size; however, since
// shifting the original value would have introduced some zeros, we can
// still use ubfx with a smaller mask and the remaining bits will be
// zeros.
- uint32_t lsb = mleft.right().Value();
if (lsb + mask_width > 32) mask_width = 32 - lsb;
Emit(kArm64Ubfx32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
- g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+ g.UseImmediateOrTemp(mleft.right().node(), lsb),
+ g.TempImmediate(mask_width));
return;
}
// Other cases fall through to the normal And operation.
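To see why the clamped mask width is safe, the And(Shr(x, imm), mask) computation can be checked against a software model of the emitted instruction; Ubfx32 below is a hypothetical stand-in, not a V8 API:

    #include <cassert>
    #include <cstdint>

    // Model of AArch64 UBFX (W form): extract `width` bits starting at `lsb`.
    static uint32_t Ubfx32(uint32_t x, unsigned lsb, unsigned width) {
      uint32_t field = x >> lsb;
      return width >= 32 ? field : (field & ((1u << width) - 1));
    }

    int main() {
      // And(Shr(x, 28), 0xff): the shift already zeroed the high bits, so
      // mask_width is clamped from 8 down to 32 - 28 = 4.
      uint32_t x = 0xdeadbeef;
      unsigned lsb = 28, mask_width = 8;
      if (lsb + mask_width > 32) mask_width = 32 - lsb;
      assert(Ubfx32(x, lsb, mask_width) == ((x >> 28) & 0xffu));
      return 0;
    }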
@@ -585,17 +629,20 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 63)) {
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+
// Ubfx cannot extract bits past the register size; however, since
// shifting the original value would have introduced some zeros, we can
// still use ubfx with a smaller mask and the remaining bits will be
// zeros.
- uint64_t lsb = mleft.right().Value();
if (lsb + mask_width > 64) mask_width = 64 - lsb;
Emit(kArm64Ubfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
- g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+ g.UseImmediateOrTemp(mleft.right().node(), lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
return;
}
// Other cases fall through to the normal And operation.
@@ -640,6 +687,38 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kArm64Lsl32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ } else {
+ // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
+ // contiguous and the shift immediate is non-zero.
+ Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
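The eligibility test for Ubfiz reduces to one predicate on the mask, checkable in isolation (using the GCC/Clang builtins that base::bits wraps):

    #include <cassert>
    #include <cstdint>

    // True when `mask` is a contiguous run of ones anchored at bit 0: the
    // leading zeros plus the set bits must then account for all 32 bits.
    static bool IsContiguousFromBit0(uint32_t mask) {
      unsigned mask_width = __builtin_popcount(mask);
      unsigned mask_msb = mask ? __builtin_clz(mask) : 32;
      return mask_width != 0 && mask_msb + mask_width == 32;
    }

    int main() {
      assert(IsContiguousFromBit0(0x000000ff));   // eligible for Ubfiz
      assert(!IsContiguousFromBit0(0x000000f0));  // hole below the field
      assert(!IsContiguousFromBit0(0x00010001));  // not contiguous
      return 0;
    }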
@@ -691,20 +770,21 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- uint32_t lsb = m.right().Value();
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
- uint32_t mask_width = base::bits::CountPopulation32(mask);
- uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
Arm64OperandGenerator g(this);
DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
Emit(kArm64Ubfx32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediateOrTemp(m.right().node(), lsb),
g.TempImmediate(mask_width));
return;
}
@@ -712,26 +792,43 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
} else if (TryEmitBitfieldExtract32(this, node)) {
return;
}
+
+ if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
+ CanCover(node, node->InputAt(0))) {
+ // Combine this shift with the multiply and shift that would be generated
+ // by Uint32MulHigh.
+ Arm64OperandGenerator g(this);
+ Node* left = m.left().node();
+ int shift = m.right().Value() & 0x1f;
+ InstructionOperand const smull_operand = g.TempRegister();
+ Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
+ g.UseRegister(left->InputAt(1)));
+ Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
+ g.TempImmediate(32 + shift));
+ return;
+ }
+
VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
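The fusion rests on a pure shift identity: taking the high 32 bits of the full product and then shifting right by s equals a single 64-bit shift by 32 + s. A spot check over all valid shifts:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t a = 0x89abcdefu, b = 0x12345678u;
      for (unsigned s = 0; s < 32; ++s) {
        uint64_t product = (uint64_t)a * b;
        uint32_t mulhigh = (uint32_t)(product >> 32);  // Uint32MulHigh
        assert((mulhigh >> s) == (uint32_t)(product >> (32 + s)));
      }
      return 0;
    }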
void InstructionSelector::VisitWord64Shr(Node* node) {
- Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
- if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
- uint64_t lsb = m.right().Value();
+ if (m.left().IsWord64And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x3f;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
- uint64_t mask_width = base::bits::CountPopulation64(mask);
- uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
+ Arm64OperandGenerator g(this);
DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
Emit(kArm64Ubfx, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediateOrTemp(m.right().node(), lsb),
g.TempImmediate(mask_width));
return;
}
@@ -745,6 +842,51 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
if (TryEmitBitfieldExtract32(this, node)) {
return;
}
+
+ Int32BinopMatcher m(node);
+ if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
+ CanCover(node, node->InputAt(0))) {
+ // Combine this shift with the multiply and shift that would be generated
+ // by Int32MulHigh.
+ Arm64OperandGenerator g(this);
+ Node* left = m.left().node();
+ int shift = m.right().Value() & 0x1f;
+ InstructionOperand const smull_operand = g.TempRegister();
+ Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
+ g.UseRegister(left->InputAt(1)));
+ Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
+ g.TempImmediate(32 + shift));
+ return;
+ }
+
+ if (m.left().IsInt32Add() && m.right().HasValue() &&
+ CanCover(node, node->InputAt(0))) {
+ Node* add_node = m.left().node();
+ Int32BinopMatcher madd_node(add_node);
+ if (madd_node.left().IsInt32MulHigh() &&
+ CanCover(add_node, madd_node.left().node())) {
+ // Combine the shift that would be generated by Int32MulHigh with the add
+ // on the left of this Sar operation. We do it here because the result of
+ // the add potentially needs 33 bits, and feeding it through this 32-bit
+ // Sar operation guarantees the result is truncated.
+ Arm64OperandGenerator g(this);
+ Node* mul_node = madd_node.left().node();
+
+ InstructionOperand const smull_operand = g.TempRegister();
+ Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
+ g.UseRegister(mul_node->InputAt(1)));
+
+ InstructionOperand const add_operand = g.TempRegister();
+ Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
+ add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
+ g.TempImmediate(32));
+
+ Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
+ g.UseImmediate(node->InputAt(1)));
+ return;
+ }
+ }
+
VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
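A spot check of the 33-bit argument made in the comment above: wrapping the intermediate add to 32 bits before the arithmetic shift matches the unfused Sar(Add(MulHigh(a, b), c), s) sequence (two's-complement wrap-around assumed, as on all V8 targets):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t a = INT32_MIN, b = 0x7fffffff, c = INT32_MIN;
      unsigned s = 3;
      int32_t mulhigh = (int32_t)(((int64_t)a * b) >> 32);
      // Unfused: 32-bit add (wrapping), then 32-bit arithmetic shift.
      int32_t expected = (int32_t)((uint32_t)mulhigh + (uint32_t)c) >> s;
      // Fused: 64-bit Add with operand (Smull >> 32), truncated by Asr32.
      int64_t fused = (int64_t)c + (((int64_t)a * b) >> 32);  // needs 33 bits
      int32_t actual = (int32_t)(uint32_t)fused >> s;
      assert(actual == expected);
      return 0;
    }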
@@ -776,18 +918,26 @@ void InstructionSelector::VisitInt32Add(Node* node) {
// Select Madd(x, y, z) for Add(Mul(x, y), z).
if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- Emit(kArm64Madd32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
- return;
+ // Check that the multiply cannot later be reduced to an add with shift.
+ if (LeftShiftForReducedMultiply(&mleft) == 0) {
+ Emit(kArm64Madd32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
}
- // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+ // Select Madd(x, y, z) for Add(z, Mul(x, y)).
if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- Emit(kArm64Madd32, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+ // Check that the multiply cannot later be reduced to an add with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Madd32, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}
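The guard's premise, that the form chosen by the later reduction computes the same sum a Madd would, is a one-line identity:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0x12345678u, z = 42;
      // Madd shape: z + x * 9; reduced shape: z + (x + (x << 3)).
      assert(z + x * 9 == z + (x + (x << 3)));
      return 0;
    }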
@@ -799,18 +949,26 @@ void InstructionSelector::VisitInt64Add(Node* node) {
// Select Madd(x, y, z) for Add(Mul(x, y), z).
if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
- Emit(kArm64Madd, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
- return;
+ // Check that the multiply cannot later be reduced to an add with shift.
+ if (LeftShiftForReducedMultiply(&mleft) == 0) {
+ Emit(kArm64Madd, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
}
- // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+ // Select Madd(x, y, z) for Add(z, Mul(x, y)).
if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- Emit(kArm64Madd, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+ // Check that the multiply cannot later be reduced to an add with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Madd, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}
@@ -820,13 +978,17 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
- // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+ // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- Emit(kArm64Msub32, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+ // Check that the multiply cannot later be reduced to an add with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Msub32, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
if (m.left().Is(0)) {
@@ -842,13 +1004,17 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
- // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+ // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- Emit(kArm64Msub, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+ // Check that the multiply cannot later be reduced to an add with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Msub, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
if (m.left().Is(0)) {
@@ -863,6 +1029,16 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ // First, try to reduce the multiplication to addition with left shift.
+ // x * (2^k + 1) -> x + (x << k)
+ int32_t shift = LeftShiftForReducedMultiply(&m);
+ if (shift > 0) {
+ Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift));
+ return;
+ }
+
if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
@@ -887,18 +1063,6 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
}
}
- // x * (2^k + 1) -> x + (x << k)
- if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value - 1)) {
- Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.left().node()),
- g.TempImmediate(WhichPowerOf2(value - 1)));
- return;
- }
- }
-
VisitRRR(this, kArm64Mul32, node);
}
@@ -907,6 +1071,16 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
+ // First, try to reduce the multiplication to addition with left shift.
+ // x * (2^k + 1) -> x + (x << k)
+ int32_t shift = LeftShiftForReducedMultiply(&m);
+ if (shift > 0) {
+ Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift));
+ return;
+ }
+
if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
@@ -930,24 +1104,11 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
}
}
- // x * (2^k + 1) -> x + (x << k)
- if (m.right().HasValue() && m.right().Value() > 0) {
- int64_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo64(value - 1)) {
- Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.left().node()),
- g.TempImmediate(WhichPowerOf2_64(value - 1)));
- return;
- }
- }
-
VisitRRR(this, kArm64Mul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- // TODO(arm64): Can we do better here?
Arm64OperandGenerator g(this);
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
@@ -957,7 +1118,6 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- // TODO(arm64): Can we do better here?
Arm64OperandGenerator g(this);
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
@@ -1077,16 +1237,25 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Float64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kArm64Float64ToInt32, node);
+ }
+ UNREACHABLE();
}
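The two modes agree on in-range inputs and diverge outside int32 range: kJavaScript wraps modulo 2^32 per ECMA-262 ToInt32, while kRoundToZero lowers to fcvtzs, which saturates. JsToInt32 below is an illustrative reimplementation, not the V8 runtime routine:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // ECMA-262 ToInt32: truncation toward zero plus wrap modulo 2^32.
    static int32_t JsToInt32(double d) {
      if (!std::isfinite(d)) return 0;  // NaN and infinities map to 0
      double t = std::trunc(std::fmod(d, 4294967296.0));
      if (t < 0) t += 4294967296.0;     // bring into [0, 2^32)
      return (int32_t)(uint32_t)(uint64_t)t;  // two's-complement wrap
    }

    int main() {
      assert(JsToInt32(-3.9) == -3);         // both modes agree in range
      assert(JsToInt32(4294967298.0) == 2);  // wraps; fcvtzs would saturate
      return 0;
    }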
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
- if (CanCover(node, value)) {
+ if (CanCover(node, value) && value->InputCount() >= 2) {
Int64BinopMatcher m(value);
if ((m.IsWord64Sar() && m.right().HasValue() &&
(m.right().Value() == 32)) ||
@@ -1168,24 +1337,16 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRRR(this, kArm64Float32Max, node);
-}
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRRR(this, kArm64Float64Max, node);
-}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRRR(this, kArm64Float32Min, node);
-}
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRRR(this, kArm64Float64Min, node);
-}
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -1229,8 +1390,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -1239,11 +1400,11 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
+ InitializeCallBuffer(node, &buffer, true, true);
// Push the arguments to the stack.
- bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
- int aligned_push_count = buffer.pushed_nodes.size();
+ int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
+ bool pushed_count_uneven = aligned_push_count & 1;
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -1254,7 +1415,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
// Move arguments to the stack.
{
- int slot = buffer.pushed_nodes.size() - 1;
+ int slot = aligned_push_count - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
@@ -1274,6 +1435,11 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler != nullptr) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -1281,18 +1447,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
@@ -1310,9 +1479,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
@@ -1321,8 +1488,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
// heuristics in the register allocator for where to emit constants.
InitializeCallBuffer(node, &buffer, true, false);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -1344,8 +1509,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
} else {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -1357,8 +1522,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, false);
// Push the arguments to the stack.
- bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
- int aligned_push_count = buffer.pushed_nodes.size();
+ int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
+ bool pushed_count_uneven = aligned_push_count & 1;
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
@@ -1369,7 +1534,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
}
// Move arguments to the stack.
{
- int slot = buffer.pushed_nodes.size() - 1;
+ int slot = aligned_push_count - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
@@ -1455,7 +1620,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kArm64Cmp32, cont, false, kArithmeticImm);
+ VisitBinop<Int32BinopMatcher>(selector, node, kArm64Cmp32, kArithmeticImm,
+ cont);
}
@@ -1565,6 +1731,10 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
kArithmeticImm);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
case IrOpcode::kFloat32Equal:
cont.OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(this, value, &cont);
@@ -1614,8 +1784,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
- kArithmeticImm);
+ return VisitWord32Compare(this, value, &cont);
case IrOpcode::kWord32And: {
Int32BinopMatcher m(value);
if (m.right().HasValue() &&
@@ -1802,6 +1971,12 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat32Compare(this, node, &cont);
@@ -1890,11 +2065,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32ShiftIsSafe |
diff --git a/deps/v8/src/compiler/arm64/linkage-arm64.cc b/deps/v8/src/compiler/arm64/linkage-arm64.cc
index 745eb5cde6..afedefbbd1 100644
--- a/deps/v8/src/compiler/arm64/linkage-arm64.cc
+++ b/deps/v8/src/compiler/arm64/linkage-arm64.cc
@@ -20,14 +20,22 @@ struct Arm64LinkageHelperTraits {
static Register RuntimeCallFunctionReg() { return x1; }
static Register RuntimeCallArgCountReg() { return x0; }
static RegList CCalleeSaveRegisters() {
- // TODO(dcarney): correct callee saved registers.
- return 0;
+ return (1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) |
+ (1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) |
+ (1 << x25.code()) | (1 << x26.code()) | (1 << x27.code()) |
+ (1 << x28.code()) | (1 << x29.code()) | (1 << x30.code());
+ }
+ static RegList CCalleeSaveFPRegisters() {
+ return (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) |
+ (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) |
+ (1 << d14.code()) | (1 << d15.code());
}
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
+ static int CStackBackingStoreLength() { return 0; }
};
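Each RegList above is a plain bitmask indexed by register code, here covering the AAPCS64 callee-saved ranges x19-x30 and d8-d15; RangeMask is a hypothetical helper showing how the masks compose:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t RegList;  // one bit per register code

    static RegList RangeMask(int first_code, int last_code) {
      RegList list = 0;
      for (int code = first_code; code <= last_code; ++code) {
        list |= 1u << code;
      }
      return list;
    }

    int main() {
      assert(RangeMask(19, 30) == 0x7ff80000u);  // x19..x30
      assert(RangeMask(8, 15) == 0x0000ff00u);   // d8..d15
      return 0;
    }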
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 88422886ed..341aedc099 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -335,9 +335,11 @@ class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
ControlScopeForCatch(AstGraphBuilder* owner, TryCatchBuilder* control)
: ControlScope(owner), control_(control) {
builder()->try_nesting_level_++; // Increment nesting.
+ builder()->try_catch_nesting_level_++;
}
~ControlScopeForCatch() {
builder()->try_nesting_level_--; // Decrement nesting.
+ builder()->try_catch_nesting_level_--;
}
protected:
@@ -384,6 +386,48 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
};
+// Helper for generating before and after frame states.
+class AstGraphBuilder::FrameStateBeforeAndAfter {
+ public:
+ FrameStateBeforeAndAfter(AstGraphBuilder* builder, BailoutId id_before)
+ : builder_(builder), frame_state_before_(nullptr) {
+ frame_state_before_ = id_before == BailoutId::None()
+ ? builder_->jsgraph()->EmptyFrameState()
+ : builder_->environment()->Checkpoint(id_before);
+ }
+
+ void AddToNode(Node* node, BailoutId id_after,
+ OutputFrameStateCombine combine) {
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ DCHECK_LE(count, 2);
+
+ if (count >= 1) {
+ // Add the frame state for after the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+
+ Node* frame_state_after =
+ id_after == BailoutId::None()
+ ? builder_->jsgraph()->EmptyFrameState()
+ : builder_->environment()->Checkpoint(id_after, combine);
+
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
+ }
+
+ if (count >= 2) {
+ // Add the frame state for before the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 1)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+ }
+ }
+
+ private:
+ AstGraphBuilder* builder_;
+ Node* frame_state_before_;
+};
+
+
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
JSTypeFeedbackTable* js_type_feedback)
@@ -395,19 +439,39 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
globals_(0, local_zone),
execution_control_(nullptr),
execution_context_(nullptr),
+ try_catch_nesting_level_(0),
try_nesting_level_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
- exit_control_(nullptr),
+ exit_controls_(local_zone),
loop_assignment_analysis_(loop),
state_values_cache_(jsgraph),
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
local_zone),
+ frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction,
+ info->num_parameters_including_this(),
+ info->scope()->num_stack_slots(), info->shared_info())),
js_type_feedback_(js_type_feedback) {
InitializeAstVisitor(info->isolate(), local_zone);
}
+Node* AstGraphBuilder::GetFunctionClosureForContext() {
+ Scope* declaration_scope = current_scope()->DeclarationScope();
+ if (declaration_scope->is_script_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function as
+ // their closure, not the anonymous closure containing the global code.
+ // Pass a SMI sentinel and let the runtime look up the empty function.
+ return jsgraph()->SmiConstant(0);
+ } else {
+ DCHECK(declaration_scope->is_function_scope());
+ return GetFunctionClosure();
+ }
+}
+
+
Node* AstGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
const Operator* op = common()->Parameter(
@@ -419,37 +483,26 @@ Node* AstGraphBuilder::GetFunctionClosure() {
}
-void AstGraphBuilder::CreateFunctionContext(bool constant_context) {
- function_context_.set(constant_context
- ? jsgraph()->HeapConstant(info()->context())
- : NewOuterContextParam());
-}
-
-
-Node* AstGraphBuilder::NewOuterContextParam() {
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op =
- common()->Parameter(info()->num_parameters() + 1, "%context");
- return NewNode(op, graph()->start());
-}
-
-
-Node* AstGraphBuilder::NewCurrentContextOsrValue() {
- // TODO(titzer): use a real OSR value here; a parameter works by accident.
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op =
- common()->Parameter(info()->num_parameters() + 1, "%osr-context");
- return NewNode(op, graph()->start());
+Node* AstGraphBuilder::GetFunctionContext() {
+ if (!function_context_.is_set()) {
+ // Parameter (arity + 1) is special for the outer context of the function
+ const Operator* op = common()->Parameter(
+ info()->num_parameters_including_this(), "%context");
+ Node* node = NewNode(op, graph()->start());
+ function_context_.set(node);
+ }
+ return function_context_.get();
}
-bool AstGraphBuilder::CreateGraph(bool constant_context, bool stack_check) {
+bool AstGraphBuilder::CreateGraph(bool stack_check) {
Scope* scope = info()->scope();
DCHECK(graph() != NULL);
- // Set up the basic structure of the graph.
- int parameter_count = info()->num_parameters();
- graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+ // Set up the basic structure of the graph. Outputs for {Start} are the formal
+ // parameters (including the receiver) plus context and closure.
+ int actual_parameter_count = info()->num_parameters_including_this() + 2;
+ graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
// Initialize the top-level environment.
Environment env(this, scope, graph()->start());
@@ -462,23 +515,23 @@ bool AstGraphBuilder::CreateGraph(bool constant_context, bool stack_check) {
}
// Initialize the incoming context.
- CreateFunctionContext(constant_context);
- ContextScope incoming(this, scope, function_context_.get());
+ ContextScope incoming(this, scope, GetFunctionContext());
// Initialize control scope.
ControlScope control(this);
// Build receiver check for sloppy mode if necessary.
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- Node* original_receiver = env.Lookup(scope->receiver());
- Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
- env.Bind(scope->receiver(), patched_receiver);
+ if (scope->has_this_declaration()) {
+ Node* original_receiver = env.RawParameterLookup(0);
+ Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
+ env.RawParameterBind(0, patched_receiver);
+ }
// Build function context only if there are context allocated variables.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->num_heap_slots() > 0) {
// Push a new inner context scope for the function.
- Node* inner_context = BuildLocalFunctionContext(function_context_.get());
+ Node* inner_context = BuildLocalFunctionContext(GetFunctionContext());
ContextScope top_context(this, scope, inner_context);
CreateGraphBody(stack_check);
} else {
@@ -487,7 +540,11 @@ bool AstGraphBuilder::CreateGraph(bool constant_context, bool stack_check) {
}
// Finish the basic structure of the graph.
- graph()->SetEnd(graph()->NewNode(common()->End(), exit_control()));
+ DCHECK_NE(0u, exit_controls_.size());
+ int const input_count = static_cast<int>(exit_controls_.size());
+ Node** const inputs = &exit_controls_.front();
+ Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
+ graph()->SetEnd(end);
// Compute local variable liveness information and use it to relax
// frame states.
@@ -509,6 +566,12 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
Variable* rest_parameter = scope->rest_parameter(&rest_index);
BuildRestArgumentsArray(rest_parameter, rest_index);
+ // Build assignment to {.this_function} variable if it is used.
+ BuildThisFunctionVariable(scope->this_function_var());
+
+ // Build assignment to {new.target} variable if it is used.
+ BuildNewTargetVariable(scope->new_target_var());
+
// Emit tracing call if requested to do so.
if (FLAG_trace) {
NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
@@ -551,7 +614,10 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
- if (!FLAG_analyze_environment_liveness) return;
+ if (!FLAG_analyze_environment_liveness ||
+ !info()->is_deoptimization_enabled()) {
+ return;
+ }
NonLiveFrameStateSlotReplacer replacer(
&state_values_cache_, jsgraph()->UndefinedConstant(),
@@ -568,19 +634,11 @@ void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
}
-// Left-hand side can only be a property, a global or a variable slot.
-enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
-
-
-// Determine the left-hand side kind of an assignment.
-static LhsKind DetermineLhsKind(Expression* expr) {
- Property* property = expr->AsProperty();
- DCHECK(expr->IsValidReferenceExpression());
- LhsKind lhs_kind =
- (property == NULL) ? VARIABLE : (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- return lhs_kind;
+// Gets the bailout id just before reading a variable proxy, but only for
+// unallocated variables.
+static BailoutId BeforeId(VariableProxy* proxy) {
+ return proxy->var()->IsUnallocatedOrGlobalSlot() ? proxy->BeforeId()
+ : BailoutId::None();
}
@@ -604,7 +662,9 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
: builder_(builder),
parameters_count_(scope->num_parameters() + 1),
locals_count_(scope->num_stack_slots()),
- liveness_block_(builder_->liveness_analyzer()->NewBlock()),
+ liveness_block_(IsLivenessAnalysisEnabled()
+ ? builder_->liveness_analyzer()->NewBlock()
+ : nullptr),
values_(builder_->local_zone()),
contexts_(builder_->local_zone()),
control_dependency_(control_dependency),
@@ -615,16 +675,21 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
// Bind the receiver variable.
- Node* receiver = builder->graph()->NewNode(common()->Parameter(0, "%this"),
- builder->graph()->start());
- values()->push_back(receiver);
+ int param_num = 0;
+ if (builder->info()->is_this_defined()) {
+ const Operator* op = common()->Parameter(param_num++, "%this");
+ Node* receiver = builder->graph()->NewNode(op, builder->graph()->start());
+ values()->push_back(receiver);
+ } else {
+ values()->push_back(builder->jsgraph()->UndefinedConstant());
+ }
// Bind all parameter variables. The parameter indices are shifted by 1
// (receiver is parameter index -1 but environment index 0).
for (int i = 0; i < scope->num_parameters(); ++i) {
const char* debug_name = GetDebugParameterName(graph()->zone(), scope, i);
- Node* parameter = builder->graph()->NewNode(
- common()->Parameter(i + 1, debug_name), builder->graph()->start());
+ const Operator* op = common()->Parameter(param_num++, debug_name);
+ Node* parameter = builder->graph()->NewNode(op, builder->graph()->start());
values()->push_back(parameter);
}
@@ -634,10 +699,12 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
}
-AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy)
+AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy,
+ LivenessAnalyzerBlock* liveness_block)
: builder_(copy->builder_),
parameters_count_(copy->parameters_count_),
locals_count_(copy->locals_count_),
+ liveness_block_(liveness_block),
values_(copy->zone()),
contexts_(copy->zone()),
control_dependency_(copy->control_dependency_),
@@ -651,14 +718,6 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy)
contexts_.reserve(copy->contexts_.size());
contexts_.insert(contexts_.begin(), copy->contexts_.begin(),
copy->contexts_.end());
-
- if (FLAG_analyze_environment_liveness) {
- // Split the liveness blocks.
- copy->liveness_block_ =
- builder_->liveness_analyzer()->NewBlock(copy->liveness_block());
- liveness_block_ =
- builder_->liveness_analyzer()->NewBlock(copy->liveness_block());
- }
}
@@ -671,7 +730,8 @@ void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
} else {
DCHECK(variable->IsStackLocal());
values()->at(variable->index() + parameters_count_) = node;
- if (FLAG_analyze_environment_liveness) {
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
liveness_block()->Bind(variable->index());
}
}
@@ -686,7 +746,8 @@ Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
return values()->at(variable->index() + 1);
} else {
DCHECK(variable->IsStackLocal());
- if (FLAG_analyze_environment_liveness) {
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
liveness_block()->Lookup(variable->index());
}
return values()->at(variable->index() + parameters_count_);
@@ -695,7 +756,8 @@ Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
void AstGraphBuilder::Environment::MarkAllLocalsLive() {
- if (FLAG_analyze_environment_liveness) {
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
for (int i = 0; i < locals_count_; i++) {
liveness_block()->Lookup(i);
}
@@ -703,16 +765,56 @@ void AstGraphBuilder::Environment::MarkAllLocalsLive() {
}
+void AstGraphBuilder::Environment::RawParameterBind(int index, Node* node) {
+ DCHECK_LT(index, parameters_count());
+ values()->at(index) = node;
+}
+
+
+Node* AstGraphBuilder::Environment::RawParameterLookup(int index) {
+ DCHECK_LT(index, parameters_count());
+ return values()->at(index);
+}
+
+
+AstGraphBuilder::Environment*
+AstGraphBuilder::Environment::CopyForConditional() {
+ LivenessAnalyzerBlock* copy_liveness_block = nullptr;
+ if (liveness_block() != nullptr) {
+ copy_liveness_block =
+ builder_->liveness_analyzer()->NewBlock(liveness_block());
+ liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ return new (zone()) Environment(this, copy_liveness_block);
+}
+
+
+AstGraphBuilder::Environment*
+AstGraphBuilder::Environment::CopyAsUnreachable() {
+ Environment* env = new (zone()) Environment(this, nullptr);
+ env->MarkAsUnreachable();
+ return env;
+}
+
+
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyAndShareLiveness() {
- Environment* env = new (zone()) Environment(this);
- if (FLAG_analyze_environment_liveness) {
- env->liveness_block_ = liveness_block();
+ if (liveness_block() != nullptr) {
+ // Finish the current liveness block before copying.
+ liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
}
+ Environment* env = new (zone()) Environment(this, liveness_block());
return env;
}
+AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForLoop(
+ BitVector* assigned, bool is_osr) {
+ PrepareForLoop(assigned, is_osr);
+ return CopyAndShareLiveness();
+}
+
+
void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
int offset, int count) {
bool should_update = false;
@@ -754,18 +856,34 @@ Node* AstGraphBuilder::Environment::Checkpoint(
UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
stack_height());
- const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
+ const Operator* op = common()->FrameState(
+ ast_id, combine, builder()->frame_state_function_info());
Node* result = graph()->NewNode(op, parameters_node_, locals_node_,
stack_node_, builder()->current_context(),
- builder()->jsgraph()->UndefinedConstant());
- if (FLAG_analyze_environment_liveness) {
+ builder()->GetFunctionClosure(),
+ builder()->graph()->start());
+
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
liveness_block()->Checkpoint(result);
}
return result;
}
+bool AstGraphBuilder::Environment::IsLivenessAnalysisEnabled() {
+ return FLAG_analyze_environment_liveness &&
+ builder()->info()->is_deoptimization_enabled();
+}
+
+
+bool AstGraphBuilder::Environment::IsLivenessBlockConsistent() {
+ return (!IsLivenessAnalysisEnabled() || IsMarkedAsUnreachable()) ==
+ (liveness_block() == nullptr);
+}
+
+
AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
Expression::Context kind)
: kind_(kind), owner_(own), outer_(own->ast_context()) {
@@ -938,7 +1056,8 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
VariableMode mode = decl->mode();
bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Handle<Oddball> value = variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value();
@@ -946,21 +1065,21 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
globals()->push_back(value);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Node* value = jsgraph()->TheHoleConstant();
environment()->Bind(variable, value);
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Node* value = jsgraph()->TheHoleConstant();
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
}
break;
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
UNIMPLEMENTED();
}
}
@@ -969,30 +1088,31 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Variable* variable = decl->proxy()->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
+ decl->fun(), info()->script(), info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals()->push_back(variable->name());
globals()->push_back(function);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
environment()->Bind(variable, value);
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
break;
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
UNIMPLEMENTED();
}
}
@@ -1075,10 +1195,9 @@ void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
VisitForValue(stmt->expression());
Node* value = environment()->Pop();
const Operator* op = javascript()->CreateWithContext();
- Node* context = NewNode(op, value, GetFunctionClosure());
+ Node* context = NewNode(op, value, GetFunctionClosureForContext());
PrepareFrameState(context, stmt->EntryId());
- ContextScope scope(this, stmt->scope(), context);
- Visit(stmt->statement());
+ VisitInScope(stmt->statement(), stmt->scope(), context);
}
@@ -1176,158 +1295,83 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
}
-// TODO(dcarney): this is a big function. Try to clean up some.
void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
VisitForValue(stmt->subject());
- Node* obj = environment()->Pop();
- // Check for undefined or null before entering loop.
- IfBuilder is_undefined(this);
- Node* is_undefined_cond =
- NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
- is_undefined.If(is_undefined_cond);
- is_undefined.Then();
- is_undefined.Else();
+ Node* object = environment()->Pop();
+ BlockBuilder for_block(this);
+ for_block.BeginBlock();
+ // Check for null or undefined before entering loop.
+ Node* is_null_cond =
+ NewNode(javascript()->StrictEqual(), object, jsgraph()->NullConstant());
+ for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
+ Node* is_undefined_cond = NewNode(javascript()->StrictEqual(), object,
+ jsgraph()->UndefinedConstant());
+ for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
{
- IfBuilder is_null(this);
- Node* is_null_cond =
- NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
- is_null.If(is_null_cond);
- is_null.Then();
- is_null.Else();
// Convert object to jsobject.
- // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
- obj = NewNode(javascript()->ToObject(), obj);
- PrepareFrameState(obj, stmt->ToObjectId(), OutputFrameStateCombine::Push());
- environment()->Push(obj);
- // TODO(dcarney): should do a fast enum cache check here to skip runtime.
- Node* cache_type = NewNode(
- javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), obj);
- PrepareFrameState(cache_type, stmt->EnumId(),
- OutputFrameStateCombine::Push());
- // TODO(dcarney): these next runtime calls should be removed in favour of
- // a few simplified instructions.
- Node* cache_pair = NewNode(
- javascript()->CallRuntime(Runtime::kForInInit, 2), obj, cache_type);
- // cache_type may have been replaced.
- Node* cache_array = NewNode(common()->Projection(0), cache_pair);
- cache_type = NewNode(common()->Projection(1), cache_pair);
- Node* cache_length =
- NewNode(javascript()->CallRuntime(Runtime::kForInCacheArrayLength, 2),
- cache_type, cache_array);
+ object = BuildToObject(object, stmt->ToObjectId());
+ environment()->Push(object);
+
+ // Prepare for-in cache.
+ Node* prepare = NewNode(javascript()->ForInPrepare(), object);
+ PrepareFrameState(prepare, stmt->EnumId(), OutputFrameStateCombine::Push());
+ Node* cache_type = NewNode(common()->Projection(0), prepare);
+ Node* cache_array = NewNode(common()->Projection(1), prepare);
+ Node* cache_length = NewNode(common()->Projection(2), prepare);
+
+ // Construct the rest of the environment.
+ environment()->Push(cache_type);
+ environment()->Push(cache_array);
+ environment()->Push(cache_length);
+ environment()->Push(jsgraph()->ZeroConstant());
+
+ // Build the actual loop body.
+ LoopBuilder for_loop(this);
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
{
- // TODO(dcarney): this check is actually supposed to be for the
- // empty enum case only.
- IfBuilder have_no_properties(this);
- Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
- cache_length, jsgraph()->ZeroConstant());
- have_no_properties.If(empty_array_cond);
- have_no_properties.Then();
- // Pop obj and skip loop.
- environment()->Pop();
- have_no_properties.Else();
+ // These stack values are renamed in the case of OSR, so reload them
+ // from the environment.
+ Node* index = environment()->Peek(0);
+ Node* cache_length = environment()->Peek(1);
+ Node* cache_array = environment()->Peek(2);
+ Node* cache_type = environment()->Peek(3);
+ Node* object = environment()->Peek(4);
+
+ // Check loop termination condition.
+ Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+ for_loop.BreakWhen(exit_cond);
+
+ // Compute the next enumerated value.
+ Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
+ cache_type, index);
+ PrepareFrameState(value, stmt->FilterId(),
+ OutputFrameStateCombine::Push());
+ IfBuilder test_value(this);
+ Node* test_value_cond = NewNode(javascript()->StrictEqual(), value,
+ jsgraph()->UndefinedConstant());
+ test_value.If(test_value_cond, BranchHint::kFalse);
+ test_value.Then();
+ test_value.Else();
{
- // Construct the rest of the environment.
- environment()->Push(cache_type);
- environment()->Push(cache_array);
- environment()->Push(cache_length);
- environment()->Push(jsgraph()->ZeroConstant());
-
- // Build the actual loop body.
- VisitForInBody(stmt);
+ // Bind value and do loop body.
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(stmt->EachFeedbackSlot());
+ VisitForInAssignment(stmt->each(), value, feedback,
+ stmt->AssignmentId());
+ VisitIterationBody(stmt, &for_loop);
}
- have_no_properties.End();
+ test_value.End();
+ index = environment()->Peek(0);
+ for_loop.EndBody();
+
+ // Increment counter and continue.
+ index = NewNode(javascript()->ForInStep(), index);
+ environment()->Poke(0, index);
}
- is_null.End();
+ for_loop.EndLoop();
+ environment()->Drop(5);
}
- is_undefined.End();
-}
-
-
-// TODO(dcarney): this is a big function. Try to clean up some.
-void AstGraphBuilder::VisitForInBody(ForInStatement* stmt) {
- LoopBuilder for_loop(this);
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-
- // These stack values are renamed in the case of OSR, so reload them
- // from the environment.
- Node* index = environment()->Peek(0);
- Node* cache_length = environment()->Peek(1);
- Node* cache_array = environment()->Peek(2);
- Node* cache_type = environment()->Peek(3);
- Node* obj = environment()->Peek(4);
-
- // Check loop termination condition.
- Node* exit_cond = NewNode(javascript()->LessThan(LanguageMode::SLOPPY),
- index, cache_length);
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(exit_cond, BailoutId::None());
- for_loop.BreakUnless(exit_cond);
- Node* pair = NewNode(javascript()->CallRuntime(Runtime::kForInNext, 4), obj,
- cache_array, cache_type, index);
- Node* value = NewNode(common()->Projection(0), pair);
- Node* should_filter = NewNode(common()->Projection(1), pair);
- environment()->Push(value);
- {
- // Test if FILTER_KEY needs to be called.
- IfBuilder test_should_filter(this);
- Node* should_filter_cond = NewNode(
- javascript()->StrictEqual(), should_filter, jsgraph()->TrueConstant());
- test_should_filter.If(should_filter_cond);
- test_should_filter.Then();
- value = environment()->Pop();
- Node* builtins = BuildLoadBuiltinsObject();
- Node* function = BuildLoadObjectField(
- builtins,
- JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
- // result is either the string key or Smi(0) indicating the property
- // is gone.
- Node* res = NewNode(
- javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS, language_mode()),
- function, obj, value);
- PrepareFrameState(res, stmt->FilterId(), OutputFrameStateCombine::Push());
- Node* property_missing =
- NewNode(javascript()->StrictEqual(), res, jsgraph()->ZeroConstant());
- {
- IfBuilder is_property_missing(this);
- is_property_missing.If(property_missing);
- is_property_missing.Then();
- // Inc counter and continue.
- Node* index_inc =
- NewNode(javascript()->Add(LanguageMode::SLOPPY), index,
- jsgraph()->OneConstant());
- // TODO(jarin): provide real bailout id.
- PrepareFrameStateAfterAndBefore(index_inc, BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- jsgraph()->EmptyFrameState());
- environment()->Poke(0, index_inc);
- for_loop.Continue();
- is_property_missing.Else();
- is_property_missing.End();
- }
- // Replace 'value' in environment.
- environment()->Push(res);
- test_should_filter.Else();
- test_should_filter.End();
- }
- value = environment()->Pop();
- // Bind value and do loop body.
- VisitForInAssignment(stmt->each(), value, stmt->AssignmentId());
- VisitIterationBody(stmt, &for_loop);
- index = environment()->Peek(0);
- for_loop.EndBody();
-
- // Inc counter and continue.
- Node* index_inc =
- NewNode(javascript()->Add(LanguageMode::SLOPPY), index,
- jsgraph()->OneConstant());
- // TODO(jarin): provide real bailout id.
- PrepareFrameStateAfterAndBefore(index_inc, BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- jsgraph()->EmptyFrameState());
- environment()->Poke(0, index_inc);
- for_loop.EndLoop();
- environment()->Drop(5);
- // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ for_block.EndBlock();
}
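The rewritten statement desugars for-in into the ForInPrepare/ForInDone/ForInNext/ForInStep protocol, with an explicit test against undefined filtering out properties deleted during iteration. A toy C++ model of the resulting control flow (nullptr standing in for undefined):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // ForInPrepare: materialize the enum cache; nullptr models the
      // undefined that ForInNext yields for a since-deleted property.
      std::vector<const char*> cache_array = {"a", nullptr, "c"};
      std::size_t cache_length = cache_array.size();
      for (std::size_t index = 0; index < cache_length; ++index) {  // ForInDone
        const char* value = cache_array[index];                     // ForInNext
        if (value == nullptr) continue;                             // filter
        std::printf("key: %s\n", value);                            // loop body
      }                                                             // ForInStep
      return 0;
    }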
@@ -1348,6 +1392,8 @@ void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TryCatchBuilder try_control(this);
+ ExternalReference message_object =
+ ExternalReference::address_of_pending_message_obj(isolate());
// Evaluate the try-block inside a control scope. This simulates a handler
// that is intercepting 'throw' control commands.
@@ -1361,28 +1407,32 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control.EndTry();
+ // TODO(mstarzinger): We are only using a runtime call to get a lazy bailout
+ // point; there is no need to actually emit a call. Optimize this!
+ Node* guard = NewNode(javascript()->CallRuntime(Runtime::kMaxSmi, 0));
+ PrepareFrameState(guard, stmt->HandlerId());
+
+ // Clear message object as we enter the catch block.
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ BuildStoreExternal(message_object, kMachAnyTagged, the_hole);
+
// Create a catch scope that binds the exception.
Node* exception = try_control.GetExceptionNode();
Unique<String> name = MakeUnique(stmt->variable()->name());
const Operator* op = javascript()->CreateCatchContext(name);
- Node* context = NewNode(op, exception, GetFunctionClosure());
- PrepareFrameState(context, BailoutId::None());
- {
- ContextScope scope(this, stmt->scope(), context);
- DCHECK(stmt->scope()->declarations()->is_empty());
- // Evaluate the catch-block.
- Visit(stmt->catch_block());
- }
+ Node* context = NewNode(op, exception, GetFunctionClosureForContext());
+
+ // Evaluate the catch-block.
+ VisitInScope(stmt->catch_block(), stmt->scope(), context);
try_control.EndCatch();
// TODO(mstarzinger): Remove bailout once everything works.
- if (!FLAG_turbo_exceptions) SetStackOverflow();
+ if (!FLAG_turbo_try_catch) SetStackOverflow();
}
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
TryFinallyBuilder try_control(this);
-
ExternalReference message_object =
ExternalReference::address_of_pending_message_obj(isolate());
@@ -1411,6 +1461,11 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
+ // TODO(mstarzinger): We are only using a runtime call to get a lazy bailout
+ // point; there is no need to actually emit a call. Optimize this!
+ Node* guard = NewNode(javascript()->CallRuntime(Runtime::kMaxSmi, 0));
+ PrepareFrameState(guard, stmt->HandlerId());
+
// The result value semantics depend on how the block was entered:
// - ReturnStatement: It represents the return value being returned.
// - ThrowStatement: It represents the exception being thrown.
@@ -1426,6 +1481,10 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
environment()->Push(result);
environment()->Push(message);
+ // Clear message object as we enter the finally block.
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ BuildStoreExternal(message_object, kMachAnyTagged, the_hole);
+
// Evaluate the finally-block.
Visit(stmt->finally_block());
try_control.EndFinally();
@@ -1441,7 +1500,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
commands->ApplyDeferredCommands(token, result);
// TODO(mstarzinger): Remove bailout once everything works.
- if (!FLAG_turbo_exceptions) SetStackOverflow();
+ if (!FLAG_turbo_try_finally) SetStackOverflow();
}
@@ -1455,14 +1514,10 @@ void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
Node* context = current_context();
- // Build a new shared function info if we cannot find one in the baseline
- // code. We also have a stack overflow if the recursive compilation did.
- expr->InitializeSharedInfo(handle(info()->shared_info()->code()));
- Handle<SharedFunctionInfo> shared_info = expr->shared_info();
- if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
- CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
- }
+ // Find or build a shared function info.
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+ CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
// Create node to instantiate a new closure.
PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
@@ -1523,6 +1578,7 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
environment()->Push(proto);
// Create nodes to store method values into the literal.
+ int store_slot_index = 0;
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
environment()->Push(property->is_static() ? literal : proto);
@@ -1545,7 +1601,9 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
Node* value = environment()->Pop();
Node* key = environment()->Pop();
Node* receiver = environment()->Pop();
- BuildSetHomeObject(value, receiver, property->value());
+ VectorSlotPair feedback = CreateVectorSlotPair(
+ expr->SlotForHomeObject(property->value(), &store_slot_index));
+ BuildSetHomeObject(value, receiver, property->value(), feedback);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1584,7 +1642,12 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
if (expr->scope() != NULL) {
DCHECK_NOT_NULL(expr->class_variable_proxy());
Variable* var = expr->class_variable_proxy()->var();
- BuildVariableAssignment(var, literal, Token::INIT_CONST, BailoutId::None());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ VectorSlotPair feedback = CreateVectorSlotPair(
+ FLAG_vector_stores ? expr->GetNthSlot(store_slot_index++)
+ : FeedbackVectorICSlot::Invalid());
+ BuildVariableAssignment(var, literal, Token::INIT_CONST, feedback,
+ BailoutId::None(), states);
}
ast_context()->ProduceValue(literal);
@@ -1612,7 +1675,9 @@ void AstGraphBuilder::VisitConditional(Conditional* expr) {
void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
- Node* value = BuildVariableLoad(expr->var(), expr->id(), pair);
+ FrameStateBeforeAndAfter states(this, BeforeId(expr));
+ Node* value = BuildVariableLoad(expr->var(), expr->id(), states, pair,
+ ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
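
// A reading aid (an assumption inferred from call sites in this file, not
// from a definition shown here): FrameStateBeforeAndAfter checkpoints the
// environment when it is constructed, and AddToNode attaches that checkpoint
// together with a fresh after-state to the node's deoptimization inputs,
// superseding the older PrepareFrameStateAfterAndBefore helper.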
@@ -1644,7 +1709,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- expr->BuildConstantProperties(isolate());
Node* literals_array =
BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* literal_index = jsgraph()->Constant(expr->literal_index());
@@ -1659,13 +1723,9 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// property values and is the value of the entire expression.
environment()->Push(literal);
- // Mark all computed expressions that are bound to a key that is shadowed by
- // a later occurrence of the same key. For the marked expressions, no store
- // code is emitted.
- expr->CalculateEmitStore(zone());
-
// Create nodes to store computed values into the literal.
int property_index = 0;
+ int store_slot_index = 0;
AccessorTable accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
@@ -1685,12 +1745,21 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForValue(property->value());
+ FrameStateBeforeAndAfter states(this, property->value()->id());
Node* value = environment()->Pop();
Handle<Name> name = key->AsPropertyName();
- Node* store =
- BuildNamedStore(literal, name, value, TypeFeedbackId::None());
- PrepareFrameState(store, key->id());
- BuildSetHomeObject(value, literal, property->value());
+ VectorSlotPair feedback =
+ FLAG_vector_stores
+ ? CreateVectorSlotPair(expr->GetNthSlot(store_slot_index++))
+ : VectorSlotPair();
+ Node* store = BuildNamedStore(literal, name, value, feedback,
+ TypeFeedbackId::None());
+ states.AddToNode(store, key->id(),
+ OutputFrameStateCombine::Ignore());
+ VectorSlotPair home_feedback = CreateVectorSlotPair(
+ expr->SlotForHomeObject(property->value(), &store_slot_index));
+ BuildSetHomeObject(value, literal, property->value(),
+ home_feedback);
} else {
VisitForEffect(property->value());
}
@@ -1706,8 +1775,12 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* language = jsgraph()->Constant(SLOPPY);
const Operator* op =
javascript()->CallRuntime(Runtime::kSetProperty, 4);
- NewNode(op, receiver, key, value, language);
- BuildSetHomeObject(value, receiver, property->value());
+ Node* set_property = NewNode(op, receiver, key, value, language);
+ // SetProperty should not lazy deopt on an object literal.
+ PrepareFrameState(set_property, BailoutId::None());
+ VectorSlotPair home_feedback = CreateVectorSlotPair(
+ expr->SlotForHomeObject(property->value(), &store_slot_index));
+ BuildSetHomeObject(value, receiver, property->value(), home_feedback);
}
break;
}
@@ -1743,9 +1816,15 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
it != accessor_table.end(); ++it) {
VisitForValue(it->first);
VisitForValueOrNull(it->second->getter);
- BuildSetHomeObject(environment()->Top(), literal, it->second->getter);
+ VectorSlotPair feedback_getter = CreateVectorSlotPair(
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
+ BuildSetHomeObject(environment()->Top(), literal, it->second->getter,
+ feedback_getter);
VisitForValueOrNull(it->second->setter);
- BuildSetHomeObject(environment()->Top(), literal, it->second->setter);
+ VectorSlotPair feedback_setter = CreateVectorSlotPair(
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
+ BuildSetHomeObject(environment()->Top(), literal, it->second->setter,
+ feedback_setter);
Node* setter = environment()->Pop();
Node* getter = environment()->Pop();
Node* name = environment()->Pop();
@@ -1769,20 +1848,30 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ environment()->Push(literal); // Duplicate receiver.
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ Node* call = NewNode(op, receiver, value);
+ PrepareFrameState(call, BailoutId::None());
+ continue;
+ }
+
environment()->Push(literal); // Duplicate receiver.
VisitForValue(property->key());
Node* name = BuildToName(environment()->Pop(),
expr->GetIdForProperty(property_index));
environment()->Push(name);
- // TODO(mstarzinger): For ObjectLiteral::Property::PROTOTYPE the key should
- // not be on the operand stack while the value is being evaluated. Come up
- // with a repro for this and fix it. Also find a nice way to do so. :)
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* key = environment()->Pop();
Node* receiver = environment()->Pop();
- BuildSetHomeObject(value, receiver, property->value());
-
+ VectorSlotPair feedback = CreateVectorSlotPair(
+ expr->SlotForHomeObject(property->value(), &store_slot_index));
+ BuildSetHomeObject(value, receiver, property->value(), feedback);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
@@ -1794,13 +1883,9 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareFrameState(call, BailoutId::None());
break;
}
- case ObjectLiteral::Property::PROTOTYPE: {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
- Node* call = NewNode(op, receiver, value);
- PrepareFrameState(call, BailoutId::None());
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE(); // Handled specially above.
break;
- }
case ObjectLiteral::Property::GETTER: {
Node* attr = jsgraph()->Constant(NONE);
const Operator* op = javascript()->CallRuntime(
@@ -1827,6 +1912,10 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
NewNode(op, literal);
}
+  // Verify that compilation consumed exactly the number of store IC slots
+  // that the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
+
ast_context()->ProduceValue(environment()->Pop());
}
@@ -1853,65 +1942,131 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Create nodes to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < expr->values()->length(); i++) {
- Expression* subexpr = expr->values()->at(i);
+ int array_index = 0;
+ for (; array_index < expr->values()->length(); array_index++) {
+ Expression* subexpr = expr->values()->at(array_index);
+ if (subexpr->IsSpread()) break;
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
VisitForValue(subexpr);
- Node* frame_state_before = environment()->Checkpoint(
- subexpr->id(), OutputFrameStateCombine::PokeAt(0));
- Node* value = environment()->Pop();
- Node* index = jsgraph()->Constant(i);
- Node* store =
- BuildKeyedStore(literal, index, value, TypeFeedbackId::None());
- PrepareFrameStateAfterAndBefore(store, expr->GetIdForElement(i),
- OutputFrameStateCombine::Ignore(),
- frame_state_before);
+ {
+ FrameStateBeforeAndAfter states(this, subexpr->id());
+ Node* value = environment()->Pop();
+ Node* index = jsgraph()->Constant(array_index);
+ Node* store = BuildKeyedStore(literal, index, value, VectorSlotPair(),
+ TypeFeedbackId::None());
+ states.AddToNode(store, expr->GetIdForElement(array_index),
+ OutputFrameStateCombine::Ignore());
+ }
}
+  // If the array literal contains spread expressions, it has two parts. The
+  // first part is the "static" prefix with known literal indices; it is
+  // handled above. The second part starts at the first spread expression
+  // (inclusive), and its elements are appended to the array one by one. Note
+  // that the number of elements an iterable produces is unknown ahead of time.
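+  // For illustration, a hypothetical example: for `[a, b, ...xs, c]` the
+  // prefix `a, b` is stored by literal index in the loop above, while `...xs`
+  // is appended via CONCAT_ITERABLE_TO_ARRAY and `c` via %AppendElement in
+  // the loop below.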
environment()->Pop(); // Array literal index.
+ for (; array_index < expr->values()->length(); array_index++) {
+ Expression* subexpr = expr->values()->at(array_index);
+ Node* array = environment()->Pop();
+ Node* result;
+
+ if (subexpr->IsSpread()) {
+ VisitForValue(subexpr->AsSpread()->expression());
+ Node* iterable = environment()->Pop();
+ Node* builtins = BuildLoadBuiltinsObject();
+ Node* function = BuildLoadObjectField(
+ builtins, JSBuiltinsObject::OffsetOfFunctionWithId(
+ Builtins::CONCAT_ITERABLE_TO_ARRAY));
+ result = NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS,
+ language_mode()),
+ function, array, iterable);
+ } else {
+ VisitForValue(subexpr);
+ Node* value = environment()->Pop();
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kAppendElement, 2);
+ result = NewNode(op, array, value);
+ }
+
+ PrepareFrameState(result, expr->GetIdForElement(array_index));
+ environment()->Push(result);
+ }
+
ast_context()->ProduceValue(environment()->Pop());
}
void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
+ const VectorSlotPair& feedback,
BailoutId bailout_id) {
DCHECK(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->AsProperty();
- LhsKind assign_type = DetermineLhsKind(expr);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression and store the value.
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- BuildVariableAssignment(var, value, Token::ASSIGN, bailout_id);
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id,
+ states);
break;
}
case NAMED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* object = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store =
- BuildNamedStore(object, name, value, TypeFeedbackId::None());
- PrepareFrameState(store, bailout_id);
+ Node* store = BuildNamedStore(object, name, value, feedback,
+ TypeFeedbackId::None());
+ states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
case KEYED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, TypeFeedbackId::None());
- // TODO(jarin) Provide a real frame state before.
- PrepareFrameStateAfterAndBefore(store, bailout_id,
- OutputFrameStateCombine::Ignore(),
- jsgraph()->EmptyFrameState());
+ Node* store =
+ BuildKeyedStore(object, key, value, feedback, TypeFeedbackId::None());
+ states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ environment()->Push(value);
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ value = environment()->Pop();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
+ TypeFeedbackId::None());
+ states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ environment()->Push(value);
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ value = environment()->Pop();
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
+ TypeFeedbackId::None());
+ states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
}
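
  // For illustration, hypothetical examples of each path: `for (x in xs)`
  // takes the VARIABLE case, `for (o.p in xs)` NAMED_PROPERTY,
  // `for (o[k] in xs)` KEYED_PROPERTY, and inside a method
  // `for (super.p in xs)` and `for (super[k] in xs)` take the super cases.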
@@ -1923,27 +2078,41 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = DetermineLhsKind(expr->target());
+ LhsKind assign_type = Property::GetAssignType(property);
+ bool needs_frame_state_before = true;
// Evaluate LHS expression.
switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
+ case VARIABLE: {
+ Variable* variable = expr->target()->AsVariableProxy()->var();
+ if (variable->location() == VariableLocation::PARAMETER ||
+ variable->location() == VariableLocation::LOCAL ||
+ variable->location() == VariableLocation::CONTEXT) {
+ needs_frame_state_before = false;
+ }
break;
+ }
case NAMED_PROPERTY:
VisitForValue(property->obj());
break;
- case KEYED_PROPERTY: {
+ case KEYED_PROPERTY:
VisitForValue(property->obj());
VisitForValue(property->key());
break;
- }
+ case NAMED_SUPER_PROPERTY:
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ break;
+ case KEYED_SUPER_PROPERTY:
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(property->key());
+ break;
}
+ BailoutId before_store_id = BailoutId::None();
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
- Node* frame_state_before_store = nullptr;
- bool needs_frame_state_before = (assign_type == KEYED_PROPERTY);
if (expr->is_compound()) {
Node* old_value = NULL;
switch (assign_type) {
@@ -1951,7 +2120,10 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
VectorSlotPair pair =
CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair);
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+ old_value =
+ BuildVariableLoad(proxy->var(), expr->target()->id(), states, pair,
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_PROPERTY: {
@@ -1959,10 +2131,10 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value =
- BuildNamedLoad(object, name, pair, property->PropertyFeedbackId());
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ old_value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -1970,65 +2142,106 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value =
- BuildKeyedLoad(object, key, pair, property->PropertyFeedbackId());
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ old_value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ Node* home_object = environment()->Top();
+ Node* receiver = environment()->Peek(1);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ Node* key = environment()->Top();
+ Node* home_object = environment()->Peek(1);
+ Node* receiver = environment()->Peek(2);
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
}
environment()->Push(old_value);
VisitForValue(expr->value());
- Node* frame_state_before = environment()->Checkpoint(expr->value()->id());
- Node* right = environment()->Pop();
- Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->binary_op());
- PrepareFrameStateAfterAndBefore(value, expr->binary_operation()->id(),
- OutputFrameStateCombine::Push(),
- frame_state_before);
+ Node* value;
+ {
+ FrameStateBeforeAndAfter states(this, expr->value()->id());
+ Node* right = environment()->Pop();
+ Node* left = environment()->Pop();
+ value = BuildBinaryOp(left, right, expr->binary_op());
+ states.AddToNode(value, expr->binary_operation()->id(),
+ OutputFrameStateCombine::Push());
+ }
environment()->Push(value);
if (needs_frame_state_before) {
- frame_state_before_store = environment()->Checkpoint(
- expr->binary_operation()->id(), OutputFrameStateCombine::PokeAt(0));
+ before_store_id = expr->binary_operation()->id();
}
} else {
VisitForValue(expr->value());
if (needs_frame_state_before) {
- // This frame state can be used for lazy-deopting from a to-number
- // conversion if we are storing into a typed array. It is important
- // that the frame state is usable for such lazy deopt (i.e., it has
- // to specify how to override the value before the conversion, in this
- // case, it overwrites the stack top).
- frame_state_before_store = environment()->Checkpoint(
- expr->value()->id(), OutputFrameStateCombine::PokeAt(0));
+ before_store_id = expr->value()->id();
}
}
+ FrameStateBeforeAndAfter store_states(this, before_store_id);
// Store the value.
Node* value = environment()->Pop();
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->AssignmentSlot());
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
- BuildVariableAssignment(variable, value, expr->op(), expr->id(),
- ast_context()->GetStateCombine());
+ BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
+ store_states, ast_context()->GetStateCombine());
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store =
- BuildNamedStore(object, name, value, expr->AssignmentFeedbackId());
- PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+ Node* store = BuildNamedStore(object, name, value, feedback,
+ expr->AssignmentFeedbackId());
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store =
- BuildKeyedStore(object, key, value, expr->AssignmentFeedbackId());
- PrepareFrameStateAfterAndBefore(store, expr->id(),
- ast_context()->GetStateCombine(),
- frame_state_before_store);
+ Node* store = BuildKeyedStore(object, key, value, feedback,
+ expr->AssignmentFeedbackId());
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
+ expr->AssignmentFeedbackId());
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
+ expr->AssignmentFeedbackId());
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
break;
}
}
@@ -2053,21 +2266,56 @@ void AstGraphBuilder::VisitThrow(Throw* expr) {
void AstGraphBuilder::VisitProperty(Property* expr) {
- Node* value;
+ Node* value = nullptr;
+ LhsKind property_kind = Property::GetAssignType(expr);
VectorSlotPair pair = CreateVectorSlotPair(expr->PropertyFeedbackSlot());
- if (expr->key()->IsPropertyName()) {
- VisitForValue(expr->obj());
- Node* object = environment()->Pop();
- Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
- value = BuildNamedLoad(object, name, pair, expr->PropertyFeedbackId());
- } else {
- VisitForValue(expr->obj());
- VisitForValue(expr->key());
- Node* key = environment()->Pop();
- Node* object = environment()->Pop();
- value = BuildKeyedLoad(object, key, pair, expr->PropertyFeedbackId());
+ switch (property_kind) {
+ case VARIABLE:
+ UNREACHABLE();
+ break;
+ case NAMED_PROPERTY: {
+ VisitForValue(expr->obj());
+ FrameStateBeforeAndAfter states(this, expr->obj()->id());
+ Node* object = environment()->Pop();
+ Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
+ value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ VisitForValue(expr->obj());
+ VisitForValue(expr->key());
+ FrameStateBeforeAndAfter states(this, expr->key()->id());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
+ FrameStateBeforeAndAfter states(this, expr->obj()->id());
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
+ value = BuildNamedSuperLoad(receiver, home_object, name, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(expr->key());
+ FrameStateBeforeAndAfter states(this, expr->key()->id());
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
}
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
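
// For illustration, a hypothetical example: in a method `m() { return super.x; }`
// the NAMED_SUPER_PROPERTY case above evaluates `this` and the [[HomeObject]]
// of `m`, and BuildNamedSuperLoad then reads `x` from the home object's
// prototype chain with `this` as the receiver.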
@@ -2086,14 +2334,16 @@ void AstGraphBuilder::VisitCall(Call* expr) {
case Call::GLOBAL_CALL: {
VariableProxy* proxy = callee->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
callee_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), pair);
+ BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
+ pair, OutputFrameStateCombine::Push());
receiver_value = jsgraph()->UndefinedConstant();
break;
}
case Call::LOOKUP_SLOT_CALL: {
Variable* variable = callee->AsVariableProxy()->var();
- DCHECK(variable->location() == Variable::LOOKUP);
+ DCHECK(variable->location() == VariableLocation::LOOKUP);
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
@@ -2101,33 +2351,64 @@ void AstGraphBuilder::VisitCall(Call* expr) {
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
- PrepareFrameState(pair, expr->EvalOrLookupId(),
+ PrepareFrameState(pair, expr->LookupId(),
OutputFrameStateCombine::Push(2));
break;
}
case Call::PROPERTY_CALL: {
Property* property = callee->AsProperty();
- VisitForValue(property->obj());
- Node* object = environment()->Top();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- if (property->key()->IsPropertyName()) {
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value =
- BuildNamedLoad(object, name, pair, property->PropertyFeedbackId());
+ if (!property->IsSuperAccess()) {
+ VisitForValue(property->obj());
+ Node* object = environment()->Top();
+
+ if (property->key()->IsPropertyName()) {
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ callee_value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ } else {
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ callee_value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ }
+ receiver_value = environment()->Pop();
+        // Note that a PROPERTY_CALL requires the receiver to be wrapped into
+        // an object for sloppy callees. This could also be modeled explicitly
+        // here, thereby obsoleting the need for a flag to the call operator.
+ flags = CALL_AS_METHOD;
+
} else {
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- callee_value =
- BuildKeyedLoad(object, key, pair, property->PropertyFeedbackId());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
+ Node* home_object = environment()->Pop();
+ receiver_value = environment()->Pop();
+ if (property->key()->IsPropertyName()) {
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ callee_value =
+ BuildNamedSuperLoad(receiver_value, home_object, name, pair);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+
+ } else {
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ callee_value =
+ BuildKeyedSuperLoad(receiver_value, home_object, key, pair);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ }
}
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- receiver_value = environment()->Pop();
- // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
- // object for sloppy callees. This could also be modeled explicitly here,
- // thereby obsoleting the need for a flag to the call operator.
- flags = CALL_AS_METHOD;
+
break;
}
case Call::SUPER_CALL:
@@ -2138,6 +2419,18 @@ void AstGraphBuilder::VisitCall(Call* expr) {
break;
case Call::POSSIBLY_EVAL_CALL:
possibly_eval = true;
+ if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
+ Variable* variable = callee->AsVariableProxy()->var();
+ Node* name = jsgraph()->Constant(variable->name());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Node* pair = NewNode(op, current_context(), name);
+ callee_value = NewNode(common()->Projection(0), pair);
+ receiver_value = NewNode(common()->Projection(1), pair);
+ PrepareFrameState(pair, expr->LookupId(),
+ OutputFrameStateCombine::Push(2));
+ break;
+ }
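+      // For illustration, a hypothetical example: in
+      //   with (o) { eval(src); }
+      // the name `eval` itself resolves through a LOOKUP slot, so the callee
+      // and its implicit receiver are fetched together via %LoadLookupSlot
+      // before the possibly-direct eval is resolved below.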
// Fall through.
case Call::OTHER_CALL:
VisitForValue(callee);
@@ -2167,26 +2460,23 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Create node to ask for help resolving potential eval call. This will
// provide a fully resolved callee and the corresponding receiver.
Node* function = GetFunctionClosure();
- Node* receiver = environment()->Lookup(info()->scope()->receiver());
Node* language = jsgraph()->Constant(language_mode());
- Node* position = jsgraph()->Constant(info()->scope()->start_position());
+ Node* position = jsgraph()->Constant(current_scope()->start_position());
const Operator* op =
- javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
- Node* pair =
- NewNode(op, callee, source, function, receiver, language, position);
- PrepareFrameState(pair, expr->EvalOrLookupId(),
+ javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ Node* new_callee =
+ NewNode(op, callee, source, function, language, position);
+ PrepareFrameState(new_callee, expr->EvalId(),
OutputFrameStateCombine::PokeAt(arg_count + 1));
- Node* new_callee = NewNode(common()->Projection(0), pair);
- Node* new_receiver = NewNode(common()->Projection(1), pair);
- // Patch callee and receiver on the environment.
+ // Patch callee on the environment.
environment()->Poke(arg_count + 1, new_callee);
- environment()->Poke(arg_count + 0, new_receiver);
}
// Create node to perform the function call.
- const Operator* call =
- javascript()->CallFunction(args->length() + 2, flags, language_mode());
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
+ const Operator* call = javascript()->CallFunction(args->length() + 2, flags,
+ language_mode(), feedback);
Node* value = ProcessArguments(call, args->length() + 2);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
@@ -2216,12 +2506,11 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
Node* receiver_value = BuildLoadBuiltinsObject();
VectorSlotPair pair = CreateVectorSlotPair(expr->CallRuntimeFeedbackSlot());
- Node* callee_value =
- BuildNamedLoad(receiver_value, name, pair, expr->CallRuntimeFeedbackId());
- // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
- // refuses to optimize functions with jsruntime calls).
- PrepareFrameState(callee_value, BailoutId::None(),
- OutputFrameStateCombine::Push());
+ // TODO(jarin): bailout ids for runtime calls.
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ Node* callee_value = BuildNamedLoad(receiver_value, name, pair);
+ states.AddToNode(callee_value, BailoutId::None(),
+ OutputFrameStateCombine::Push());
environment()->Push(callee_value);
environment()->Push(receiver_value);
@@ -2248,6 +2537,15 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
return VisitCallJSRuntime(expr);
}
+  // TODO(mstarzinger): This bailout is a gigantic hack; the owner is ashamed.
+ if (function->function_id == Runtime::kInlineGeneratorNext ||
+ function->function_id == Runtime::kInlineGeneratorThrow ||
+ function->function_id == Runtime::kInlineDefaultConstructorCallSuper ||
+ function->function_id == Runtime::kInlineCallSuperWithSpread) {
+ ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
+ return SetStackOverflow();
+ }
+
// Evaluate all arguments to the runtime call.
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
@@ -2255,8 +2553,9 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// Create node to perform the runtime call.
Runtime::FunctionId functionId = function->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2282,7 +2581,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->expression()->AsProperty();
- LhsKind assign_type = DetermineLhsKind(expr->expression());
+ LhsKind assign_type = Property::GetAssignType(property);
// Reserve space for result of postfix operation.
bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
@@ -2295,91 +2594,155 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
old_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), pair);
+ BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
+ pair, OutputFrameStateCombine::Push());
stack_depth = 0;
break;
}
case NAMED_PROPERTY: {
VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* object = environment()->Top();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value =
- BuildNamedLoad(object, name, pair, property->PropertyFeedbackId());
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ old_value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 1;
break;
}
case KEYED_PROPERTY: {
VisitForValue(property->obj());
VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value =
- BuildKeyedLoad(object, key, pair, property->PropertyFeedbackId());
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ old_value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ stack_depth = 2;
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Node* home_object = environment()->Top();
+ Node* receiver = environment()->Peek(1);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
+ case KEYED_SUPER_PROPERTY: {
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Node* key = environment()->Top();
+ Node* home_object = environment()->Peek(1);
+ Node* receiver = environment()->Peek(2);
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ stack_depth = 3;
+ break;
+ }
}
// Convert old value into a number.
- old_value = NewNode(javascript()->ToNumber(), old_value);
- PrepareFrameState(old_value, expr->ToNumberId(),
- OutputFrameStateCombine::Push());
+ if (!is_strong(language_mode())) {
+ old_value = NewNode(javascript()->ToNumber(), old_value);
+ PrepareFrameState(old_value, expr->ToNumberId(),
+ OutputFrameStateCombine::Push());
+ }
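+  // In strong mode the implicit ToNumber above is skipped, so the +1/-1
+  // operation below uses expr->ToNumberId() as its bailout id instead (see
+  // the states.AddToNode call in the block that builds the operation).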
- Node* frame_state_before_store =
- assign_type == KEYED_PROPERTY
- ? environment()->Checkpoint(expr->ToNumberId())
- : nullptr;
+ // TODO(titzer): combine this framestate with the above?
+ FrameStateBeforeAndAfter store_states(this, assign_type == KEYED_PROPERTY
+ ? expr->ToNumberId()
+ : BailoutId::None());
// Save result for postfix expressions at correct stack depth.
if (is_postfix) environment()->Poke(stack_depth, old_value);
// Create node to perform +1/-1 operation.
- Node* value =
- BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
- // This should never deoptimize because we have converted to number
- // before.
- PrepareFrameStateAfterAndBefore(value, BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- jsgraph()->EmptyFrameState());
+ Node* value;
+ {
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ value =
+ BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+    // This should never deoptimize outside strong mode because we have
+    // already converted the operand to a number.
+ states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
+ : BailoutId::None(),
+ OutputFrameStateCombine::Ignore());
+ }
// Store the value.
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
environment()->Push(value);
- BuildVariableAssignment(variable, value, expr->op(),
- expr->AssignmentId());
+ BuildVariableAssignment(variable, value, expr->op(), feedback,
+ expr->AssignmentId(), store_states);
environment()->Pop();
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store =
- BuildNamedStore(object, name, value, expr->CountStoreFeedbackId());
+ Node* store = BuildNamedStore(object, name, value, feedback,
+ expr->CountStoreFeedbackId());
environment()->Push(value);
- PrepareFrameState(store, expr->AssignmentId());
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store =
- BuildKeyedStore(object, key, value, expr->CountStoreFeedbackId());
+ Node* store = BuildKeyedStore(object, key, value, feedback,
+ expr->CountStoreFeedbackId());
+ environment()->Push(value);
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
+ environment()->Pop();
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
+ expr->CountStoreFeedbackId());
environment()->Push(value);
- PrepareFrameStateAfterAndBefore(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore(),
- frame_state_before_store);
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
+ environment()->Pop();
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
+ expr->CountStoreFeedbackId());
+ environment()->Push(value);
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
@@ -2402,13 +2765,11 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
default: {
VisitForValue(expr->left());
VisitForValue(expr->right());
- Node* frame_state_before = environment()->Checkpoint(expr->right()->id());
+ FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->op());
- PrepareFrameStateAfterAndBefore(value, expr->id(),
- ast_context()->GetStateCombine(),
- frame_state_before);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
}
@@ -2454,15 +2815,19 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
VisitForValue(expr->left());
VisitForValue(expr->right());
+ FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
-void AstGraphBuilder::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void AstGraphBuilder::VisitSpread(Spread* expr) {
+ // Handled entirely by the parser itself.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
@@ -2471,14 +2836,23 @@ void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
-void AstGraphBuilder::VisitSuperReference(SuperReference* expr) {
- // TODO(turbofan): Implement super here.
- SetStackOverflow();
- ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+void AstGraphBuilder::VisitSuperPropertyReference(
+ SuperPropertyReference* expr) {
+ Node* value = BuildThrowUnsupportedSuperError(expr->id());
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
+  // Handled by VisitCall.
+ UNREACHABLE();
}
-void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
+void AstGraphBuilder::VisitCaseClause(CaseClause* expr) {
+ // Handled entirely in VisitSwitch.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
@@ -2507,6 +2881,13 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
}
+void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
+ ContextScope scope(this, s, context);
+ DCHECK(s->declarations()->is_empty());
+ Visit(stmt);
+}
+
+
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop) {
ControlScopeForIteration scope(this, stmt, loop);
@@ -2526,7 +2907,9 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
// Delete of an unqualified identifier is only allowed in classic mode but
// deleting "this" is allowed in all language modes.
Variable* variable = expr->expression()->AsVariableProxy()->var();
- DCHECK(is_sloppy(language_mode()) || variable->is_this());
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
value = BuildVariableDelete(variable, expr->id(),
ast_context()->GetStateCombine());
} else if (expr->expression()->IsProperty()) {
@@ -2559,8 +2942,10 @@ void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- operand = BuildVariableLoad(proxy->var(), expr->expression()->id(), pair,
- NOT_CONTEXTUAL);
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+ operand =
+ BuildVariableLoad(proxy->var(), expr->expression()->id(), states, pair,
+ OutputFrameStateCombine::Push(), NOT_CONTEXTUAL);
} else {
VisitForValue(expr->expression());
operand = environment()->Pop();
@@ -2598,6 +2983,8 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
Visit(expr->right());
} else if (ast_context()->IsEffect()) {
environment()->Pop();
+ } else if (ast_context()->IsTest()) {
+ environment()->Poke(0, jsgraph()->TrueConstant());
}
compare_if.Else();
if (!is_logical_and) {
@@ -2605,6 +2992,8 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
Visit(expr->right());
} else if (ast_context()->IsEffect()) {
environment()->Pop();
+ } else if (ast_context()->IsTest()) {
+ environment()->Poke(0, jsgraph()->FalseConstant());
}
compare_if.End();
ast_context()->ReplaceValue();
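
  // For illustration, a hypothetical example: in a test context such as
  // `if (a || b) ...`, a truthy `a` short-circuits evaluation and its value
  // only ever feeds a ToBoolean branch, so it is safe to overwrite it with
  // the true constant above (symmetrically, the false constant replaces a
  // falsy `a` in `a && b`).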
@@ -2622,6 +3011,44 @@ VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
}
+uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
+ DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
+ bool found_eval_scope = false;
+ EnumSet<int, uint32_t> check_depths;
+ for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
+ if (s->num_heap_slots() <= 0) continue;
+ // TODO(mstarzinger): If we have reached an eval scope, we check all
+ // extensions from this point. Replicated from full-codegen, figure out
+ // whether this is still needed. If not, drop {found_eval_scope} below.
+ if (s->is_eval_scope()) found_eval_scope = true;
+ if (!s->calls_sloppy_eval() && !found_eval_scope) continue;
+ int depth = current_scope()->ContextChainLength(s);
+ if (depth > DynamicGlobalAccess::kMaxCheckDepth) {
+ return DynamicGlobalAccess::kFullCheckRequired;
+ }
+ check_depths.Add(depth);
+ }
+ return check_depths.ToIntegral();
+}
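+// A worked example (scope layout assumed for illustration): if extension
+// checks are needed at context-chain depths 0 and 2, the returned bitset is
+// (1 << 0) | (1 << 2) == 5, so a LoadDynamicGlobal only has to check those
+// two context extensions before loading from the global object.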
+
+
+uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
+ DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
+ EnumSet<int, uint32_t> check_depths;
+ for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
+ if (s->num_heap_slots() <= 0) continue;
+ if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
+ int depth = current_scope()->ContextChainLength(s);
+ if (depth > DynamicContextAccess::kMaxCheckDepth) {
+ return DynamicContextAccess::kFullCheckRequired;
+ }
+ check_depths.Add(depth);
+ if (s == variable->scope()) break;
+ }
+ return check_depths.ToIntegral();
+}
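+// For illustration, a hypothetical example:
+//   function f() { let x = 1; function g() { eval(s); return x; } }
+// The load of `x` inside `g` is DYNAMIC_LOCAL: the sloppy eval may introduce
+// a shadowing `x`, so extensions are checked only up to the scope declaring
+// `x`, which is why the walk above stops at {variable->scope()}.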
+
+
Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
DCHECK(environment()->stack_height() >= arity);
Node** all = info()->zone()->NewArray<Node*>(arity);
@@ -2639,9 +3066,7 @@ Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
// object). Otherwise there is nothing left to do here.
if (is_strict(language_mode()) || info()->is_native()) return receiver;
- // There is no need to perform patching if the receiver is never used. Note
- // that scope predicates are purely syntactical, a call to eval might still
- // inspect the receiver value.
+ // There is no need to perform patching if the receiver will never be used.
if (!info()->MayUseThis()) return receiver;
IfBuilder receiver_check(this);
@@ -2659,24 +3084,32 @@ Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context) {
+ Scope* scope = info()->scope();
Node* closure = GetFunctionClosure();
// Allocate a new local context.
Node* local_context =
- info()->scope()->is_script_scope()
- ? BuildLocalScriptContext(info()->scope())
+ scope->is_script_scope()
+ ? BuildLocalScriptContext(scope)
: NewNode(javascript()->CreateFunctionContext(), closure);
+ if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
+ Node* receiver = environment()->RawParameterLookup(0);
+ // Context variable (at bottom of the context chain).
+ Variable* variable = scope->receiver();
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ const Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, local_context, receiver);
+ }
+
// Copy parameters into context if necessary.
- int num_parameters = info()->scope()->num_parameters();
+ int num_parameters = scope->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Variable* variable = info()->scope()->parameter(i);
+ Variable* variable = scope->parameter(i);
if (!variable->IsContextSlot()) continue;
- // Temporary parameter node. The parameter indices are shifted by 1
- // (receiver is parameter index -1 but environment index 0).
- Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start());
+ Node* parameter = environment()->RawParameterLookup(i + 1);
// Context variable (at bottom of the context chain).
- DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, local_context, parameter);
}
@@ -2686,12 +3119,12 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context) {
Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
- Node* closure = GetFunctionClosure();
+ DCHECK(scope->is_script_scope());
// Allocate a new local context.
const Operator* op = javascript()->CreateScriptContext();
Node* scope_info = jsgraph()->Constant(scope->GetScopeInfo(isolate()));
- Node* local_context = NewNode(op, closure, scope_info);
+ Node* local_context = NewNode(op, GetFunctionClosure(), scope_info);
PrepareFrameState(local_context, BailoutId::FunctionEntry());
return local_context;
@@ -2699,12 +3132,12 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
- Node* closure = GetFunctionClosure();
+ DCHECK(scope->is_block_scope());
// Allocate a new local context.
const Operator* op = javascript()->CreateBlockContext();
Node* scope_info = jsgraph()->Constant(scope->GetScopeInfo(isolate()));
- Node* local_context = NewNode(op, scope_info, closure);
+ Node* local_context = NewNode(op, scope_info, GetFunctionClosureForContext());
return local_context;
}
@@ -2721,8 +3154,9 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
// Assign the object to the arguments variable.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
  // This should never lazy deopt, so it is fine to send an invalid bailout id.
- BuildVariableAssignment(arguments, object, Token::ASSIGN, BailoutId::None());
-
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
+ BailoutId::None(), states);
return object;
}
@@ -2731,14 +3165,46 @@ Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
if (rest == NULL) return NULL;
DCHECK(index >= 0);
- const Operator* op = javascript()->CallRuntime(Runtime::kNewRestParamSlow, 1);
- Node* object = NewNode(op, jsgraph()->SmiConstant(index));
+ const Operator* op = javascript()->CallRuntime(Runtime::kNewRestParamSlow, 2);
+ Node* object = NewNode(op, jsgraph()->SmiConstant(index),
+ jsgraph()->SmiConstant(language_mode()));
- // Assign the object to the rest array
+ // Assign the object to the rest parameter variable.
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
  // This should never lazy deopt, so it is fine to send an invalid bailout id.
- BuildVariableAssignment(rest, object, Token::ASSIGN, BailoutId::None());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
+ BailoutId::None(), states);
+ return object;
+}
+
+
+Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
+ if (this_function_var == nullptr) return nullptr;
+
+ // Retrieve the closure we were called with.
+ Node* this_function = GetFunctionClosure();
+
+ // Assign the object to the {.this_function} variable.
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(this_function_var, this_function, Token::INIT_CONST,
+ VectorSlotPair(), BailoutId::None(), states);
+ return this_function;
+}
+
+Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
+ if (new_target_var == nullptr) return nullptr;
+
+ // Retrieve the original constructor in case we are called as a constructor.
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kGetOriginalConstructor, 0);
+ Node* object = NewNode(op);
+
+ // Assign the object to the {new.target} variable.
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(new_target_var, object, Token::INIT_CONST,
+ VectorSlotPair(), BailoutId::None(), states);
return object;
}
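
// For illustration: %GetOriginalConstructor yields the original constructor
// when the function is invoked via 'new' (and presumably undefined on a
// normal call); the result is bound once to the {new.target} variable via
// Token::INIT_CONST.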
@@ -2788,22 +3254,24 @@ Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine,
ContextualMode contextual_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* node = BuildNamedLoad(global, name, feedback,
- TypeFeedbackId::None(), contextual_mode);
- PrepareFrameState(node, bailout_id, OutputFrameStateCombine::Push());
- return node;
+ Node* value = BuildGlobalLoad(global, name, feedback, contextual_mode);
+ states.AddToNode(value, bailout_id, combine);
+ return value;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
// Local var, const, or let variable.
Node* value = environment()->Lookup(variable);
if (mode == CONST_LEGACY) {
@@ -2816,15 +3284,18 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
} else if (mode == LET || mode == CONST) {
// Perform check for uninitialized let/const variables.
+ // TODO(mstarzinger): For now we cannot use the below optimization for
+ // the {this} parameter, because JSConstructStubForDerived magically
+ // passes {the_hole} as a receiver.
if (value->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
- } else if (value->opcode() == IrOpcode::kPhi) {
+ } else if (value->opcode() == IrOpcode::kPhi || variable->is_this()) {
value = BuildHoleCheckThrow(value, variable, value, bailout_id);
}
}
return value;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
bool immutable = variable->maybe_assigned() == kNotAssigned;
@@ -2844,17 +3315,43 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
return value;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
- Node* name = jsgraph()->Constant(variable->name());
- Runtime::FunctionId function_id =
- (contextual_mode == CONTEXTUAL)
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
- const Operator* op = javascript()->CallRuntime(function_id, 2);
- Node* pair = NewNode(op, current_context(), name);
- PrepareFrameState(pair, bailout_id, OutputFrameStateCombine::Push(1));
- return NewNode(common()->Projection(0), pair);
+ Node* value = jsgraph()->TheHoleConstant();
+ Handle<String> name = variable->name();
+ if (mode == DYNAMIC_GLOBAL) {
+ uint32_t check_bitset = ComputeBitsetForDynamicGlobal(variable);
+ const Operator* op = javascript()->LoadDynamicGlobal(
+ name, check_bitset, feedback, contextual_mode);
+ value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(value, bailout_id, combine);
+ } else if (mode == DYNAMIC_LOCAL) {
+ Variable* local = variable->local_if_not_shadowed();
+ DCHECK(local->location() ==
+ VariableLocation::CONTEXT); // Must be context.
+ int depth = current_scope()->ContextChainLength(local->scope());
+ uint32_t check_bitset = ComputeBitsetForDynamicContext(variable);
+ const Operator* op = javascript()->LoadDynamicContext(
+ name, check_bitset, depth, local->index());
+ value = NewNode(op, current_context());
+ PrepareFrameState(value, bailout_id, combine);
+ VariableMode local_mode = local->mode();
+ if (local_mode == CONST_LEGACY) {
+ // Perform check for uninitialized legacy const variables.
+ Node* undefined = jsgraph()->UndefinedConstant();
+ value = BuildHoleCheckSilent(value, undefined, value);
+ } else if (local_mode == LET || local_mode == CONST) {
+ // Perform check for uninitialized let/const variables.
+ value = BuildHoleCheckThrow(value, local, value, bailout_id);
+ }
+ } else if (mode == DYNAMIC) {
+ uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
+ const Operator* op = javascript()->LoadDynamicGlobal(
+ name, check_bitset, feedback, contextual_mode);
+ value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(value, bailout_id, combine);
+ }
+ return value;
}
}
UNREACHABLE();
@@ -2862,31 +3359,33 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
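
The ComputeBitsetForDynamicGlobal/ComputeBitsetForDynamicContext helpers
referenced here encode, one bit per context level, which contexts on the chain
might hold an extension object (e.g. from sloppy eval or 'with') that would
shadow the optimistic binding; kFullCheckRequired disables the fast path
entirely. A standalone sketch of that encoding, assuming a simplified
scope-chain model with illustrative names:

    #include <cstdint>
    #include <vector>

    struct Scope {
      bool may_have_extension;  // True for scopes with sloppy eval or 'with'.
    };

    uint32_t ComputeCheckBitset(const std::vector<Scope>& chain) {
      const uint32_t kFullCheckRequired = 0xFFFFFFFFu;
      uint32_t bitset = 0;
      for (size_t depth = 0; depth < chain.size(); ++depth) {
        // Beyond 32 levels the bitset cannot represent the chain; be
        // conservative and require a full dynamic check.
        if (depth >= 32) return kFullCheckRequired;
        if (chain[depth].may_have_extension) bitset |= (1u << depth);
      }
      return bitset;
    }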
-Node* AstGraphBuilder::BuildVariableDelete(
- Variable* variable, BailoutId bailout_id,
- OutputFrameStateCombine state_combine) {
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
+ BailoutId bailout_id,
+ OutputFrameStateCombine combine) {
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Node* name = jsgraph()->Constant(variable->name());
const Operator* op = javascript()->DeleteProperty(language_mode());
Node* result = NewNode(op, global, name);
- PrepareFrameState(result, bailout_id, state_combine);
+ PrepareFrameState(result, bailout_id, combine);
return result;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
// Local var, const, or let variable or context variable.
- return jsgraph()->BooleanConstant(variable->is_this());
- case Variable::LOOKUP: {
+ return jsgraph()->BooleanConstant(variable->HasThisName(isolate()));
+ }
+ case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
Node* result = NewNode(op, current_context(), name);
- PrepareFrameState(result, bailout_id, state_combine);
+ PrepareFrameState(result, bailout_id, combine);
return result;
}
}
@@ -2896,22 +3395,24 @@ Node* AstGraphBuilder::BuildVariableDelete(
Node* AstGraphBuilder::BuildVariableAssignment(
- Variable* variable, Node* value, Token::Value op, BailoutId bailout_id,
- OutputFrameStateCombine combine) {
+ Variable* variable, Node* value, Token::Value op,
+ const VectorSlotPair& feedback, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states, OutputFrameStateCombine combine) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* store =
- BuildNamedStore(global, name, value, TypeFeedbackId::None());
- PrepareFrameState(store, bailout_id, combine);
+ Node* store = BuildGlobalStore(global, name, value, feedback,
+ TypeFeedbackId::None());
+ states.AddToNode(store, bailout_id, combine);
return store;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
// Local var, const, or let variable.
if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
// Perform an initialization check for legacy const variables.
@@ -2950,7 +3451,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
}
environment()->Bind(variable, value);
return value;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
@@ -2984,7 +3485,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op = javascript()->StoreContext(depth, variable->index());
return NewNode(op, current_context(), value);
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
Node* language = jsgraph()->Constant(language_mode());
@@ -3003,41 +3504,134 @@ Node* AstGraphBuilder::BuildVariableAssignment(
static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
+ FeedbackVectorICSlot slot) {
+ if (js_type_feedback) {
+ js_type_feedback->Record(node, slot);
+ }
+ return node;
+}
+
+
+static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
TypeFeedbackId id) {
- if (js_type_feedback) js_type_feedback->Record(node, id);
+ if (js_type_feedback) {
+ js_type_feedback->Record(node, id);
+ }
return node;
}
Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
- const VectorSlotPair& feedback,
- TypeFeedbackId id) {
- const Operator* op = javascript()->LoadProperty(feedback);
- return Record(js_type_feedback_, NewNode(op, object, key), id);
+ const VectorSlotPair& feedback) {
+ const Operator* op = javascript()->LoadProperty(feedback, language_mode());
+ Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ return Record(js_type_feedback_, node, feedback.slot());
}
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
- const VectorSlotPair& feedback,
- TypeFeedbackId id, ContextualMode mode) {
+ const VectorSlotPair& feedback) {
const Operator* op =
- javascript()->LoadNamed(MakeUnique(name), feedback, mode);
- return Record(js_type_feedback_, NewNode(op, object), id);
+ javascript()->LoadNamed(MakeUnique(name), feedback, language_mode());
+ Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ return Record(js_type_feedback_, node, feedback.slot());
}
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
+ const VectorSlotPair& feedback,
TypeFeedbackId id) {
- const Operator* op = javascript()->StoreProperty(language_mode());
- return Record(js_type_feedback_, NewNode(op, object, key, value), id);
+ const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ if (FLAG_vector_stores) {
+ return Record(js_type_feedback_, node, feedback.slot());
+ }
+ return Record(js_type_feedback_, node, id);
}
Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
- Node* value, TypeFeedbackId id) {
+ Node* value,
+ const VectorSlotPair& feedback,
+ TypeFeedbackId id) {
const Operator* op =
- javascript()->StoreNamed(language_mode(), MakeUnique(name));
- return Record(js_type_feedback_, NewNode(op, object, value), id);
+ javascript()->StoreNamed(language_mode(), MakeUnique(name), feedback);
+ Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ if (FLAG_vector_stores) {
+ return Record(js_type_feedback_, node, feedback.slot());
+ }
+ return Record(js_type_feedback_, node, id);
+}
+
+
+Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
+ Handle<Name> name,
+ const VectorSlotPair& feedback) {
+ Node* name_node = jsgraph()->Constant(name);
+ Node* language = jsgraph()->Constant(language_mode());
+ const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
+ Node* node = NewNode(op, receiver, home_object, name_node, language);
+ return Record(js_type_feedback_, node, feedback.slot());
+}
+
+
+Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
+ Node* key,
+ const VectorSlotPair& feedback) {
+ Node* language = jsgraph()->Constant(language_mode());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ Node* node = NewNode(op, receiver, home_object, key, language);
+ return Record(js_type_feedback_, node, feedback.slot());
+}
+
+
+Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
+ Node* key, Node* value,
+ TypeFeedbackId id) {
+ Runtime::FunctionId function_id = is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy;
+ const Operator* op = javascript()->CallRuntime(function_id, 4);
+ Node* node = NewNode(op, receiver, home_object, key, value);
+ return Record(js_type_feedback_, node, id);
+}
+
+
+Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
+ Handle<Name> name, Node* value,
+ TypeFeedbackId id) {
+ Node* name_node = jsgraph()->Constant(name);
+ Runtime::FunctionId function_id = is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy;
+ const Operator* op = javascript()->CallRuntime(function_id, 4);
+ Node* node = NewNode(op, receiver, home_object, name_node, value);
+ return Record(js_type_feedback_, node, id);
+}
+
+
+Node* AstGraphBuilder::BuildGlobalLoad(Node* object, Handle<Name> name,
+ const VectorSlotPair& feedback,
+ ContextualMode mode) {
+ const Operator* op =
+ javascript()->LoadGlobal(MakeUnique(name), feedback, mode);
+ Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ return Record(js_type_feedback_, node, feedback.slot());
+}
+
+
+Node* AstGraphBuilder::BuildGlobalStore(Node* object, Handle<Name> name,
+ Node* value,
+ const VectorSlotPair& feedback,
+ TypeFeedbackId id) {
+ const Operator* op =
+ javascript()->StoreGlobal(language_mode(), MakeUnique(name), feedback);
+ Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ if (FLAG_vector_stores) {
+ return Record(js_type_feedback_, node, feedback.slot());
+ }
+ return Record(js_type_feedback_, node, id);
}
@@ -3047,6 +3641,13 @@ Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
}
+Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
+ return graph()->NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
+ graph()->start(), graph()->start());
+}
+
+
Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
Node* global = BuildLoadGlobalObject();
Node* builtins =
@@ -3058,7 +3659,7 @@ Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
Node* AstGraphBuilder::BuildLoadGlobalObject() {
const Operator* load_op =
javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
- return NewNode(load_op, function_context_.get());
+ return NewNode(load_op, GetFunctionContext());
}
@@ -3070,6 +3671,19 @@ Node* AstGraphBuilder::BuildLoadGlobalProxy() {
}
+Node* AstGraphBuilder::BuildLoadFeedbackVector() {
+ if (!feedback_vector_.is_set()) {
+ Node* closure = GetFunctionClosure();
+ Node* shared = BuildLoadImmutableObjectField(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector = BuildLoadImmutableObjectField(
+ shared, SharedFunctionInfo::kFeedbackVectorOffset);
+ feedback_vector_.set(vector);
+ }
+ return feedback_vector_.get();
+}
+
+
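BuildLoadFeedbackVector is a memoized chain of immutable field loads (closure
to SharedFunctionInfo to feedback vector); since neither field ever changes
for a given closure, one load suffices for every IC in the function. A
standalone model of the set-once caching, with hypothetical names:

    #include <cassert>

    template <typename T>
    class SetOncePointer {
     public:
      bool is_set() const { return value_ != nullptr; }
      void set(T* value) {
        assert(value_ == nullptr);  // May only be assigned once.
        value_ = value;
      }
      T* get() const {
        assert(value_ != nullptr);
        return value_;
      }

     private:
      T* value_ = nullptr;
    };

    struct Node {};

    class Builder {
     public:
      Node* LoadFeedbackVector() {
        if (!feedback_vector_.is_set()) {
          feedback_vector_.set(EmitImmutableLoads());  // At most once.
        }
        return feedback_vector_.get();
      }

     private:
      Node* EmitImmutableLoads() { return &node_; }  // Placeholder emit.
      Node node_;
      SetOncePointer<Node> feedback_vector_;
    };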
Node* AstGraphBuilder::BuildLoadExternal(ExternalReference reference,
MachineType type) {
return NewNode(jsgraph()->machine()->Load(type),
@@ -3088,26 +3702,34 @@ Node* AstGraphBuilder::BuildStoreExternal(ExternalReference reference,
Node* AstGraphBuilder::BuildToBoolean(Node* input) {
- // TODO(titzer): This should be in a JSOperatorReducer.
+ // TODO(bmeurer, mstarzinger): Refactor this into a separate optimization
+ // method.
switch (input->opcode()) {
- case IrOpcode::kInt32Constant:
- return jsgraph_->BooleanConstant(!Int32Matcher(input).Is(0));
- case IrOpcode::kFloat64Constant:
- return jsgraph_->BooleanConstant(!Float64Matcher(input).Is(0));
- case IrOpcode::kNumberConstant:
- return jsgraph_->BooleanConstant(!NumberMatcher(input).Is(0));
+ case IrOpcode::kNumberConstant: {
+ NumberMatcher m(input);
+ return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
+ }
case IrOpcode::kHeapConstant: {
- Handle<Object> object = HeapObjectMatcher<Object>(input).Value().handle();
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value().handle();
return jsgraph_->BooleanConstant(object->BooleanValue());
}
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSNotEqual:
+ case IrOpcode::kJSStrictEqual:
+ case IrOpcode::kJSStrictNotEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSUnaryNot:
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSInstanceOf:
+ return input;
default:
break;
}
- if (NodeProperties::IsTyped(input)) {
- Type* upper = NodeProperties::GetBounds(input).upper;
- if (upper->Is(Type::Boolean())) return input;
- }
-
return NewNode(javascript()->ToBoolean(), input);
}
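
The rewritten BuildToBoolean folds number constants with full JavaScript
semantics (false exactly for zero and NaN, where the old Int32/Float64 cases
missed NaN), folds heap constants via BooleanValue, and forwards operators
that already yield booleans. The numeric rule in isolation, as runnable
standard C++:

    #include <cassert>
    #include <cmath>

    // JS ToBoolean on a number: false only for +0, -0 and NaN.
    bool NumberToBoolean(double value) {
      return value != 0.0 && !std::isnan(value);
    }

    int main() {
      assert(!NumberToBoolean(0.0));
      assert(!NumberToBoolean(-0.0));   // -0.0 == 0.0, so this folds to false.
      assert(!NumberToBoolean(std::nan("")));
      assert(NumberToBoolean(-1.5));
      assert(NumberToBoolean(1e-300));  // Tiny but nonzero is still true.
      return 0;
    }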
@@ -3122,13 +3744,22 @@ Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
}
+Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
+ Node* object = NewNode(javascript()->ToObject(), input);
+ PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
+ return object;
+}
+
+
Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
- Expression* expr) {
+ Expression* expr,
+ const VectorSlotPair& feedback) {
if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
Handle<Name> name = isolate()->factory()->home_object_symbol();
- Node* store =
- BuildNamedStore(value, name, home_object, TypeFeedbackId::None());
- PrepareFrameState(store, BailoutId::None());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ Node* store = BuildNamedStore(value, name, home_object, feedback,
+ TypeFeedbackId::None());
+ states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
return store;
}
@@ -3139,7 +3770,7 @@ Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
UpdateControlDependencyToLeaveFunction(control);
- return control;
+ return call;
}
@@ -3152,7 +3783,7 @@ Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
UpdateControlDependencyToLeaveFunction(control);
- return control;
+ return call;
}
@@ -3163,7 +3794,7 @@ Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
UpdateControlDependencyToLeaveFunction(control);
- return control;
+ return call;
}
@@ -3174,7 +3805,18 @@ Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
UpdateControlDependencyToLeaveFunction(control);
- return control;
+ return call;
+}
+
+
+Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ Node* call = NewNode(op);
+ PrepareFrameState(call, bailout_id);
+ Node* control = NewNode(common()->Throw(), call);
+ UpdateControlDependencyToLeaveFunction(control);
+ return call;
}
@@ -3260,24 +3902,6 @@ void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
}
-void AstGraphBuilder::PrepareFrameStateAfterAndBefore(
- Node* node, BailoutId ast_id, OutputFrameStateCombine combine,
- Node* frame_state_before) {
- if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
- DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
-
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 0)->opcode());
- NodeProperties::ReplaceFrameStateInput(
- node, 0, environment()->Checkpoint(ast_id, combine));
-
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 1)->opcode());
- NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before);
- }
-}
-
-
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
if (loop_assignment_analysis_ == NULL) return NULL;
@@ -3325,9 +3949,9 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
}
for (int i = 0; i < frame_state_count; i++) {
// The frame state will be inserted later. Here we misuse
- // the {DeadControl} node as a sentinel to be later overwritten
+ // the {Dead} node as a sentinel to be later overwritten
// with the real frame state.
- *current_input++ = jsgraph()->DeadControl();
+ *current_input++ = jsgraph()->Dead();
}
if (has_effect) {
*current_input++ = environment_->GetEffectDependency();
@@ -3336,23 +3960,35 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
*current_input++ = environment_->GetControlDependency();
}
result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
- if (has_effect) {
- environment_->UpdateEffectDependency(result);
- }
if (!environment()->IsMarkedAsUnreachable()) {
// Update the current control dependency for control-producing nodes.
if (NodeProperties::IsControl(result)) {
environment_->UpdateControlDependency(result);
}
+ // Update the current effect dependency for effect-producing nodes.
+ if (result->op()->EffectOutputCount() > 0) {
+ environment_->UpdateEffectDependency(result);
+ }
// Add implicit exception continuation for throwing nodes.
if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
- Node* on_exception = graph()->NewNode(common()->IfException(), result);
+ // Conservative prediction whether caught locally.
+ IfExceptionHint hint = try_catch_nesting_level_ > 0
+ ? IfExceptionHint::kLocallyCaught
+ : IfExceptionHint::kLocallyUncaught;
+ // Copy the environment for the success continuation.
+ Environment* success_env = environment()->CopyForConditional();
+ const Operator* op = common()->IfException(hint);
+ Node* effect = environment()->GetEffectDependency();
+ Node* on_exception = graph()->NewNode(op, effect, result);
environment_->UpdateControlDependency(on_exception);
+ environment_->UpdateEffectDependency(on_exception);
execution_control()->ThrowValue(on_exception);
+ set_environment(success_env);
}
// Add implicit success continuation for throwing nodes.
if (!result->op()->HasProperty(Operator::kNoThrow)) {
- Node* on_success = graph()->NewNode(common()->IfSuccess(), result);
+ const Operator* op = common()->IfSuccess();
+ Node* on_success = graph()->NewNode(op, result);
environment_->UpdateControlDependency(on_success);
}
}
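
The exception wiring above means every potentially throwing node now ends a
basic block: exceptional control is routed through an IfException projection
(annotated with a caught-locally hint) to the handler, while normal control
resumes at an IfSuccess projection. A compact standalone model of that fork
(simplified node representation, illustrative names):

    #include <deque>

    enum class Opcode { kThrowingCall, kIfSuccess, kIfException };
    enum class CatchPrediction { kLocallyCaught, kLocallyUncaught };

    struct Node {
      Opcode opcode;
      Node* control_input;
      CatchPrediction hint;
    };

    struct Projections {
      Node* on_success;
      Node* on_exception;
    };

    // Fork control after a throwing node: the exception edge carries a
    // prediction of whether a surrounding try-catch will handle it.
    Projections SplitThrowingControl(std::deque<Node>* graph, Node* throwing,
                                     int try_catch_nesting_level) {
      CatchPrediction hint = try_catch_nesting_level > 0
                                 ? CatchPrediction::kLocallyCaught
                                 : CatchPrediction::kLocallyUncaught;
      graph->push_back({Opcode::kIfException, throwing, hint});
      Node* on_exception = &graph->back();
      graph->push_back({Opcode::kIfSuccess, throwing, hint});
      Node* on_success = &graph->back();
      return {on_success, on_exception};
    }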
@@ -3364,11 +4000,8 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
void AstGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
if (environment()->IsMarkedAsUnreachable()) return;
- if (exit_control() != NULL) {
- exit = MergeControl(exit_control(), exit);
- }
environment()->MarkAsUnreachable();
- set_exit_control(exit);
+ exit_controls_.push_back(exit);
}
@@ -3384,21 +4017,23 @@ void AstGraphBuilder::Environment::Merge(Environment* other) {
if (this->IsMarkedAsUnreachable()) {
Node* other_control = other->control_dependency_;
Node* inputs[] = {other_control};
- liveness_block_ = other->liveness_block_;
control_dependency_ =
graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
effect_dependency_ = other->effect_dependency_;
values_ = other->values_;
contexts_ = other->contexts_;
+ if (IsLivenessAnalysisEnabled()) {
+ liveness_block_ =
+ builder_->liveness_analyzer()->NewBlock(other->liveness_block());
+ }
return;
}
// Record the merge for the local variable liveness calculation.
- // Unfortunately, we have to mirror the logic in the MergeControl method:
- // connect before merge or loop, or create a new merge otherwise.
- if (FLAG_analyze_environment_liveness) {
- if (GetControlDependency()->opcode() != IrOpcode::kLoop &&
- GetControlDependency()->opcode() != IrOpcode::kMerge) {
+ // For loops, we are connecting a back edge into the existing block;
+ // for merges, we create a new merged block.
+ if (IsLivenessAnalysisEnabled()) {
+ if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
liveness_block_ =
builder_->liveness_analyzer()->NewBlock(liveness_block());
}
@@ -3450,13 +4085,19 @@ void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
UpdateEffectDependency(effect);
+ // Connect the loop to end via Terminate if it's not marked as unreachable.
+ if (!IsMarkedAsUnreachable()) {
+ // Connect the Loop node to end via a Terminate node.
+ Node* terminate = builder_->graph()->NewNode(
+ builder_->common()->Terminate(), effect, control);
+ builder_->exit_controls_.push_back(terminate);
+ }
+
if (builder_->info()->is_osr()) {
// Introduce phis for all context values in the case of an OSR graph.
- for (int i = 0; i < static_cast<int>(contexts()->size()); ++i) {
- Node* val = contexts()->at(i);
- if (!IrOpcode::IsConstantOpcode(val->opcode())) {
- contexts()->at(i) = builder_->NewPhi(1, val, control);
- }
+ for (size_t i = 0; i < contexts()->size(); ++i) {
+ Node* context = contexts()->at(i);
+ contexts()->at(i) = builder_->NewPhi(1, context, control);
}
}
@@ -3470,12 +4111,10 @@ void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
builder_->MergeEffect(effect, osr_loop_entry, control);
for (int i = 0; i < size; ++i) {
- Node* val = values()->at(i);
- if (!IrOpcode::IsConstantOpcode(val->opcode())) {
- Node* osr_value =
- graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
- values()->at(i) = builder_->MergeValue(val, osr_value, control);
- }
+ Node* value = values()->at(i);
+ Node* osr_value =
+ graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
+ values()->at(i) = builder_->MergeValue(value, osr_value, control);
}
// Rename all the contexts in the environment.
@@ -3484,17 +4123,15 @@ void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
Node* osr_context = nullptr;
const Operator* op =
builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+ const Operator* op_inner =
+ builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
int last = static_cast<int>(contexts()->size() - 1);
for (int i = last; i >= 0; i--) {
- Node* val = contexts()->at(i);
- if (!IrOpcode::IsConstantOpcode(val->opcode())) {
- osr_context = (i == last) ? builder_->NewCurrentContextOsrValue()
- : graph->NewNode(op, osr_context, osr_context,
- osr_loop_entry);
- contexts()->at(i) = builder_->MergeValue(val, osr_context, control);
- } else {
- osr_context = val;
- }
+ Node* context = contexts()->at(i);
+ osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
+ : graph->NewNode(op, osr_context, osr_context,
+ osr_loop_entry);
+ contexts()->at(i) = builder_->MergeValue(context, osr_context, control);
}
}
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 74728ddf85..07de774c45 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -35,7 +35,7 @@ class AstGraphBuilder : public AstVisitor {
JSTypeFeedbackTable* js_type_feedback = NULL);
// Creates a graph by visiting the entire AST.
- bool CreateGraph(bool constant_context, bool stack_check = true);
+ bool CreateGraph(bool stack_check = true);
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
@@ -67,6 +67,7 @@ class AstGraphBuilder : public AstVisitor {
class ControlScopeForCatch;
class ControlScopeForFinally;
class Environment;
+ class FrameStateBeforeAndAfter;
friend class ControlBuilder;
Zone* local_zone_;
@@ -89,14 +90,18 @@ class AstGraphBuilder : public AstVisitor {
SetOncePointer<Node> function_context_;
// Tracks how many try-blocks are currently entered.
+ int try_catch_nesting_level_;
int try_nesting_level_;
// Temporary storage for building node input lists.
int input_buffer_size_;
Node** input_buffer_;
- // Merge of all control nodes that exit the function body.
- Node* exit_control_;
+ // Optimization to cache the loaded feedback vector.
+ SetOncePointer<Node> feedback_vector_;
+
+ // Control nodes that exit the function body.
+ ZoneVector<Node*> exit_controls_;
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
@@ -107,6 +112,9 @@ class AstGraphBuilder : public AstVisitor {
// Analyzer of local variable liveness.
LivenessAnalyzer liveness_analyzer_;
+ // Function info for frame state construction.
+ const FrameStateFunctionInfo* const frame_state_function_info_;
+
// Type feedback table.
JSTypeFeedbackTable* js_type_feedback_;
@@ -129,24 +137,26 @@ class AstGraphBuilder : public AstVisitor {
ZoneVector<Handle<Object>>* globals() { return &globals_; }
Scope* current_scope() const;
Node* current_context() const;
- Node* exit_control() const { return exit_control_; }
LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
+ const FrameStateFunctionInfo* frame_state_function_info() const {
+ return frame_state_function_info_;
+ }
void set_environment(Environment* env) { environment_ = env; }
void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
void set_execution_control(ControlScope* ctrl) { execution_control_ = ctrl; }
void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
- void set_exit_control(Node* exit) { exit_control_ = exit; }
// Create the main graph body by visiting the AST.
void CreateGraphBody(bool stack_check);
- // Create the node that represents the outer context of the function.
- void CreateFunctionContext(bool constant_context);
-
// Get or create the node that represents the outer function closure.
+ Node* GetFunctionClosureForContext();
Node* GetFunctionClosure();
+ // Get or create the node that represents the outer function context.
+ Node* GetFunctionContext();
+
// Node creation helpers.
Node* NewNode(const Operator* op, bool incomplete = false) {
return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
@@ -192,9 +202,6 @@ class AstGraphBuilder : public AstVisitor {
Node* NewPhi(int count, Node* input, Node* control);
Node* NewEffectPhi(int count, Node* input, Node* control);
- Node* NewOuterContextParam();
- Node* NewCurrentContextOsrValue();
-
// Helpers for merging control, effect or value dependencies.
Node* MergeControl(Node* control, Node* other);
Node* MergeEffect(Node* value, Node* other, Node* control);
@@ -209,12 +216,9 @@ class AstGraphBuilder : public AstVisitor {
void UpdateControlDependencyToLeaveFunction(Node* exit);
// Builds deoptimization for a given node.
- void PrepareFrameState(
- Node* node, BailoutId ast_id,
- OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore());
- void PrepareFrameStateAfterAndBefore(Node* node, BailoutId ast_id,
- OutputFrameStateCombine combine,
- Node* frame_state_before);
+ void PrepareFrameState(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine framestate_combine =
+ OutputFrameStateCombine::Ignore());
BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
@@ -237,6 +241,11 @@ class AstGraphBuilder : public AstVisitor {
// Named and keyed loads require a VectorSlotPair for successful lowering.
VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
+ // Determine which contexts need to be checked for extension objects that
+ // might shadow the optimistic declaration of dynamic lookup variables.
+ uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
+ uint32_t ComputeBitsetForDynamicContext(Variable* variable);
+
// ===========================================================================
// The following build methods all generate graph fragments and return one
// resulting node. The operand stack height remains the same, variables and
@@ -256,52 +265,83 @@ class AstGraphBuilder : public AstVisitor {
// Builder to create an array of rest parameters, if used.
Node* BuildRestArgumentsArray(Variable* rest, int index);
+ // Builder that assigns to the {.this_function} internal variable if needed.
+ Node* BuildThisFunctionVariable(Variable* this_function_var);
+
+ // Builder that assigns to the {new.target} internal variable if needed.
+ Node* BuildNewTargetVariable(Variable* new_target_var);
+
// Builders for variable load and assignment.
- Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
+ Node* BuildVariableAssignment(Variable* variable, Node* value,
+ Token::Value op, const VectorSlotPair& slot,
BailoutId bailout_id,
- OutputFrameStateCombine state_combine =
+ FrameStateBeforeAndAfter& states,
+ OutputFrameStateCombine framestate_combine =
OutputFrameStateCombine::Ignore());
- Node* BuildVariableDelete(Variable* var, BailoutId bailout_id,
- OutputFrameStateCombine state_combine);
- Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
+ Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
+ OutputFrameStateCombine framestate_combine);
+ Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
+ OutputFrameStateCombine framestate_combine,
ContextualMode mode = CONTEXTUAL);
// Builders for property loads and stores.
Node* BuildKeyedLoad(Node* receiver, Node* key,
- const VectorSlotPair& feedback, TypeFeedbackId id);
+ const VectorSlotPair& feedback);
Node* BuildNamedLoad(Node* receiver, Handle<Name> name,
- const VectorSlotPair& feedback, TypeFeedbackId id,
- ContextualMode mode = NOT_CONTEXTUAL);
+ const VectorSlotPair& feedback);
Node* BuildKeyedStore(Node* receiver, Node* key, Node* value,
- TypeFeedbackId id);
- Node* BuildNamedStore(Node* receiver, Handle<Name>, Node* value,
- TypeFeedbackId id);
+ const VectorSlotPair& feedback, TypeFeedbackId id);
+ Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback, TypeFeedbackId id);
+
+ // Builders for super property loads and stores.
+ Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
+ Node* value, TypeFeedbackId id);
+ Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
+ Handle<Name> name, Node* value, TypeFeedbackId id);
+ Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
+ Handle<Name> name, const VectorSlotPair& feedback);
+ Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
+ const VectorSlotPair& feedback);
+
+ // Builders for global variable loads and stores.
+ Node* BuildGlobalLoad(Node* global, Handle<Name> name,
+ const VectorSlotPair& feedback, ContextualMode mode);
+ Node* BuildGlobalStore(Node* global, Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback, TypeFeedbackId id);
// Builders for accessing the function context.
Node* BuildLoadBuiltinsObject();
Node* BuildLoadGlobalObject();
Node* BuildLoadGlobalProxy();
- Node* BuildLoadClosure();
+ Node* BuildLoadFeedbackVector();
+
+ // Builder for accessing a (potentially immutable) object field.
Node* BuildLoadObjectField(Node* object, int offset);
+ Node* BuildLoadImmutableObjectField(Node* object, int offset);
// Builders for accessing external references.
Node* BuildLoadExternal(ExternalReference ref, MachineType type);
Node* BuildStoreExternal(ExternalReference ref, MachineType type, Node* val);
// Builders for automatic type conversion.
- Node* BuildToBoolean(Node* value);
- Node* BuildToName(Node* value, BailoutId bailout_id);
+ Node* BuildToBoolean(Node* input);
+ Node* BuildToName(Node* input, BailoutId bailout_id);
+ Node* BuildToObject(Node* input, BailoutId bailout_id);
// Builder for adding the [[HomeObject]] to a value if the value came from a
// function literal and needs a home object. Do nothing otherwise.
- Node* BuildSetHomeObject(Node* value, Node* home_object, Expression* expr);
+ Node* BuildSetHomeObject(Node* value, Node* home_object, Expression* expr,
+ const VectorSlotPair& feedback);
// Builders for error reporting at runtime.
Node* BuildThrowError(Node* exception, BailoutId bailout_id);
Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
Node* BuildThrowConstAssignError(BailoutId bailout_id);
Node* BuildThrowStaticPrototypeError(BailoutId bailout_id);
+ Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
// Builders for dynamic hole-checks at runtime.
Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
@@ -331,6 +371,7 @@ class AstGraphBuilder : public AstVisitor {
// Visit statements.
void VisitIfNotNull(Statement* stmt);
+ void VisitInScope(Statement* stmt, Scope* scope, Node* context);
// Visit expressions.
void Visit(Expression* expr);
@@ -360,8 +401,8 @@ class AstGraphBuilder : public AstVisitor {
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value,
+ const VectorSlotPair& feedback,
BailoutId bailout_id);
- void VisitForInBody(ForInStatement* stmt);
// Dispatched from VisitClassLiteral.
void VisitClassLiteralContents(ClassLiteral* expr);
@@ -396,6 +437,10 @@ class AstGraphBuilder::Environment : public ZoneObject {
Node* Lookup(Variable* variable);
void MarkAllLocalsLive();
+ // Raw operations on parameter variables.
+ void RawParameterBind(int index, Node* node);
+ Node* RawParameterLookup(int index);
+
// Operations on the context chain.
Node* Context() const { return contexts_.back(); }
void PushContext(Node* context) { contexts()->push_back(context); }
@@ -459,7 +504,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Mark this environment as being unreachable.
void MarkAsUnreachable() {
- UpdateControlDependency(builder()->jsgraph()->DeadControl());
+ UpdateControlDependency(builder()->jsgraph()->Dead());
+ liveness_block_ = nullptr;
}
bool IsMarkedAsUnreachable() {
return GetControlDependency()->opcode() == IrOpcode::kDead;
@@ -469,20 +515,13 @@ class AstGraphBuilder::Environment : public ZoneObject {
void Merge(Environment* other);
// Copies this environment at a control-flow split point.
- Environment* CopyForConditional() { return Copy(); }
+ Environment* CopyForConditional();
// Copies this environment to a potentially unreachable control-flow point.
- Environment* CopyAsUnreachable() {
- Environment* env = Copy();
- env->MarkAsUnreachable();
- return env;
- }
+ Environment* CopyAsUnreachable();
// Copies this environment at a loop header control-flow point.
- Environment* CopyForLoop(BitVector* assigned, bool is_osr = false) {
- PrepareForLoop(assigned, is_osr);
- return CopyAndShareLiveness();
- }
+ Environment* CopyForLoop(BitVector* assigned, bool is_osr = false);
private:
AstGraphBuilder* builder_;
@@ -497,8 +536,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
Node* locals_node_;
Node* stack_node_;
- explicit Environment(Environment* copy);
- Environment* Copy() { return new (zone()) Environment(this); }
+ explicit Environment(Environment* copy,
+ LivenessAnalyzerBlock* liveness_block);
Environment* CopyAndShareLiveness();
void UpdateStateValues(Node** state_values, int offset, int count);
void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
@@ -509,6 +548,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
NodeVector* values() { return &values_; }
NodeVector* contexts() { return &contexts_; }
LivenessAnalyzerBlock* liveness_block() { return liveness_block_; }
+ bool IsLivenessAnalysisEnabled();
+ bool IsLivenessBlockConsistent();
// Prepare environment to be used as loop header.
void PrepareForLoop(BitVector* assigned, bool is_osr = false);
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 05a4ceb873..61ed4f27cb 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -67,7 +67,8 @@ void ALAA::VisitVariableProxy(VariableProxy* leaf) {}
void ALAA::VisitLiteral(Literal* leaf) {}
void ALAA::VisitRegExpLiteral(RegExpLiteral* leaf) {}
void ALAA::VisitThisFunction(ThisFunction* leaf) {}
-void ALAA::VisitSuperReference(SuperReference* leaf) {}
+void ALAA::VisitSuperPropertyReference(SuperPropertyReference* leaf) {}
+void ALAA::VisitSuperCallReference(SuperCallReference* leaf) {}
// ---------------------------------------------------------------------------
@@ -292,6 +293,6 @@ int LoopAssignmentAnalysis::GetAssignmentCountForTesting(Scope* scope,
}
return count;
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index a8c3cd692a..ca92951670 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -257,19 +257,18 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
effect, if_true1);
Node* etrue1 = vtrue1;
- {
- Node* check2 = TestNotSmi(vtrue1);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
- if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
- vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
- }
+ Node* check2 = TestNotSmi(vtrue1);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
+
+ if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* vfalse1 = ChangeSmiToFloat64(object);
@@ -279,7 +278,18 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
- NodeProperties::ReplaceWithValue(value, phi1, ephi1, merge1);
+ // Wire the new diamond into the graph; {JSToNumber} can still throw.
+ NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
+
+ // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+ // the node and places it inside the diamond. Come up with a helper method!
+ for (Node* use : etrue1->uses()) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(merge1);
+ NodeProperties::ReplaceControlInput(branch2, use);
+ }
+ }
+
return Replace(phi1);
}
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.cc b/deps/v8/src/compiler/coalesced-live-ranges.cc
new file mode 100644
index 0000000000..e81f5518bd
--- /dev/null
+++ b/deps/v8/src/compiler/coalesced-live-ranges.cc
@@ -0,0 +1,148 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
+
+void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
+ UpdateWeightAtAllocation(range);
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ storage().insert({interval->start(), interval->end(), range});
+ }
+}
+
+
+void CoalescedLiveRanges::Remove(LiveRange* range) {
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ storage().erase({interval->start(), interval->end(), nullptr});
+ }
+ range->UnsetAssignedRegister();
+}
+
+
+float CoalescedLiveRanges::GetMaximumConflictingWeight(
+ const LiveRange* range) const {
+ float ret = LiveRange::kInvalidWeight;
+ auto end = storage().end();
+ for (auto query = range->first_interval(); query != nullptr;
+ query = query->next()) {
+ auto conflict = GetFirstConflict(query);
+
+ if (conflict == end) continue;
+ for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
+ // It is possible we'll visit the same range multiple times, because
+ // successive (not necessarily consecutive) intervals belong to the same
+ // range, or because different intervals of the query range have the same
+ // range as conflict.
+ DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
+ ret = Max(ret, conflict->range->weight());
+ if (ret == LiveRange::kMaxWeight) break;
+ }
+ }
+ return ret;
+}
+
+
+void CoalescedLiveRanges::EvictAndRescheduleConflicts(
+ LiveRange* range, AllocationScheduler* scheduler) {
+ auto end = storage().end();
+
+ for (auto query = range->first_interval(); query != nullptr;
+ query = query->next()) {
+ auto conflict = GetFirstConflict(query);
+ if (conflict == end) continue;
+ while (QueryIntersectsAllocatedInterval(query, conflict)) {
+ LiveRange* range_to_evict = conflict->range;
+ // Bypass successive intervals belonging to the same range, because we're
+ // about to remove this range, and we don't want the storage iterator to
+ // become invalid.
+ while (conflict != end && conflict->range == range_to_evict) {
+ ++conflict;
+ }
+
+ DCHECK(range_to_evict->HasRegisterAssigned());
+ CHECK(!range_to_evict->IsFixed());
+ Remove(range_to_evict);
+ UpdateWeightAtEviction(range_to_evict);
+ TRACE("Evicted range %d.\n", range_to_evict->id());
+ scheduler->Schedule(range_to_evict);
+ }
+ }
+}
+
+
+bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
+ LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
+ for (auto i : storage_) {
+ if (i.start < last_end) {
+ return false;
+ }
+ last_end = i.end;
+ }
+ return true;
+}
+
+
+void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+}
+
+
+void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+}
+
+
+CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
+ const UseInterval* query) const {
+ DCHECK(query != nullptr);
+ auto end = storage().end();
+ LifetimePosition q_start = query->start();
+ LifetimePosition q_end = query->end();
+
+ if (storage().empty() || storage().rbegin()->end <= q_start ||
+ storage().begin()->start >= q_end) {
+ return end;
+ }
+
+ auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
+ // ret is either at the end (no stored interval starts strictly after
+ // q_start) or at the first interval whose start is strictly greater. In
+ // either case, the allocated interval just before it may still intersect
+ // the query: it starts at or before the query's start, but may end after
+ // it. So unless we are already at the beginning of the storage - meaning
+ // even the first allocated interval starts after this query's start -
+ // check what lies behind.
+ if (ret != storage().begin()) {
+ --ret;
+ if (!QueryIntersectsAllocatedInterval(query, ret)) {
+ // The interval behind wasn't intersecting, so move back.
+ ++ret;
+ }
+ }
+ if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
+ return end;
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
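
GetFirstConflict depends on the storage being a set of non-overlapping
intervals ordered by start: upper_bound locates the first interval starting
strictly after the query's start, and stepping back at most one element
catches an interval that starts earlier but extends into the query. The same
technique as self-contained C++ over std::set (illustrative types, not the
ZoneSet-backed storage used here):

    #include <iterator>
    #include <set>

    struct Interval {
      int start;
      int end;  // Half-open: [start, end).
      bool operator<(const Interval& other) const {
        return start < other.start;
      }
    };

    using IntervalSet = std::set<Interval>;

    bool Intersects(const Interval& a, const Interval& b) {
      return a.start < b.end && b.start < a.end;
    }

    // First stored interval intersecting q, or end(). Logarithmic: one
    // upper_bound plus at most one step back.
    IntervalSet::const_iterator FirstConflict(const IntervalSet& storage,
                                              const Interval& q) {
      auto it = storage.upper_bound(Interval{q.start, 0});
      if (it != storage.begin()) {
        auto prev = std::prev(it);
        // The predecessor starts at or before q.start and, if it has not
        // ended by then, is the first conflict.
        if (Intersects(*prev, q)) return prev;
      }
      if (it != storage.end() && Intersects(*it, q)) return it;
      return storage.end();
    }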
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.h b/deps/v8/src/compiler/coalesced-live-ranges.h
new file mode 100644
index 0000000000..f12517203f
--- /dev/null
+++ b/deps/v8/src/compiler/coalesced-live-ranges.h
@@ -0,0 +1,109 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COALESCED_LIVE_RANGES_H_
+#define V8_COALESCED_LIVE_RANGES_H_
+
+#include "src/compiler/register-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+class AllocationScheduler;
+
+
+// Collection of live ranges allocated to the same register.
+// It supports efficiently finding all conflicts for a given, non-allocated
+// range. See AllocatedInterval.
+// Allocated live ranges do not intersect. At most, individual use intervals
+// touch. We store, for a live range, an AllocatedInterval corresponding to each
+// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
+// by start position. Then, given the non-intersecting property, we know that
+// for consecutive AllocatedIntervals, the earlier interval's end is less than
+// or equal to the later interval's start.
+// This allows for quick (logarithmic complexity) identification of the first
+// AllocatedInterval to conflict with a given LiveRange, and then for efficient
+// traversal of conflicts.
+class CoalescedLiveRanges : public ZoneObject {
+ public:
+ explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
+ void clear() { storage_.clear(); }
+
+ bool empty() const { return storage_.empty(); }
+
+ // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+ // a range conflicting with the given range.
+ float GetMaximumConflictingWeight(const LiveRange* range) const;
+
+ // Evicts all conflicts of the given range, and reschedules them with the
+ // provided scheduler.
+ void EvictAndRescheduleConflicts(LiveRange* range,
+ AllocationScheduler* scheduler);
+
+ // Allocates a range with a pre-calculated candidate weight.
+ void AllocateRange(LiveRange* range);
+
+ // TODO(mtrofin): remove this in favor of comprehensive unit tests.
+ bool VerifyAllocationsAreValid() const;
+
+ private:
+ static const float kAllocatedRangeMultiplier;
+ // Storage detail for CoalescedLiveRanges.
+ struct AllocatedInterval {
+ LifetimePosition start;
+ LifetimePosition end;
+ LiveRange* range;
+ bool operator<(const AllocatedInterval& other) const {
+ return start < other.start;
+ }
+ bool operator>(const AllocatedInterval& other) const {
+ return start > other.start;
+ }
+ };
+ typedef ZoneSet<AllocatedInterval> IntervalStore;
+ typedef IntervalStore::const_iterator interval_iterator;
+
+ IntervalStore& storage() { return storage_; }
+ const IntervalStore& storage() const { return storage_; }
+
+ // Augment the weight of a range that is about to be allocated.
+ static void UpdateWeightAtAllocation(LiveRange* range);
+
+ // Reduce the weight of a range that has lost allocation.
+ static void UpdateWeightAtEviction(LiveRange* range);
+
+ // Intersection utilities.
+ static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
+ LifetimePosition b_start, LifetimePosition b_end) {
+ return a_start < b_end && b_start < a_end;
+ }
+ static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
+ return {pos, LifetimePosition::Invalid(), nullptr};
+ }
+
+ bool QueryIntersectsAllocatedInterval(const UseInterval* query,
+ interval_iterator& pos) const {
+ DCHECK(query != nullptr);
+ return pos != storage().end() &&
+ Intersects(query->start(), query->end(), pos->start, pos->end);
+ }
+
+ void Remove(LiveRange* range);
+
+ // Get the first interval intersecting query. Since the intervals are sorted,
+ // subsequent intervals intersecting query follow.
+ interval_iterator GetFirstConflict(const UseInterval* query) const;
+
+ IntervalStore storage_;
+ DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_COALESCED_LIVE_RANGES_H_
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 1cab854ce8..f0762e9bbe 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -57,6 +57,10 @@ class InstructionOperandConverter {
return static_cast<uint8_t>(InputInt32(index) & 0x3F);
}
+ ExternalReference InputExternalReference(size_t index) {
+ return ToExternalReference(instr_->InputAt(index));
+ }
+
Handle<HeapObject> InputHeapObject(size_t index) {
return ToHeapObject(instr_->InputAt(index));
}
@@ -108,6 +112,10 @@ class InstructionOperandConverter {
double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
+ ExternalReference ToExternalReference(InstructionOperand* op) {
+ return ToConstant(op).ToExternalReference();
+ }
+
Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
return ToConstant(op).ToHeapObject();
}
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 13087ec0aa..2903c3d370 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -7,6 +7,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
+#include "src/snapshot/serialize.h" // TODO(turbofan): RootIndexMap
namespace v8 {
namespace internal {
@@ -38,18 +39,20 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
- current_source_position_(SourcePosition::Invalid()),
+ current_source_position_(SourcePosition::Unknown()),
masm_(info->isolate(), NULL, 0),
resolver_(this),
safepoints_(code->zone()),
handlers_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
+ inlined_function_count_(0),
translations_(code->zone()),
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ needs_frame_(frame->GetSpillSlotCount() > 0 || code->ContainsCall()) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -59,6 +62,11 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in AssemblePrologue).
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
+
// Emit a code line info recording start event.
PositionsRecorder* recorder = masm()->positions_recorder();
LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
@@ -72,6 +80,15 @@ Handle<Code> CodeGenerator::GenerateCode() {
info->set_prologue_offset(masm()->pc_offset());
AssemblePrologue();
+ // Define deoptimization literals for all inlined functions.
+ DCHECK_EQ(0u, deoptimization_literals_.size());
+ for (auto shared_info : info->inlined_functions()) {
+ if (!shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(shared_info);
+ }
+ }
+ inlined_function_count_ = deoptimization_literals_.size();
+
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (auto const block : code()->instruction_blocks()) {
@@ -80,17 +97,36 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
// Align loop headers on 16-byte boundaries.
if (block->IsLoopHeader()) masm()->Align(16);
+ // Ensure lazy deopt doesn't patch handler entry points.
+ if (block->IsHandler()) EnsureSpaceForLazyDeopt();
// Bind a label for a block.
current_block_ = block->rpo_number();
if (FLAG_code_comments) {
// TODO(titzer): these code comments are a giant memory leak.
Vector<char> buffer = Vector<char>::New(200);
- SNPrintF(buffer, "-- B%d start%s%s%s%s --", block->rpo_number().ToInt(),
- block->IsDeferred() ? " (deferred)" : "",
- block->needs_frame() ? "" : " (no frame)",
- block->must_construct_frame() ? " (construct frame)" : "",
- block->must_deconstruct_frame() ? " (deconstruct frame)" : "");
- masm()->RecordComment(buffer.start());
+ char* buffer_start = buffer.start();
+
+ int next = SNPrintF(
+ buffer, "-- B%d start%s%s%s%s", block->rpo_number().ToInt(),
+ block->IsDeferred() ? " (deferred)" : "",
+ block->needs_frame() ? "" : " (no frame)",
+ block->must_construct_frame() ? " (construct frame)" : "",
+ block->must_deconstruct_frame() ? " (deconstruct frame)" : "");
+
+ buffer = buffer.SubVector(next, buffer.length());
+
+ if (block->IsLoopHeader()) {
+ next =
+ SNPrintF(buffer, " (loop up to %d)", block->loop_end().ToInt());
+ buffer = buffer.SubVector(next, buffer.length());
+ }
+ if (block->loop_header().IsValid()) {
+ next =
+ SNPrintF(buffer, " (in loop %d)", block->loop_header().ToInt());
+ buffer = buffer.SubVector(next, buffer.length());
+ }
+ SNPrintF(buffer, " --");
+ masm()->RecordComment(buffer_start);
}
masm()->bind(GetLabel(current_block_));
for (int i = block->code_start(); i < block->code_end(); ++i) {
@@ -143,8 +179,12 @@ Handle<Code> CodeGenerator::GenerateCode() {
HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
TENURED));
for (size_t i = 0; i < handlers_.size(); ++i) {
+ int position = handlers_[i].handler->pos();
+ HandlerTable::CatchPrediction prediction = handlers_[i].caught_locally
+ ? HandlerTable::CAUGHT
+ : HandlerTable::UNCAUGHT;
table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
- table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
+ table->SetReturnHandler(static_cast<int>(i), position, prediction);
}
result->set_handler_table(*table);
}
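
RecordCallPosition (below) now stores, per call with a handler, whether the
handler is a local catch; the loop above then writes that flag into the
handler table as a CAUGHT/UNCAUGHT prediction for the runtime's unwinder. A
condensed standalone sketch of the bookkeeping (illustrative types):

    #include <vector>

    enum class CatchPrediction { CAUGHT, UNCAUGHT };

    struct HandlerInfo {
      bool caught_locally;
      int handler_pos;  // Stands in for the bound handler label's position.
      int pc_offset;
    };

    struct TableEntry {
      int return_offset;
      int handler_pos;
      CatchPrediction prediction;
    };

    // One table row per recorded call site, as in GenerateCode() above.
    std::vector<TableEntry> BuildHandlerTable(
        const std::vector<HandlerInfo>& handlers) {
      std::vector<TableEntry> table;
      table.reserve(handlers.size());
      for (const HandlerInfo& h : handlers) {
        table.push_back({h.pc_offset, h.handler_pos,
                         h.caught_locally ? CatchPrediction::CAUGHT
                                          : CatchPrediction::UNCAUGHT});
      }
      return table;
    }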
@@ -191,7 +231,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
int* offset_return) {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- if (object.is_identical_to(info()->context())) {
+ if (object.is_identical_to(info()->context()) && !info()->is_osr()) {
*offset_return = StandardFrameConstants::kContextOffset;
return true;
} else if (object.is_identical_to(info()->closure())) {
@@ -206,7 +246,12 @@ bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
bool CodeGenerator::IsMaterializableFromRoot(
Handle<HeapObject> object, Heap::RootListIndex* index_return) {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- return isolate()->heap()->GetRootListIndex(object, index_return);
+ RootIndexMap map(isolate());
+ int root_index = map.Lookup(*object);
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ *index_return = static_cast<Heap::RootListIndex>(root_index);
+ return true;
+ }
}
return false;
}
@@ -256,11 +301,10 @@ void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
SourcePosition source_position;
if (!code()->GetSourcePosition(instr, &source_position)) return;
if (source_position == current_source_position_) return;
- DCHECK(!source_position.IsInvalid());
current_source_position_ = source_position;
if (source_position.IsUnknown()) return;
int code_pos = source_position.raw();
- masm()->positions_recorder()->RecordPosition(source_position.raw());
+ masm()->positions_recorder()->RecordPosition(code_pos);
masm()->positions_recorder()->WriteRecordedPositions();
if (FLAG_code_comments) {
Vector<char> buffer = Vector<char>::New(256);
@@ -302,7 +346,8 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translation_array);
- data->SetInlinedFunctionCount(Smi::FromInt(0));
+ data->SetInlinedFunctionCount(
+ Smi::FromInt(static_cast<int>(inlined_function_count_)));
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
// TODO(jarin) The following code was copied over from Lithium, not sure
// whether the scope or the IsOptimizing condition is really needed.
@@ -366,9 +411,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (flags & CallDescriptor::kHasExceptionHandler) {
InstructionOperandConverter i(this, instr);
- RpoNumber handler_rpo =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
- handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
+ bool caught = flags & CallDescriptor::kHasLocalCatchHandler;
+ RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
+ handlers_.push_back({caught, GetLabel(handler_rpo), masm()->pc_offset()});
}
if (flags & CallDescriptor::kNeedsNopAfterCall) {
@@ -427,17 +472,20 @@ FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
return code()->GetFrameStateDescriptor(state_id);
}
-struct OperandAndType {
- OperandAndType(InstructionOperand* operand, MachineType type)
- : operand_(operand), type_(type) {}
- InstructionOperand* operand_;
- MachineType type_;
+namespace {
+
+struct OperandAndType {
+ InstructionOperand* const operand;
+ MachineType const type;
};
-static OperandAndType TypedOperandForFrameState(
- FrameStateDescriptor* descriptor, Instruction* instr,
- size_t frame_state_offset, size_t index, OutputFrameStateCombine combine) {
+
+OperandAndType TypedOperandForFrameState(FrameStateDescriptor* descriptor,
+ Instruction* instr,
+ size_t frame_state_offset,
+ size_t index,
+ OutputFrameStateCombine combine) {
DCHECK(index < descriptor->GetSize(combine));
switch (combine.kind()) {
case OutputFrameStateCombine::kPushOutput: {
@@ -446,8 +494,7 @@ static OperandAndType TypedOperandForFrameState(
descriptor->GetSize(OutputFrameStateCombine::Ignore());
// If the index is past the existing stack items, return the output.
if (index >= size_without_output) {
- return OperandAndType(instr->OutputAt(index - size_without_output),
- kMachAnyTagged);
+ return {instr->OutputAt(index - size_without_output), kMachAnyTagged};
}
break;
}
@@ -456,51 +503,53 @@ static OperandAndType TypedOperandForFrameState(
descriptor->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
if (index >= index_from_top &&
index < index_from_top + instr->OutputCount()) {
- return OperandAndType(instr->OutputAt(index - index_from_top),
- kMachAnyTagged);
+ return {instr->OutputAt(index - index_from_top), kMachAnyTagged};
}
break;
}
- return OperandAndType(instr->InputAt(frame_state_offset + index),
- descriptor->GetType(index));
+ return {instr->InputAt(frame_state_offset + index),
+ descriptor->GetType(index)};
}
+} // namespace
+
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, Instruction* instr,
Translation* translation, size_t frame_state_offset,
OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
- if (descriptor->outer_state() != NULL) {
+ if (descriptor->outer_state() != nullptr) {
BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
translation, frame_state_offset,
OutputFrameStateCombine::Ignore());
}
+ frame_state_offset += descriptor->outer_state()->GetTotalSize();
- int id = Translation::kSelfLiteralId;
- if (!descriptor->jsfunction().is_null()) {
- id = DefineDeoptimizationLiteral(
- Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
+ Handle<SharedFunctionInfo> shared_info;
+ if (!descriptor->shared_info().ToHandle(&shared_info)) {
+ shared_info = info()->shared_info();
}
+ int shared_info_id = DefineDeoptimizationLiteral(shared_info);
switch (descriptor->type()) {
- case JS_FRAME:
+ case FrameStateType::kJavaScriptFunction:
translation->BeginJSFrame(
- descriptor->bailout_id(), id,
+ descriptor->bailout_id(), shared_info_id,
static_cast<unsigned int>(descriptor->GetSize(state_combine) -
- descriptor->parameters_count()));
+ (1 + descriptor->parameters_count())));
break;
- case ARGUMENTS_ADAPTOR:
+ case FrameStateType::kArgumentsAdaptor:
translation->BeginArgumentsAdaptorFrame(
- id, static_cast<unsigned int>(descriptor->parameters_count()));
+ shared_info_id,
+ static_cast<unsigned int>(descriptor->parameters_count()));
break;
}
- frame_state_offset += descriptor->outer_state()->GetTotalSize();
for (size_t i = 0; i < descriptor->GetSize(state_combine); i++) {
OperandAndType op = TypedOperandForFrameState(
descriptor, instr, frame_state_offset, i, state_combine);
- AddTranslationForOperand(translation, instr, op.operand_, op.type_);
+ AddTranslationForOperand(translation, instr, op.operand, op.type);
}
}
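The recursion above emits the outer-most frame first, so for a function f that inlines g the translation is laid out roughly as follows (sketch; f and g are illustrative):

    BeginJSFrame(bailout_id_f, <literal id of f's SharedFunctionInfo>, ...)
    BeginJSFrame(bailout_id_g, <literal id of g's SharedFunctionInfo>, ...)

with frame_state_offset advanced past the outer frame's values before the inlined frame's operands are translated.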
@@ -577,10 +626,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
case Constant::kFloat64:
- DCHECK(type == kMachFloat64 || type == kMachAnyTagged ||
- type == kRepTagged || type == (kTypeNumber | kRepTagged) ||
- type == (kTypeInt32 | kRepTagged) ||
- type == (kTypeUint32 | kRepTagged));
+ DCHECK((type & (kRepFloat64 | kRepTagged)) != 0);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
@@ -590,8 +636,12 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
default:
CHECK(false);
}
- int literal_id = DefineDeoptimizationLiteral(constant_object);
- translation->StoreLiteral(literal_id);
+ if (constant_object.is_identical_to(info()->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int literal_id = DefineDeoptimizationLiteral(constant_object);
+ translation->StoreLiteral(literal_id);
+ }
} else {
CHECK(false);
}
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 4ffb3dd617..d1545d10b9 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -160,6 +160,7 @@ class CodeGenerator final : public GapResolver::Assembler {
};
struct HandlerInfo {
+ bool caught_locally;
Label* handler;
int pc_offset;
};
@@ -171,6 +172,7 @@ class CodeGenerator final : public GapResolver::Assembler {
InstructionSequence* const code_;
CompilationInfo* const info_;
Label* const labels_;
+ Label return_label_;
RpoNumber current_block_;
SourcePosition current_source_position_;
MacroAssembler masm_;
@@ -179,11 +181,13 @@ class CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
ZoneDeque<Handle<Object>> deoptimization_literals_;
+ size_t inlined_function_count_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
+ bool needs_frame_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 824cdbdab1..c1cd75ef7b 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -7,20 +7,65 @@
#include <algorithm>
#include "src/compiler/common-operator.h"
-#include "src/compiler/js-graph.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+enum class Decision { kUnknown, kTrue, kFalse };
+
+Decision DecideCondition(Node* const cond) {
+ switch (cond->opcode()) {
+ case IrOpcode::kInt32Constant: {
+ Int32Matcher mcond(cond);
+ return mcond.Value() ? Decision::kTrue : Decision::kFalse;
+ }
+ case IrOpcode::kInt64Constant: {
+ Int64Matcher mcond(cond);
+ return mcond.Value() ? Decision::kTrue : Decision::kFalse;
+ }
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher mcond(cond);
+ return mcond.Value().handle()->BooleanValue() ? Decision::kTrue
+ : Decision::kFalse;
+ }
+ default:
+ return Decision::kUnknown;
+ }
+}
+
+} // namespace
+
+
+CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine)
+ : AdvancedReducer(editor),
+ graph_(graph),
+ common_(common),
+ machine_(machine),
+ dead_(graph->NewNode(common->Dead())) {}
+
+
Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kBranch:
+ return ReduceBranch(node);
+ case IrOpcode::kMerge:
+ return ReduceMerge(node);
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kPhi:
return ReducePhi(node);
+ case IrOpcode::kReturn:
+ return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
default:
@@ -30,46 +75,155 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
}
+Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ Node* const cond = node->InputAt(0);
+ // Swap IfTrue/IfFalse on {branch} if {cond} is a BooleanNot and use the input
+ // to BooleanNot as new condition for {branch}. Note we assume that {cond} was
+ // already properly optimized before we get here (as guaranteed by the graph
+ // reduction logic).
+ if (cond->opcode() == IrOpcode::kBooleanNot) {
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ use->set_op(common()->IfFalse());
+ break;
+ case IrOpcode::kIfFalse:
+ use->set_op(common()->IfTrue());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ // Update the condition of {branch}. No need to mark the uses for revisit,
+ // since we tell the graph reducer that the {branch} was changed and the
+ // graph reduction logic will ensure that the uses are revisited properly.
+ node->ReplaceInput(0, cond->InputAt(0));
+ // Negate the hint for {branch}.
+ node->set_op(common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
+ return Changed(node);
+ }
+ Decision const decision = DecideCondition(cond);
+ if (decision == Decision::kUnknown) return NoChange();
+ Node* const control = node->InputAt(1);
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, (decision == Decision::kTrue) ? control : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, (decision == Decision::kFalse) ? control : dead());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return Replace(dead());
+}
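A sketch of the two rewrites performed here, in IR terms (node names illustrative):

    // BooleanNot elimination:
    //   Branch(BooleanNot(c)); IfTrue -> t; IfFalse -> f
    //     => Branch(c); IfFalse -> t; IfTrue -> f   (branch hint negated)
    //
    // Constant folding, e.g. for Branch(Int32Constant(1), control):
    //   IfTrue uses are replaced by {control}, IfFalse uses by {dead},
    //   and the Branch itself is replaced by {dead}.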
+
+
+Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
+ DCHECK_EQ(IrOpcode::kMerge, node->opcode());
+ //
+ // Check if this is a merge that belongs to an unused diamond, which means
+ // that:
+ //
+ // a) the {Merge} has no {Phi} or {EffectPhi} uses, and
+ // b) the {Merge} has two inputs, one {IfTrue} and one {IfFalse}, which are
+ // both owned by the Merge, and
+  //    c) the {IfTrue} and {IfFalse} nodes point to the same {Branch}.
+ //
+ if (node->InputCount() == 2) {
+ for (Node* const use : node->uses()) {
+ if (IrOpcode::IsPhiOpcode(use->opcode())) return NoChange();
+ }
+ Node* if_true = node->InputAt(0);
+ Node* if_false = node->InputAt(1);
+ if (if_true->opcode() != IrOpcode::kIfTrue) std::swap(if_true, if_false);
+ if (if_true->opcode() == IrOpcode::kIfTrue &&
+ if_false->opcode() == IrOpcode::kIfFalse &&
+ if_true->InputAt(0) == if_false->InputAt(0) && if_true->OwnedBy(node) &&
+ if_false->OwnedBy(node)) {
+ Node* const branch = if_true->InputAt(0);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ DCHECK(branch->OwnedBy(if_true, if_false));
+ Node* const control = branch->InputAt(1);
+ // Mark the {branch} as {Dead}.
+ branch->set_op(common()->Dead());
+ branch->TrimInputCount(0);
+ return Replace(control);
+ }
+ }
+ return NoChange();
+}
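The unused diamond collapses to its incoming control (sketch):

    //     control                      control
    //        |                            |
    //   Branch(cond)          =>    (uses of Merge)
    //    /         \
    // IfTrue    IfFalse    ...provided no Phi or EffectPhi hangs off the
    //    \         /       Merge and the two projections are owned by it.
    //      Merge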
+
+
Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
- int const input_count = node->InputCount();
- if (input_count > 1) {
- Node* const replacement = node->InputAt(0);
- for (int i = 1; i < input_count - 1; ++i) {
- if (node->InputAt(i) != replacement) return NoChange();
+ int const input_count = node->InputCount() - 1;
+ DCHECK_LE(1, input_count);
+ Node* const merge = node->InputAt(input_count);
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ DCHECK_EQ(input_count, merge->InputCount());
+ Node* const effect = node->InputAt(0);
+ DCHECK_NE(node, effect);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ if (input == node) {
+ // Ignore redundant inputs.
+ DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
+ continue;
}
- return Replace(replacement);
+ if (input != effect) return NoChange();
}
- return NoChange();
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Replace(effect);
}
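In IR terms the check amounts to (sketch):

    // EffectPhi(e, e, ..., e; Merge)   => e   (all inputs agree)
    // EffectPhi(e, <phi itself>; Loop) => e   (self-inputs only occur on loops)

Revisiting the {merge} matters because removing its last phi may make it eligible for ReduceMerge above.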
Reduction CommonOperatorReducer::ReducePhi(Node* node) {
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
- int const input_count = node->InputCount();
- if (input_count == 3) {
- Node* vtrue = NodeProperties::GetValueInput(node, 0);
- Node* vfalse = NodeProperties::GetValueInput(node, 1);
- Node* merge = NodeProperties::GetControlInput(node);
- DiamondMatcher matcher(merge);
- if (matcher.Matched()) {
- if (matcher.IfTrue() == merge->InputAt(1)) std::swap(vtrue, vfalse);
- Node* cond = matcher.Branch()->InputAt(0);
+ int const input_count = node->InputCount() - 1;
+ DCHECK_LE(1, input_count);
+ Node* const merge = node->InputAt(input_count);
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ DCHECK_EQ(input_count, merge->InputCount());
+ if (input_count == 2) {
+ Node* vtrue = node->InputAt(0);
+ Node* vfalse = node->InputAt(1);
+ Node* if_true = merge->InputAt(0);
+ Node* if_false = merge->InputAt(1);
+ if (if_true->opcode() != IrOpcode::kIfTrue) {
+ std::swap(if_true, if_false);
+ std::swap(vtrue, vfalse);
+ }
+ if (if_true->opcode() == IrOpcode::kIfTrue &&
+ if_false->opcode() == IrOpcode::kIfFalse &&
+ if_true->InputAt(0) == if_false->InputAt(0)) {
+ Node* const branch = if_true->InputAt(0);
+ Node* const cond = branch->InputAt(0);
if (cond->opcode() == IrOpcode::kFloat32LessThan) {
Float32BinopMatcher mcond(cond);
if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
vfalse->opcode() == IrOpcode::kFloat32Sub) {
Float32BinopMatcher mvfalse(vfalse);
if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
return Change(node, machine()->Float32Abs(), vtrue);
}
}
if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->HasFloat32Min()) {
- return Change(node, machine()->Float32Min(), vtrue, vfalse);
+ machine()->Float32Min().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
} else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->HasFloat32Max()) {
- return Change(node, machine()->Float32Max(), vtrue, vfalse);
+ machine()->Float32Max().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
}
} else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
Float64BinopMatcher mcond(cond);
@@ -77,25 +231,72 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
vfalse->opcode() == IrOpcode::kFloat64Sub) {
Float64BinopMatcher mvfalse(vfalse);
if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
return Change(node, machine()->Float64Abs(), vtrue);
}
}
if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->HasFloat64Min()) {
- return Change(node, machine()->Float64Min(), vtrue, vfalse);
+ machine()->Float64Min().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
} else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->HasFloat64Max()) {
- return Change(node, machine()->Float64Max(), vtrue, vfalse);
+ machine()->Float64Max().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
}
}
}
}
- if (input_count > 1) {
- Node* const replacement = node->InputAt(0);
- for (int i = 1; i < input_count - 1; ++i) {
- if (node->InputAt(i) != replacement) return NoChange();
+ Node* const value = node->InputAt(0);
+ DCHECK_NE(node, value);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ if (input == node) {
+ // Ignore redundant inputs.
+ DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
+ continue;
+ }
+ if (input != value) return NoChange();
+ }
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Replace(value);
+}
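The diamonds matched above correspond to these source-level patterns, assuming the machine operators are supported (sketch; x and y illustrative):

    // 0 < x ? x : 0 - x  =>  Float64Abs(x)
    // x < y ? x : y      =>  Float64Min(x, y)   (if Float64Min().IsSupported())
    // x < y ? y : x      =>  Float64Max(x, y)   (if Float64Max().IsSupported())

The Float32 cases are symmetric, and the fallthrough still replaces a fully redundant Phi(v, v, ..., v; merge) with v.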
+
+
+Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
+ DCHECK_EQ(IrOpcode::kReturn, node->opcode());
+ Node* const value = node->InputAt(0);
+ Node* const effect = node->InputAt(1);
+ Node* const control = node->InputAt(2);
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control &&
+ effect->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(effect) == control &&
+ control->opcode() == IrOpcode::kMerge) {
+ int const control_input_count = control->InputCount();
+ DCHECK_NE(0, control_input_count);
+ DCHECK_EQ(control_input_count, value->InputCount() - 1);
+ DCHECK_EQ(control_input_count, effect->InputCount() - 1);
+ Node* const end = graph()->end();
+ DCHECK_EQ(IrOpcode::kEnd, end->opcode());
+ DCHECK_NE(0, end->InputCount());
+ for (int i = 0; i < control_input_count; ++i) {
+      // Create a new {Return} and connect it to {end}. We don't need to mark
+      // {end} for revisit, because we replace {node} with {dead} below, and
+      // {node} was previously connected to {end}, so we know for sure that
+      // the reducer logic will visit {end} again.
+ Node* ret = graph()->NewNode(common()->Return(), value->InputAt(i),
+ effect->InputAt(i), control->InputAt(i));
+ end->set_op(common()->End(end->InputCount() + 1));
+ end->AppendInput(graph()->zone(), ret);
}
- return Replace(replacement);
+ // Mark the merge {control} and return {node} as {dead}.
+ Replace(control, dead());
+ return Replace(dead());
}
return NoChange();
}
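The effect of the reduction, sketched on a two-predecessor merge:

    // Return(Phi(v1, v2; m), EffectPhi(e1, e2; m), m = Merge(c1, c2))
    //   => Return(v1, e1, c1) and Return(v2, e2, c2), both appended to End;
    //      the Merge and the original Return are then replaced by {dead}.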
@@ -103,15 +304,19 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
DCHECK_EQ(IrOpcode::kSelect, node->opcode());
- Node* cond = NodeProperties::GetValueInput(node, 0);
- Node* vtrue = NodeProperties::GetValueInput(node, 1);
- Node* vfalse = NodeProperties::GetValueInput(node, 2);
+ Node* const cond = node->InputAt(0);
+ Node* const vtrue = node->InputAt(1);
+ Node* const vfalse = node->InputAt(2);
if (vtrue == vfalse) return Replace(vtrue);
+ switch (DecideCondition(cond)) {
+ case Decision::kTrue:
+ return Replace(vtrue);
+ case Decision::kFalse:
+ return Replace(vfalse);
+ case Decision::kUnknown:
+ break;
+ }
switch (cond->opcode()) {
- case IrOpcode::kHeapConstant: {
- HeapObjectMatcher<HeapObject> mcond(cond);
- return Replace(mcond.Value().handle()->BooleanValue() ? vtrue : vfalse);
- }
case IrOpcode::kFloat32LessThan: {
Float32BinopMatcher mcond(cond);
if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
@@ -122,11 +327,11 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
}
}
if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->HasFloat32Min()) {
- return Change(node, machine()->Float32Min(), vtrue, vfalse);
+ machine()->Float32Min().IsSupported()) {
+ return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
} else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->HasFloat32Max()) {
- return Change(node, machine()->Float32Max(), vtrue, vfalse);
+ machine()->Float32Max().IsSupported()) {
+ return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
}
break;
}
@@ -140,11 +345,11 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
}
}
if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->HasFloat64Min()) {
- return Change(node, machine()->Float64Min(), vtrue, vfalse);
+ machine()->Float64Min().IsSupported()) {
+ return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
} else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->HasFloat64Max()) {
- return Change(node, machine()->Float64Max(), vtrue, vfalse);
+ machine()->Float64Max().IsSupported()) {
+ return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
}
break;
}
@@ -173,19 +378,6 @@ Reduction CommonOperatorReducer::Change(Node* node, Operator const* op, Node* a,
return Changed(node);
}
-
-CommonOperatorBuilder* CommonOperatorReducer::common() const {
- return jsgraph()->common();
-}
-
-
-Graph* CommonOperatorReducer::graph() const { return jsgraph()->graph(); }
-
-
-MachineOperatorBuilder* CommonOperatorReducer::machine() const {
- return jsgraph()->machine();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 851971fac8..8582d6b633 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -14,33 +14,40 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class Graph;
-class JSGraph;
class MachineOperatorBuilder;
class Operator;
// Performs strength reduction on nodes that have common operators.
-class CommonOperatorReducer final : public Reducer {
+class CommonOperatorReducer final : public AdvancedReducer {
public:
- explicit CommonOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+ CommonOperatorReducer(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine);
~CommonOperatorReducer() final {}
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceBranch(Node* node);
+ Reduction ReduceMerge(Node* node);
Reduction ReduceEffectPhi(Node* node);
Reduction ReducePhi(Node* node);
+ Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
- CommonOperatorBuilder* common() const;
- Graph* graph() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- MachineOperatorBuilder* machine() const;
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ Node* dead() const { return dead_; }
- JSGraph* const jsgraph_;
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ MachineOperatorBuilder* const machine_;
+ Node* const dead_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 7e88134a24..ac1f754575 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -36,6 +36,21 @@ BranchHint BranchHintOf(const Operator* const op) {
}
+size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
+
+
+std::ostream& operator<<(std::ostream& os, IfExceptionHint hint) {
+ switch (hint) {
+ case IfExceptionHint::kLocallyCaught:
+ return os << "Caught";
+ case IfExceptionHint::kLocallyUncaught:
+ return os << "Uncaught";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
}
@@ -101,21 +116,30 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
#define CACHED_OP_LIST(V) \
- V(Dead, Operator::kFoldable, 0, 0, 0, 0, 0, 1) \
- V(End, Operator::kKontrol, 0, 0, 1, 0, 0, 0) \
+ V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfException, Operator::kKontrol, 0, 0, 1, 1, 0, 1) \
V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
V(Deoptimize, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
- V(Terminate, Operator::kNoThrow, 0, 1, 1, 0, 0, 1) \
+ V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)
+#define CACHED_END_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7) \
+ V(8)
+
+
#define CACHED_EFFECT_PHI_LIST(V) \
V(1) \
V(2) \
@@ -200,6 +224,31 @@ struct CommonOperatorGlobalCache final {
CACHED_OP_LIST(CACHED)
#undef CACHED
+ template <IfExceptionHint kCaughtLocally>
+ struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
+ IfExceptionOperator()
+ : Operator1<IfExceptionHint>( // --
+ IrOpcode::kIfException, Operator::kKontrol, // opcode
+ "IfException", // name
+ 0, 1, 1, 1, 1, 1, // counts
+ kCaughtLocally) {} // parameter
+ };
+ IfExceptionOperator<IfExceptionHint::kLocallyCaught> kIfExceptionCOperator;
+ IfExceptionOperator<IfExceptionHint::kLocallyUncaught> kIfExceptionUOperator;
+
+ template <size_t kInputCount>
+ struct EndOperator final : public Operator {
+ EndOperator()
+ : Operator( // --
+ IrOpcode::kEnd, Operator::kKontrol, // opcode
+ "End", // name
+ 0, 0, kInputCount, 0, 0, 0) {} // counts
+ };
+#define CACHED_END(input_count) \
+ EndOperator<input_count> kEnd##input_count##Operator;
+ CACHED_END_LIST(CACHED_END)
+#undef CACHED_END
+
template <BranchHint kBranchHint>
struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
@@ -329,6 +378,25 @@ CACHED_OP_LIST(CACHED)
#undef CACHED
+const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
+ DCHECK_NE(0u, control_input_count); // Disallow empty ends.
+ switch (control_input_count) {
+#define CACHED_END(input_count) \
+ case input_count: \
+ return &cache_.kEnd##input_count##Operator;
+ CACHED_END_LIST(CACHED_END)
+#undef CACHED_END
+ default:
+ break;
+ }
+ // Uncached.
+  return new (zone()) Operator(  // --
+ IrOpcode::kEnd, Operator::kKontrol, // opcode
+ "End", // name
+ 0, 0, control_input_count, 0, 0, 0); // counts
+}
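Callers get a cached operator for arities 1 through 8 and a zone-allocated one beyond that; growing an existing {End}, as ReduceReturn above does, is a resize-and-append (sketch):

    end->set_op(common()->End(end->InputCount() + 1));
    end->AppendInput(graph()->zone(), ret);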
+
+
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
switch (hint) {
case BranchHint::kNone:
@@ -343,6 +411,18 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
}
+const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
+ switch (hint) {
+ case IfExceptionHint::kLocallyCaught:
+ return &cache_.kIfExceptionCOperator;
+ case IfExceptionHint::kLocallyUncaught:
+ return &cache_.kIfExceptionUOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
DCHECK_GE(control_output_count, 3u); // Disallow trivial switches.
return new (zone()) Operator( // --
@@ -361,9 +441,7 @@ const Operator* CommonOperatorBuilder::IfValue(int32_t index) {
}
-const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
- // Outputs are formal parameters, plus context, receiver, and JSFunction.
- const int value_output_count = num_formal_parameters + 3;
+const Operator* CommonOperatorBuilder::Start(int value_output_count) {
return new (zone()) Operator( // --
IrOpcode::kStart, Operator::kFoldable, // opcode
"Start", // name
@@ -608,13 +686,14 @@ const Operator* CommonOperatorBuilder::TypedStateValues(
const Operator* CommonOperatorBuilder::FrameState(
- FrameStateType type, BailoutId bailout_id,
- OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
- return new (zone()) Operator1<FrameStateCallInfo>( // --
- IrOpcode::kFrameState, Operator::kPure, // opcode
- "FrameState", // name
- 4, 0, 0, 1, 0, 0, // counts
- FrameStateCallInfo(type, bailout_id, state_combine, jsfunction));
+ BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ const FrameStateFunctionInfo* function_info) {
+ FrameStateInfo state_info(bailout_id, state_combine, function_info);
+ return new (zone()) Operator1<FrameStateInfo>( // --
+ IrOpcode::kFrameState, Operator::kPure, // opcode
+ "FrameState", // name
+ 5, 0, 0, 1, 0, 0, // counts
+ state_info); // parameter
}
@@ -694,6 +773,14 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
}
+const FrameStateFunctionInfo*
+CommonOperatorBuilder::CreateFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info) {
+ return new (zone()->New(sizeof(FrameStateFunctionInfo)))
+ FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
+}
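Together with the new FrameState signature, a frame state is now built roughly like this (sketch; the *_count, bailout_id and shared_info values are illustrative):

    const FrameStateFunctionInfo* function_info =
        common()->CreateFrameStateFunctionInfo(
            FrameStateType::kJavaScriptFunction, parameter_count, local_count,
            shared_info);
    const Operator* op = common()->FrameState(
        bailout_id, OutputFrameStateCombine::Ignore(), function_info);

Note the operator now takes 5 value inputs instead of 4: the function is passed as an input rather than being carried as a MaybeHandle on the operator.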
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index eea93717b0..d9e5f85b9e 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -27,6 +27,19 @@ class Operator;
// Prediction hint for branches.
enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
+inline BranchHint NegateBranchHint(BranchHint hint) {
+ switch (hint) {
+ case BranchHint::kNone:
+ return hint;
+ case BranchHint::kTrue:
+ return BranchHint::kFalse;
+ case BranchHint::kFalse:
+ return BranchHint::kTrue;
+ }
+ UNREACHABLE();
+ return hint;
+}
+
inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
std::ostream& operator<<(std::ostream&, BranchHint);
@@ -34,6 +47,14 @@ std::ostream& operator<<(std::ostream&, BranchHint);
BranchHint BranchHintOf(const Operator* const);
+// Prediction whether throw-site is surrounded by any local catch-scope.
+enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
+
+size_t hash_value(IfExceptionHint hint);
+
+std::ostream& operator<<(std::ostream&, IfExceptionHint);
+
+
class SelectParameters final {
public:
explicit SelectParameters(MachineType type,
@@ -89,12 +110,12 @@ class CommonOperatorBuilder final : public ZoneObject {
explicit CommonOperatorBuilder(Zone* zone);
const Operator* Dead();
- const Operator* End();
+ const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
- const Operator* IfException();
+ const Operator* IfException(IfExceptionHint hint);
const Operator* Switch(size_t control_output_count);
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
@@ -103,7 +124,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Return();
const Operator* Terminate();
- const Operator* Start(int num_formal_parameters);
+ const Operator* Start(int value_output_count);
const Operator* Loop(int control_input_count);
const Operator* Merge(int control_input_count);
const Operator* Parameter(int index, const char* debug_name = nullptr);
@@ -128,10 +149,9 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Finish(int arguments);
const Operator* StateValues(int arguments);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
- const Operator* FrameState(
- FrameStateType type, BailoutId bailout_id,
- OutputFrameStateCombine state_combine,
- MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>());
+ const Operator* FrameState(BailoutId bailout_id,
+ OutputFrameStateCombine state_combine,
+ const FrameStateFunctionInfo* function_info);
const Operator* Call(const CallDescriptor* descriptor);
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
@@ -140,6 +160,11 @@ class CommonOperatorBuilder final : public ZoneObject {
// with {size} inputs.
const Operator* ResizeMergeOrPhi(const Operator* op, int size);
+ // Constructs function info for frame state construction.
+ const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info);
+
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index 0e4f1683b8..3579828355 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -143,6 +143,16 @@ void BlockBuilder::Break() {
}
+void BlockBuilder::BreakWhen(Node* condition, BranchHint hint) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition, hint);
+ control_if.Then();
+ Break();
+ control_if.Else();
+ control_if.End();
+}
+
+
void BlockBuilder::EndBlock() {
break_environment_->Merge(environment());
set_environment(break_environment_);
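A usage sketch for the new compound command, assuming the usual BeginBlock/EndBlock pairing:

    BlockBuilder block(builder);
    block.BeginBlock();
    // ... emit body ...
    block.BreakWhen(condition);  // jumps to the break environment if true
    block.EndBlock();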
@@ -150,6 +160,7 @@ void BlockBuilder::EndBlock() {
void TryCatchBuilder::BeginTry() {
+ exit_environment_ = environment()->CopyAsUnreachable();
catch_environment_ = environment()->CopyAsUnreachable();
catch_environment_->Push(the_hole());
}
@@ -164,7 +175,7 @@ void TryCatchBuilder::Throw(Node* exception) {
void TryCatchBuilder::EndTry() {
- exit_environment_ = environment();
+ exit_environment_->Merge(environment());
exception_node_ = catch_environment_->Pop();
set_environment(catch_environment_);
}
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index c85714ea10..9f3afce836 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -131,6 +131,9 @@ class BlockBuilder final : public ControlBuilder {
// Primitive support for break.
void Break() final;
+ // Compound control commands for conditional break.
+ void BreakWhen(Node* condition, BranchHint = BranchHint::kNone);
+
private:
Environment* break_environment_; // Environment after the block exits.
};
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index c2198046e3..25e183e1f6 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -4,7 +4,8 @@
#include "src/compiler/control-flow-optimizer.h"
-#include "src/compiler/js-graph.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -12,10 +13,15 @@ namespace v8 {
namespace internal {
namespace compiler {
-ControlFlowOptimizer::ControlFlowOptimizer(JSGraph* jsgraph, Zone* zone)
- : jsgraph_(jsgraph),
+ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine,
+ Zone* zone)
+ : graph_(graph),
+ common_(common),
+ machine_(machine),
queue_(zone),
- queued_(jsgraph->graph(), 2),
+ queued_(graph, 2),
zone_(zone) {}
@@ -267,19 +273,6 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
return true;
}
-
-CommonOperatorBuilder* ControlFlowOptimizer::common() const {
- return jsgraph()->common();
-}
-
-
-Graph* ControlFlowOptimizer::graph() const { return jsgraph()->graph(); }
-
-
-MachineOperatorBuilder* ControlFlowOptimizer::machine() const {
- return jsgraph()->machine();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 82f672a2a0..f72fa58ad7 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -15,14 +15,14 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class Graph;
-class JSGraph;
class MachineOperatorBuilder;
class Node;
class ControlFlowOptimizer final {
public:
- ControlFlowOptimizer(JSGraph* jsgraph, Zone* zone);
+ ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine, Zone* zone);
void Optimize();
@@ -34,13 +34,14 @@ class ControlFlowOptimizer final {
bool TryBuildSwitch(Node* node);
bool TryCloneBranch(Node* node);
- CommonOperatorBuilder* common() const;
- Graph* graph() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- MachineOperatorBuilder* machine() const;
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
Zone* zone() const { return zone_; }
- JSGraph* const jsgraph_;
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ MachineOperatorBuilder* const machine_;
ZoneQueue<Node*> queue_;
NodeMarker<bool> queued_;
Zone* const zone_;
diff --git a/deps/v8/src/compiler/control-reducer.cc b/deps/v8/src/compiler/control-reducer.cc
deleted file mode 100644
index 6910e6c043..0000000000
--- a/deps/v8/src/compiler/control-reducer.cc
+++ /dev/null
@@ -1,603 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/control-reducer.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-marker.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_turbo_reduction) PrintF(__VA_ARGS__); \
- } while (false)
-
-enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
-enum Decision { kFalse, kUnknown, kTrue };
-
-class ReachabilityMarker : public NodeMarker<uint8_t> {
- public:
- explicit ReachabilityMarker(Graph* graph) : NodeMarker<uint8_t>(graph, 8) {}
- bool SetReachableFromEnd(Node* node) {
- uint8_t before = Get(node);
- Set(node, before | kFromEnd);
- return before & kFromEnd;
- }
- bool IsReachableFromEnd(Node* node) { return Get(node) & kFromEnd; }
- bool SetReachableFromStart(Node* node) {
- uint8_t before = Get(node);
- Set(node, before | kFromStart);
- return before & kFromStart;
- }
- bool IsReachableFromStart(Node* node) { return Get(node) & kFromStart; }
- void Push(Node* node) { Set(node, Get(node) | kFwStack); }
- void Pop(Node* node) { Set(node, Get(node) & ~kFwStack); }
- bool IsOnStack(Node* node) { return Get(node) & kFwStack; }
-
- private:
- enum Bit { kFromEnd = 1, kFromStart = 2, kFwStack = 4 };
-};
-
-
-class ControlReducerImpl final : public AdvancedReducer {
- public:
- Zone* zone_;
- JSGraph* jsgraph_;
- int max_phis_for_select_;
-
- ControlReducerImpl(Editor* editor, Zone* zone, JSGraph* jsgraph)
- : AdvancedReducer(editor),
- zone_(zone),
- jsgraph_(jsgraph),
- max_phis_for_select_(0) {}
-
- Graph* graph() { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() { return jsgraph_->common(); }
- Node* dead() { return jsgraph_->DeadControl(); }
-
- // Finish reducing the graph by trimming nodes and/or connecting NTLs.
- bool Finish() final {
- bool done = true;
- // Gather all nodes backwards-reachable from end (through inputs).
- ReachabilityMarker marked(graph());
- NodeVector nodes(zone_);
- AddNodesReachableFromRoots(marked, nodes);
-
- // Walk forward through control nodes, looking for back edges to nodes
- // that are not connected to end. Those are non-terminating loops (NTLs).
- Node* start = graph()->start();
- marked.Push(start);
- marked.SetReachableFromStart(start);
-
- // We use a stack of (Node, Node::UseEdges::iterator) pairs to avoid
- // O(n^2) traversal.
- typedef std::pair<Node*, Node::UseEdges::iterator> FwIter;
- ZoneVector<FwIter> fw_stack(zone_);
- fw_stack.push_back(FwIter(start, start->use_edges().begin()));
-
- while (!fw_stack.empty()) {
- Node* node = fw_stack.back().first;
- TRACE("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic());
- bool pop = true;
- while (fw_stack.back().second != node->use_edges().end()) {
- Edge edge = *(fw_stack.back().second);
- Node* succ = edge.from();
- if (NodeProperties::IsControlEdge(edge) &&
- succ->op()->ControlOutputCount() > 0) {
- // Only walk control edges to control nodes.
- if (marked.IsOnStack(succ) && !marked.IsReachableFromEnd(succ)) {
- // {succ} is on stack and not reachable from end.
- Node* added = ConnectNTL(succ);
- nodes.push_back(added);
- marked.SetReachableFromEnd(added);
- AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
-
- // Reset the use iterators for the entire stack.
- for (size_t i = 0; i < fw_stack.size(); i++) {
- FwIter& iter = fw_stack[i];
- fw_stack[i] = FwIter(iter.first, iter.first->use_edges().begin());
- }
- pop = false; // restart traversing successors of this node.
- break;
- }
- if (!marked.IsReachableFromStart(succ)) {
- // {succ} is not yet reached from start.
- marked.SetReachableFromStart(succ);
- if (succ->opcode() != IrOpcode::kOsrLoopEntry) {
-              // Skip OsrLoopEntry; it forms a confusing irreducible loop.
- marked.Push(succ);
- fw_stack.push_back(FwIter(succ, succ->use_edges().begin()));
- pop = false; // "recurse" into successor control node.
- break;
- }
- }
- }
- ++fw_stack.back().second;
- }
- if (pop) {
- marked.Pop(node);
- fw_stack.pop_back();
- }
- }
-
- // Trim references from dead nodes to live nodes first.
- TrimNodes(marked, nodes);
-
- // Any control nodes not reachable from start are dead, even loops.
- for (size_t i = 0; i < nodes.size(); i++) {
- Node* node = nodes[i];
- if (node->op()->ControlOutputCount() > 0 &&
- !marked.IsReachableFromStart(node) &&
- node->opcode() != IrOpcode::kDead) {
- TRACE("Dead: #%d:%s\n", node->id(), node->op()->mnemonic());
- node->ReplaceUses(dead());
- done = false;
- }
- }
-
- return done;
- }
-
- // Connect {loop}, the header of a non-terminating loop, to the end node.
- Node* ConnectNTL(Node* loop) {
- TRACE("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic());
- DCHECK_EQ(IrOpcode::kLoop, loop->opcode());
-
- // Collect all loop effects.
- NodeVector effects(zone_);
- for (auto edge : loop->use_edges()) {
- DCHECK_EQ(loop, edge.to());
- DCHECK(NodeProperties::IsControlEdge(edge));
- switch (edge.from()->opcode()) {
- case IrOpcode::kPhi:
- break;
- case IrOpcode::kEffectPhi:
- effects.push_back(edge.from());
- break;
- default:
- break;
- }
- }
-
- // Compute effects for the Return.
- Node* effect = graph()->start();
- int const effects_count = static_cast<int>(effects.size());
- if (effects_count == 1) {
- effect = effects[0];
- } else if (effects_count > 1) {
- effect = graph()->NewNode(common()->EffectSet(effects_count),
- effects_count, &effects.front());
- }
-
- // Add a terminate to connect the NTL to the end.
- Node* terminate = graph()->NewNode(common()->Terminate(), effect, loop);
-
- Node* end = graph()->end();
- if (end->opcode() == IrOpcode::kDead) {
- // End is actually the dead node. Make a new end.
- end = graph()->NewNode(common()->End(), terminate);
- graph()->SetEnd(end);
- return end;
- }
- // End is not dead.
- Node* merge = end->InputAt(0);
- if (merge == NULL || merge->opcode() == IrOpcode::kDead) {
- // The end node died; just connect end to {terminate}.
- end->ReplaceInput(0, terminate);
- } else if (merge->opcode() != IrOpcode::kMerge) {
- // Introduce a final merge node for {end->InputAt(0)} and {terminate}.
- merge = graph()->NewNode(common()->Merge(2), merge, terminate);
- end->ReplaceInput(0, merge);
- terminate = merge;
- } else {
- // Append a new input to the final merge at the end.
- merge->AppendInput(graph()->zone(), terminate);
- merge->set_op(common()->Merge(merge->InputCount()));
- }
- return terminate;
- }
-
- void AddNodesReachableFromRoots(ReachabilityMarker& marked,
- NodeVector& nodes) {
- jsgraph_->GetCachedNodes(&nodes); // Consider cached nodes roots.
- Node* end = graph()->end();
- marked.SetReachableFromEnd(end);
- if (!end->IsDead()) nodes.push_back(end); // Consider end to be a root.
- for (Node* node : nodes) marked.SetReachableFromEnd(node);
- AddBackwardsReachableNodes(marked, nodes, 0);
- }
-
- void AddBackwardsReachableNodes(ReachabilityMarker& marked, NodeVector& nodes,
- size_t cursor) {
- while (cursor < nodes.size()) {
- Node* node = nodes[cursor++];
- for (Node* const input : node->inputs()) {
- if (!marked.SetReachableFromEnd(input)) {
- nodes.push_back(input);
- }
- }
- }
- }
-
- void Trim() {
- // Gather all nodes backwards-reachable from end through inputs.
- ReachabilityMarker marked(graph());
- NodeVector nodes(zone_);
- jsgraph_->GetCachedNodes(&nodes);
- AddNodesReachableFromRoots(marked, nodes);
- TrimNodes(marked, nodes);
- }
-
- void TrimNodes(ReachabilityMarker& marked, NodeVector& nodes) {
- // Remove dead->live edges.
- for (size_t j = 0; j < nodes.size(); j++) {
- Node* node = nodes[j];
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (!marked.IsReachableFromEnd(use)) {
- TRACE("DeadLink: #%d:%s(%d) -> #%d:%s\n", use->id(),
- use->op()->mnemonic(), edge.index(), node->id(),
- node->op()->mnemonic());
- edge.UpdateTo(NULL);
- }
- }
- }
-#if DEBUG
- // Verify that no inputs to live nodes are NULL.
- for (Node* node : nodes) {
- for (int index = 0; index < node->InputCount(); index++) {
- Node* input = node->InputAt(index);
- if (input == nullptr) {
- std::ostringstream str;
- str << "GraphError: node #" << node->id() << ":" << *node->op()
- << "(input @" << index << ") == null";
- FATAL(str.str().c_str());
- }
- if (input->opcode() == IrOpcode::kDead) {
- std::ostringstream str;
- str << "GraphError: node #" << node->id() << ":" << *node->op()
- << "(input @" << index << ") == dead";
- FATAL(str.str().c_str());
- }
- }
- for (Node* use : node->uses()) {
- CHECK(marked.IsReachableFromEnd(use));
- }
- }
-#endif
- }
-
- //===========================================================================
- // Reducer implementation: perform reductions on a node.
- //===========================================================================
- Reduction Reduce(Node* node) override {
- if (node->op()->ControlInputCount() == 1 ||
- node->opcode() == IrOpcode::kLoop) {
- // If a node has only one control input and it is dead, replace with dead.
- Node* control = NodeProperties::GetControlInput(node);
- if (control->opcode() == IrOpcode::kDead) {
- TRACE("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic());
- return Replace(control);
- }
- }
-
- Node* result = node;
- // Reduce branches, phis, and merges.
- switch (node->opcode()) {
- case IrOpcode::kBranch:
- result = ReduceBranch(node);
- break;
- case IrOpcode::kIfTrue:
- result = ReduceIfProjection(node, kTrue);
- break;
- case IrOpcode::kIfFalse:
- result = ReduceIfProjection(node, kFalse);
- break;
- case IrOpcode::kLoop: // fallthrough
- case IrOpcode::kMerge:
- result = ReduceMerge(node);
- break;
- case IrOpcode::kSelect:
- result = ReduceSelect(node);
- break;
- case IrOpcode::kPhi:
- case IrOpcode::kEffectPhi:
- result = ReducePhi(node);
- break;
- default:
- break;
- }
-
- return result == node ? NoChange() : Replace(result);
- }
-
- // Try to statically fold a condition.
- Decision DecideCondition(Node* cond, bool recurse = true) {
- switch (cond->opcode()) {
- case IrOpcode::kInt32Constant:
- return Int32Matcher(cond).Is(0) ? kFalse : kTrue;
- case IrOpcode::kInt64Constant:
- return Int64Matcher(cond).Is(0) ? kFalse : kTrue;
- case IrOpcode::kNumberConstant:
- return NumberMatcher(cond).Is(0) ? kFalse : kTrue;
- case IrOpcode::kHeapConstant: {
- Handle<Object> object =
- HeapObjectMatcher<Object>(cond).Value().handle();
- return object->BooleanValue() ? kTrue : kFalse;
- }
- case IrOpcode::kPhi: {
- if (!recurse) return kUnknown; // Only go one level deep checking phis.
- Decision result = kUnknown;
- // Check if all inputs to a phi result in the same decision.
- for (int i = cond->op()->ValueInputCount() - 1; i >= 0; i--) {
- // Recurse only one level, since phis can be involved in cycles.
- Decision decision = DecideCondition(cond->InputAt(i), false);
- if (decision == kUnknown) return kUnknown;
- if (result == kUnknown) result = decision;
- if (result != decision) return kUnknown;
- }
- return result;
- }
- default:
- break;
- }
- if (NodeProperties::IsTyped(cond)) {
- // If the node has a range type, check whether the range excludes 0.
- Type* type = NodeProperties::GetBounds(cond).upper;
- if (type->IsRange() && (type->Min() > 0 || type->Max() < 0)) return kTrue;
- }
- return kUnknown;
- }
-
- // Reduce redundant selects.
- Node* ReduceSelect(Node* const node) {
- Node* const tvalue = node->InputAt(1);
- Node* const fvalue = node->InputAt(2);
- if (tvalue == fvalue) return tvalue;
- Decision result = DecideCondition(node->InputAt(0));
- if (result == kTrue) return tvalue;
- if (result == kFalse) return fvalue;
- return node;
- }
-
- // Reduce redundant phis.
- Node* ReducePhi(Node* node) {
- int n = node->InputCount();
- if (n <= 1) return dead(); // No non-control inputs.
- if (n == 2) return node->InputAt(0); // Only one non-control input.
-
- // Never remove an effect phi from a (potentially non-terminating) loop.
- // Otherwise, we might end up eliminating effect nodes, such as calls,
- // before the loop.
- if (node->opcode() == IrOpcode::kEffectPhi &&
- NodeProperties::GetControlInput(node)->opcode() == IrOpcode::kLoop) {
- return node;
- }
-
- Node* replacement = NULL;
- auto const inputs = node->inputs();
- for (auto it = inputs.begin(); n > 1; --n, ++it) {
- Node* input = *it;
- if (input->opcode() == IrOpcode::kDead) continue; // ignore dead inputs.
- if (input != node && input != replacement) { // non-redundant input.
- if (replacement != NULL) return node;
- replacement = input;
- }
- }
- return replacement == NULL ? dead() : replacement;
- }
-
- // Reduce branches.
- Node* ReduceBranch(Node* branch) {
- if (DecideCondition(branch->InputAt(0)) != kUnknown) {
- for (Node* use : branch->uses()) Revisit(use);
- }
- return branch;
- }
-
- // Reduce merges by trimming away dead inputs from the merge and phis.
- Node* ReduceMerge(Node* node) {
- // Count the number of live inputs.
- int live = 0;
- int index = 0;
- int live_index = 0;
- for (Node* const input : node->inputs()) {
- if (input->opcode() != IrOpcode::kDead) {
- live++;
- live_index = index;
- }
- index++;
- }
-
- TRACE("ReduceMerge: #%d:%s (%d of %d live)\n", node->id(),
- node->op()->mnemonic(), live, index);
-
- if (live == 0) return dead(); // no remaining inputs.
-
- // Gather phis and effect phis to be edited.
- NodeVector phis(zone_);
- for (Node* const use : node->uses()) {
- if (NodeProperties::IsPhi(use)) phis.push_back(use);
- }
-
- if (live == 1) {
- // All phis are redundant. Replace them with their live input.
- for (Node* const phi : phis) {
- Replace(phi, phi->InputAt(live_index));
- }
- // The merge itself is redundant.
- return node->InputAt(live_index);
- }
-
- DCHECK_LE(2, live);
-
- if (live < node->InputCount()) {
- // Edit phis in place, removing dead inputs and revisiting them.
- for (Node* const phi : phis) {
- TRACE(" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
- phi->op()->mnemonic(), live);
- RemoveDeadInputs(node, phi);
- Revisit(phi);
- }
- // Edit the merge in place, removing dead inputs.
- RemoveDeadInputs(node, node);
- }
-
- DCHECK_EQ(live, node->InputCount());
-
- // Try to remove dead diamonds or introduce selects.
- if (live == 2 && CheckPhisForSelect(phis)) {
- DiamondMatcher matcher(node);
- if (matcher.Matched() && matcher.IfProjectionsAreOwned()) {
- // Dead diamond, i.e. neither the IfTrue nor the IfFalse nodes
- // have uses except for the Merge. Remove the branch if there
- // are no phis or replace phis with selects.
- Node* control = NodeProperties::GetControlInput(matcher.Branch());
- if (phis.size() == 0) {
- // No phis. Remove the branch altogether.
- TRACE(" DeadDiamond: #%d:Branch #%d:IfTrue #%d:IfFalse\n",
- matcher.Branch()->id(), matcher.IfTrue()->id(),
- matcher.IfFalse()->id());
- return control;
- } else {
- // A small number of phis. Replace with selects.
- Node* cond = matcher.Branch()->InputAt(0);
- for (Node* phi : phis) {
- Node* select = graph()->NewNode(
- common()->Select(OpParameter<MachineType>(phi),
- BranchHintOf(matcher.Branch()->op())),
- cond, matcher.TrueInputOf(phi), matcher.FalseInputOf(phi));
- TRACE(" MatchSelect: #%d:Branch #%d:IfTrue #%d:IfFalse -> #%d\n",
- matcher.Branch()->id(), matcher.IfTrue()->id(),
- matcher.IfFalse()->id(), select->id());
- Replace(phi, select);
- }
- return control;
- }
- }
- }
-
- return node;
- }
-
- bool CheckPhisForSelect(const NodeVector& phis) {
- if (phis.size() > static_cast<size_t>(max_phis_for_select_)) return false;
- for (Node* phi : phis) {
- if (phi->opcode() != IrOpcode::kPhi) return false; // no EffectPhis.
- }
- return true;
- }
-
- // Reduce if projections if the branch has a constant input.
- Node* ReduceIfProjection(Node* node, Decision decision) {
- Node* branch = node->InputAt(0);
- DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
- Decision result = DecideCondition(branch->InputAt(0));
- if (result == decision) {
- // Fold a branch by replacing IfTrue/IfFalse with the branch control.
- TRACE(" BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
- branch->op()->mnemonic(), node->id(), node->op()->mnemonic());
- return branch->InputAt(1);
- }
- return result == kUnknown ? node : dead();
- }
-
- // Remove inputs to {node} corresponding to the dead inputs to {merge}
- // and compact the remaining inputs, updating the operator.
- void RemoveDeadInputs(Node* merge, Node* node) {
- int live = 0;
- for (int i = 0; i < merge->InputCount(); i++) {
- // skip dead inputs.
- if (merge->InputAt(i)->opcode() == IrOpcode::kDead) continue;
- // compact live inputs.
- if (live != i) node->ReplaceInput(live, node->InputAt(i));
- live++;
- }
- // compact remaining inputs.
- int total = live;
- for (int i = merge->InputCount(); i < node->InputCount(); i++) {
- if (total != i) node->ReplaceInput(total, node->InputAt(i));
- total++;
- }
- DCHECK_EQ(total, live + node->InputCount() - merge->InputCount());
- DCHECK_NE(total, node->InputCount());
- node->TrimInputCount(total);
- node->set_op(common()->ResizeMergeOrPhi(node->op(), live));
- }
-};
-
-
-void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
- int max_phis_for_select) {
- GraphReducer graph_reducer(jsgraph->graph(), zone);
- ControlReducerImpl impl(&graph_reducer, zone, jsgraph);
- impl.max_phis_for_select_ = max_phis_for_select;
- graph_reducer.AddReducer(&impl);
- graph_reducer.ReduceGraph();
-}
-
-
-namespace {
-
-class DummyEditor final : public AdvancedReducer::Editor {
- public:
- void Replace(Node* node, Node* replacement) final {
- node->ReplaceUses(replacement);
- }
- void Revisit(Node* node) final {}
-};
-
-} // namespace
-
-
-void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) {
- DummyEditor editor;
- ControlReducerImpl impl(&editor, zone, jsgraph);
- impl.Trim();
-}
-
-
-Node* ControlReducer::ReduceMerge(JSGraph* jsgraph, Node* node,
- int max_phis_for_select) {
- Zone zone;
- DummyEditor editor;
- ControlReducerImpl impl(&editor, &zone, jsgraph);
- impl.max_phis_for_select_ = max_phis_for_select;
- return impl.ReduceMerge(node);
-}
-
-
-Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph, Node* node) {
- Zone zone;
- DummyEditor editor;
- ControlReducerImpl impl(&editor, &zone, jsgraph);
- return impl.ReducePhi(node);
-}
-
-
-Node* ControlReducer::ReduceIfNodeForTesting(JSGraph* jsgraph, Node* node) {
- Zone zone;
- DummyEditor editor;
- ControlReducerImpl impl(&editor, &zone, jsgraph);
- switch (node->opcode()) {
- case IrOpcode::kIfTrue:
- return impl.ReduceIfProjection(node, kTrue);
- case IrOpcode::kIfFalse:
- return impl.ReduceIfProjection(node, kFalse);
- default:
- return node;
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
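
For context, the branch folding performed by the deleted ReduceIfProjection can be modeled standalone. The sketch below is not V8 code (Node, the opcodes and DecideCondition are simplified stand-ins), but it shows the same rule: a projection of a branch whose condition is a constant collapses to the branch's control input on the taken arm and to Dead on the untaken one.

// branch_fold_sketch.cc -- simplified model of IfTrue/IfFalse folding.
// Build: g++ -std=c++14 branch_fold_sketch.cc
#include <cassert>
#include <cstdio>
#include <vector>

enum Opcode { kBranch, kIfTrue, kIfFalse, kInt32Constant, kDead, kStart };
enum Decision { kTrue, kFalse, kUnknown };

struct Node {
  Opcode opcode;
  std::vector<Node*> inputs;  // Branch: {condition, control}.
  int value = 0;              // Payload for kInt32Constant.
};

Decision DecideCondition(const Node* cond) {
  if (cond->opcode != kInt32Constant) return kUnknown;
  return cond->value == 0 ? kFalse : kTrue;
}

// Mirrors ReduceIfProjection: a projection of a decided branch is replaced
// by the branch's control input (taken arm) or by Dead (untaken arm).
Node* ReduceIfProjection(Node* proj, Decision decision, Node* dead) {
  Node* branch = proj->inputs[0];
  assert(branch->opcode == kBranch);
  Decision result = DecideCondition(branch->inputs[0]);
  if (result == decision) return branch->inputs[1];  // Fold to control.
  return result == kUnknown ? proj : dead;
}

int main() {
  Node start{kStart, {}}, dead{kDead, {}};
  Node cond{kInt32Constant, {}};
  cond.value = 1;  // Condition is always true.
  Node branch{kBranch, {&cond, &start}};
  Node if_true{kIfTrue, {&branch}}, if_false{kIfFalse, {&branch}};
  assert(ReduceIfProjection(&if_true, kTrue, &dead) == &start);
  assert(ReduceIfProjection(&if_false, kFalse, &dead) == &dead);
  std::printf("constant branch folded\n");
}
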
diff --git a/deps/v8/src/compiler/control-reducer.h b/deps/v8/src/compiler/control-reducer.h
deleted file mode 100644
index 06fb9e5df4..0000000000
--- a/deps/v8/src/compiler/control-reducer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CONTROL_REDUCER_H_
-#define V8_COMPILER_CONTROL_REDUCER_H_
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class Zone;
-
-
-namespace compiler {
-
-// Forward declarations.
-class JSGraph;
-class CommonOperatorBuilder;
-class Node;
-
-class ControlReducer {
- public:
- // Perform branch folding and dead code elimination on the graph.
- static void ReduceGraph(Zone* zone, JSGraph* graph,
- int max_phis_for_select = 0);
-
- // Trim nodes in the graph that are not reachable from end.
- static void TrimGraph(Zone* zone, JSGraph* graph);
-
- // Reduces a single merge node and attached phis.
- static Node* ReduceMerge(JSGraph* graph, Node* node,
- int max_phis_for_select = 0);
-
- // Testing interface.
- static Node* ReducePhiForTesting(JSGraph* graph, Node* node);
- static Node* ReduceIfNodeForTesting(JSGraph* graph, Node* node);
-};
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_CONTROL_REDUCER_H_
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
new file mode 100644
index 0000000000..755620a3cd
--- /dev/null
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -0,0 +1,145 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/dead-code-elimination.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common)
+ : AdvancedReducer(editor),
+ graph_(graph),
+ common_(common),
+ dead_(graph->NewNode(common->Dead())) {}
+
+
+Reduction DeadCodeElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kEnd:
+ return ReduceEnd(node);
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ return ReduceLoopOrMerge(node);
+ default:
+ return ReduceNode(node);
+ }
+ UNREACHABLE();
+ return NoChange();
+}
+
+
+Reduction DeadCodeElimination::ReduceEnd(Node* node) {
+ DCHECK_EQ(IrOpcode::kEnd, node->opcode());
+ int const input_count = node->InputCount();
+ DCHECK_LE(1, input_count);
+ int live_input_count = 0;
+ for (int i = 0; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ // Skip dead inputs.
+ if (input->opcode() == IrOpcode::kDead) continue;
+ // Compact live inputs.
+ if (i != live_input_count) node->ReplaceInput(live_input_count, input);
+ ++live_input_count;
+ }
+ if (live_input_count == 0) {
+ return Replace(dead());
+ } else if (live_input_count < input_count) {
+ node->set_op(common()->End(live_input_count));
+ node->TrimInputCount(live_input_count);
+ return Changed(node);
+ }
+ DCHECK_EQ(input_count, live_input_count);
+ return NoChange();
+}
+
+
+Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
+ DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
+ int const input_count = node->InputCount();
+ DCHECK_LE(1, input_count);
+ // Count the number of live inputs to {node} and compact them on the fly, also
+ // compacting the inputs of the associated {Phi} and {EffectPhi} uses at the
+ // same time. We consider {Loop}s dead even if only the first control input
+ // is dead.
+ int live_input_count = 0;
+ if (node->opcode() != IrOpcode::kLoop ||
+ node->InputAt(0)->opcode() != IrOpcode::kDead) {
+ for (int i = 0; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ // Skip dead inputs.
+ if (input->opcode() == IrOpcode::kDead) continue;
+ // Compact live inputs.
+ if (live_input_count != i) {
+ node->ReplaceInput(live_input_count, input);
+ for (Node* const use : node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ DCHECK_EQ(input_count + 1, use->InputCount());
+ use->ReplaceInput(live_input_count, use->InputAt(i));
+ }
+ }
+ }
+ ++live_input_count;
+ }
+ }
+ if (live_input_count == 0) {
+ return Replace(dead());
+ } else if (live_input_count == 1) {
+ // Due to compaction above, the live input is at offset 0.
+ for (Node* const use : node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ Replace(use, use->InputAt(0));
+ } else if (use->opcode() == IrOpcode::kTerminate) {
+ DCHECK_EQ(IrOpcode::kLoop, node->opcode());
+ Replace(use, dead());
+ }
+ }
+ return Replace(node->InputAt(0));
+ }
+ DCHECK_LE(2, live_input_count);
+ DCHECK_LE(live_input_count, input_count);
+ // Trim input count for the {Merge} or {Loop} node.
+ if (live_input_count < input_count) {
+ // Trim input counts for all phi uses and revisit them.
+ for (Node* const use : node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ use->ReplaceInput(live_input_count, node);
+ TrimMergeOrPhi(use, live_input_count);
+ Revisit(use);
+ }
+ }
+ TrimMergeOrPhi(node, live_input_count);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction DeadCodeElimination::ReduceNode(Node* node) {
+ // If {node} has exactly one control input and this is {Dead},
+ // replace {node} with {Dead}.
+ int const control_input_count = node->op()->ControlInputCount();
+ if (control_input_count == 0) return NoChange();
+ DCHECK_EQ(1, control_input_count);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kDead) return Replace(control);
+ return NoChange();
+}
+
+
+void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
+ const Operator* const op = common()->ResizeMergeOrPhi(node->op(), size);
+ node->TrimInputCount(OperatorProperties::GetTotalInputCount(op));
+ node->set_op(op);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
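
The compaction loop in ReduceLoopOrMerge can be modeled standalone as well. This sketch uses simplified types, not the V8 classes: dead control inputs are skipped, surviving inputs of the merge and of each phi slide left in lockstep, and both are trimmed to the live count (a phi keeps one extra trailing input, its control).

// merge_compaction_sketch.cc -- model of ReduceLoopOrMerge input compaction.
// Build: g++ -std=c++14 merge_compaction_sketch.cc
#include <cassert>
#include <string>
#include <vector>

// Compacts a merge (control inputs) and one phi (values + trailing control).
// Returns the number of live inputs.
int CompactLiveInputs(std::vector<bool>& merge_input_is_dead,
                      std::vector<std::string>& phi_inputs) {
  const int input_count = static_cast<int>(merge_input_is_dead.size());
  assert(static_cast<int>(phi_inputs.size()) == input_count + 1);
  int live = 0;
  for (int i = 0; i < input_count; ++i) {
    if (merge_input_is_dead[i]) continue;  // Skip dead inputs.
    phi_inputs[live] = phi_inputs[i];      // Compact live inputs.
    merge_input_is_dead[live] = false;
    ++live;
  }
  // Trim: the phi keeps its control input right after the live values.
  phi_inputs[live] = "control";
  phi_inputs.resize(live + 1);
  merge_input_is_dead.resize(live);
  return live;
}

int main() {
  std::vector<bool> dead = {false, true, false};  // Input 1 is dead.
  std::vector<std::string> phi = {"a", "b", "c", "control"};
  int live = CompactLiveInputs(dead, phi);
  assert(live == 2);
  assert((phi == std::vector<std::string>{"a", "c", "control"}));
  return 0;
}
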
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
new file mode 100644
index 0000000000..e5996c88ff
--- /dev/null
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DEAD_CODE_ELIMINATION_H_
+#define V8_COMPILER_DEAD_CODE_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+
+
+// Propagates {Dead} control through the graph and thereby removes dead code.
+// Note that this does not include trimming dead uses from the graph, and it
+// also does not include detecting dead code by any other means than seeing a
+// {Dead} control input; that is left to other reducers.
+class DeadCodeElimination final : public AdvancedReducer {
+ public:
+ DeadCodeElimination(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common);
+ ~DeadCodeElimination() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceEnd(Node* node);
+ Reduction ReduceLoopOrMerge(Node* node);
+ Reduction ReduceNode(Node* node);
+
+ void TrimMergeOrPhi(Node* node, int size);
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ Node* dead() const { return dead_; }
+
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ Node* const dead_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_DEAD_CODE_ELIMINATION_H_
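
A hedged usage sketch of the new reducer: it uses only entry points visible in this diff (the (zone, graph, dead) GraphReducer constructor, AddReducer, ReduceGraph and common->Dead()), and it compiles only inside the V8 source tree, so treat it as illustrative rather than canonical.

// Sketch: running DeadCodeElimination as part of a reduction pass.
#include "src/compiler/common-operator.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/graph.h"
#include "src/compiler/graph-reducer.h"

void RunDeadCodeElimination(v8::internal::Zone* zone,
                            v8::internal::compiler::Graph* graph,
                            v8::internal::compiler::CommonOperatorBuilder* common) {
  using namespace v8::internal::compiler;
  Node* dead = graph->NewNode(common->Dead());
  GraphReducer graph_reducer(zone, graph, dead);  // Acts as the Editor.
  DeadCodeElimination dce(&graph_reducer, graph, common);
  graph_reducer.AddReducer(&dce);
  graph_reducer.ReduceGraph();  // Walks back from graph->end().
}
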
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index 1168467612..f800b7786f 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -48,7 +48,7 @@ void FrameElider::MarkDeConstruction() {
// deconstructions.
for (auto succ : block->successors()) {
if (!InstructionBlockAt(succ)->needs_frame()) {
- DCHECK_EQ(1, block->SuccessorCount());
+ DCHECK_EQ(1U, block->SuccessorCount());
block->mark_must_deconstruct_frame();
}
}
@@ -56,7 +56,7 @@ void FrameElider::MarkDeConstruction() {
// Find "no frame -> frame" transitions, inserting frame constructions.
for (auto succ : block->successors()) {
if (InstructionBlockAt(succ)->needs_frame()) {
- DCHECK_NE(1, block->SuccessorCount());
+ DCHECK_NE(1U, block->SuccessorCount());
InstructionBlockAt(succ)->mark_must_construct_frame();
}
}
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index d0dbfbb2be..76d6749d0f 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -27,27 +27,47 @@ std::ostream& operator<<(std::ostream& os, OutputFrameStateCombine const& sc) {
}
-bool operator==(FrameStateCallInfo const& lhs, FrameStateCallInfo const& rhs) {
+bool operator==(FrameStateInfo const& lhs, FrameStateInfo const& rhs) {
return lhs.type() == rhs.type() && lhs.bailout_id() == rhs.bailout_id() &&
- lhs.state_combine() == rhs.state_combine();
+ lhs.state_combine() == rhs.state_combine() &&
+ lhs.function_info() == rhs.function_info();
}
-bool operator!=(FrameStateCallInfo const& lhs, FrameStateCallInfo const& rhs) {
+bool operator!=(FrameStateInfo const& lhs, FrameStateInfo const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(FrameStateCallInfo const& info) {
- return base::hash_combine(info.type(), info.bailout_id(),
+size_t hash_value(FrameStateInfo const& info) {
+ return base::hash_combine(static_cast<int>(info.type()), info.bailout_id(),
info.state_combine());
}
-std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
- return os << info.type() << ", " << info.bailout_id() << ", "
- << info.state_combine();
-}
-}
+std::ostream& operator<<(std::ostream& os, FrameStateType type) {
+ switch (type) {
+ case FrameStateType::kJavaScriptFunction:
+ os << "JS_FRAME";
+ break;
+ case FrameStateType::kArgumentsAdaptor:
+ os << "ARGUMENTS_ADAPTOR";
+ break;
+ }
+ return os;
}
+
+
+std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
+ os << info.type() << ", " << info.bailout_id() << ", "
+ << info.state_combine();
+ Handle<SharedFunctionInfo> shared_info;
+ if (info.shared_info().ToHandle(&shared_info)) {
+ os << ", " << Brief(*shared_info);
+ }
+ return os;
}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index ee98c06ffe..42c41f9107 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_FRAME_STATES_H_
#define V8_COMPILER_FRAME_STATES_H_
-#include "src/handles-inl.h" // TODO(everyone): Fix our inl.h crap
-#include "src/objects-inl.h" // TODO(everyone): Fix our inl.h crap
-#include "src/utils.h"
+#include "src/handles-inl.h"
namespace v8 {
namespace internal {
@@ -72,42 +70,81 @@ class OutputFrameStateCombine {
// The type of stack frame that a FrameState node represents.
-enum FrameStateType {
- JS_FRAME, // Represents an unoptimized JavaScriptFrame.
- ARGUMENTS_ADAPTOR // Represents an ArgumentsAdaptorFrame.
+enum class FrameStateType {
+ kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
+ kArgumentsAdaptor // Represents an ArgumentsAdaptorFrame.
};
-class FrameStateCallInfo final {
+class FrameStateFunctionInfo {
public:
- FrameStateCallInfo(
- FrameStateType type, BailoutId bailout_id,
- OutputFrameStateCombine state_combine,
- MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>())
+ FrameStateFunctionInfo(FrameStateType type, int parameter_count,
+ int local_count,
+ Handle<SharedFunctionInfo> shared_info)
: type_(type),
- bailout_id_(bailout_id),
- frame_state_combine_(state_combine),
- jsfunction_(jsfunction) {}
+ parameter_count_(parameter_count),
+ local_count_(local_count),
+ shared_info_(shared_info) {}
+ int local_count() const { return local_count_; }
+ int parameter_count() const { return parameter_count_; }
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateType type() const { return type_; }
+
+ private:
+ FrameStateType const type_;
+ int const parameter_count_;
+ int const local_count_;
+ Handle<SharedFunctionInfo> const shared_info_;
+};
+
+
+class FrameStateInfo final {
+ public:
+ FrameStateInfo(BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ const FrameStateFunctionInfo* info)
+ : bailout_id_(bailout_id),
+ frame_state_combine_(state_combine),
+ info_(info) {}
+
+ FrameStateType type() const {
+ return info_ == nullptr ? FrameStateType::kJavaScriptFunction
+ : info_->type();
+ }
BailoutId bailout_id() const { return bailout_id_; }
OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
- MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
+ MaybeHandle<SharedFunctionInfo> shared_info() const {
+ return info_ == nullptr ? MaybeHandle<SharedFunctionInfo>()
+ : info_->shared_info();
+ }
+ int parameter_count() const {
+ return info_ == nullptr ? 0 : info_->parameter_count();
+ }
+ int local_count() const {
+ return info_ == nullptr ? 0 : info_->local_count();
+ }
+ const FrameStateFunctionInfo* function_info() const { return info_; }
private:
- FrameStateType type_;
- BailoutId bailout_id_;
- OutputFrameStateCombine frame_state_combine_;
- MaybeHandle<JSFunction> jsfunction_;
+ BailoutId const bailout_id_;
+ OutputFrameStateCombine const frame_state_combine_;
+ const FrameStateFunctionInfo* const info_;
};
-bool operator==(FrameStateCallInfo const&, FrameStateCallInfo const&);
-bool operator!=(FrameStateCallInfo const&, FrameStateCallInfo const&);
+bool operator==(FrameStateInfo const&, FrameStateInfo const&);
+bool operator!=(FrameStateInfo const&, FrameStateInfo const&);
-size_t hash_value(FrameStateCallInfo const&);
+size_t hash_value(FrameStateInfo const&);
-std::ostream& operator<<(std::ostream&, FrameStateCallInfo const&);
+std::ostream& operator<<(std::ostream&, FrameStateInfo const&);
+static const int kFrameStateParametersInput = 0;
+static const int kFrameStateLocalsInput = 1;
+static const int kFrameStateStackInput = 2;
+static const int kFrameStateContextInput = 3;
+static const int kFrameStateFunctionInput = 4;
+static const int kFrameStateOuterStateInput = 5;
+static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
} // namespace compiler
} // namespace internal
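
The new kFrameState*Input constants fix the positional layout of a FrameState node's six inputs. A standalone sketch (plain C++ mirroring just the constants above; the name table is made up for display) that prints the slot assignment:

// frame_state_layout_sketch.cc -- slot layout of a FrameState node's inputs.
// Build: g++ -std=c++14 frame_state_layout_sketch.cc
#include <cstdio>

// Mirrors the constants introduced in frame-states.h.
static const int kFrameStateParametersInput = 0;
static const int kFrameStateLocalsInput = 1;
static const int kFrameStateStackInput = 2;
static const int kFrameStateContextInput = 3;
static const int kFrameStateFunctionInput = 4;
static const int kFrameStateOuterStateInput = 5;
static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;

int main() {
  const char* names[kFrameStateInputCount] = {nullptr};
  names[kFrameStateParametersInput] = "parameters";
  names[kFrameStateLocalsInput] = "locals";
  names[kFrameStateStackInput] = "stack";
  names[kFrameStateContextInput] = "context";
  names[kFrameStateFunctionInput] = "function";
  names[kFrameStateOuterStateInput] = "outer frame state";
  for (int i = 0; i < kFrameStateInputCount; ++i) {
    std::printf("input %d: %s\n", i, names[i]);
  }
}
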
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index e5de73781b..bad0a92274 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -111,6 +111,6 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
}
}
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index fbd97bf499..80b40a7d9a 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -8,14 +8,12 @@
#include "src/compiler/graph.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
-bool Reducer::Finish() { return true; }
-
-
enum class GraphReducer::State : uint8_t {
kUnvisited,
kRevisit,
@@ -24,8 +22,9 @@ enum class GraphReducer::State : uint8_t {
};
-GraphReducer::GraphReducer(Graph* graph, Zone* zone)
+GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
: graph_(graph),
+ dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
@@ -66,23 +65,7 @@ void GraphReducer::ReduceNode(Node* node) {
}
-void GraphReducer::ReduceGraph() {
- for (;;) {
- ReduceNode(graph()->end());
- // TODO(turbofan): Remove this once the dead node trimming is in the
- // GraphReducer.
- bool done = true;
- for (Reducer* const reducer : reducers_) {
- if (!reducer->Finish()) {
- done = false;
- break;
- }
- }
- if (done) break;
- // Reset all marks on the graph in preparation to re-reduce the graph.
- state_.Reset(graph());
- }
-}
+void GraphReducer::ReduceGraph() { ReduceNode(graph()->end()); }
Reduction GraphReducer::Reduce(Node* const node) {
@@ -136,7 +119,7 @@ void GraphReducer::ReduceTop() {
}
// Remember the max node id before reduction.
- NodeId const max_id = graph()->NodeCount() - 1;
+ NodeId const max_id = static_cast<NodeId>(graph()->NodeCount() - 1);
// All inputs should be visited or on stack. Apply reductions to node.
Reduction reduction = Reduce(node);
@@ -209,6 +192,42 @@ void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
}
+void GraphReducer::ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) {
+ if (effect == nullptr && node->op()->EffectInputCount() > 0) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+ if (control == nullptr && node->op()->ControlInputCount() > 0) {
+ control = NodeProperties::GetControlInput(node);
+ }
+
+ // Requires distinguishing between value, effect and control edges.
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (user->opcode() == IrOpcode::kIfSuccess) {
+ Replace(user, control);
+ } else if (user->opcode() == IrOpcode::kIfException) {
+ DCHECK_NOT_NULL(dead_);
+ edge.UpdateTo(dead_);
+ Revisit(user);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ DCHECK_NOT_NULL(effect);
+ edge.UpdateTo(effect);
+ Revisit(user);
+ } else {
+ DCHECK_NOT_NULL(value);
+ edge.UpdateTo(value);
+ Revisit(user);
+ }
+ }
+}
+
+
void GraphReducer::Pop() {
Node* node = stack_.top().node;
state_.Set(node, State::kVisited);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 1c90b3018b..39c302f892 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -19,7 +19,7 @@ class Node;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
-typedef int32_t NodeId;
+typedef uint32_t NodeId;
// Represents the result of trying to reduce a node in the graph.
@@ -47,13 +47,6 @@ class Reducer {
// Try to reduce a node if possible.
virtual Reduction Reduce(Node* node) = 0;
- // Ask this reducer to finish operation, returns {true} if the reducer is
- // done, while {false} indicates that the graph might need to be reduced
- // again.
- // TODO(turbofan): Remove this once the dead node trimming is in the
- // GraphReducer.
- virtual bool Finish();
-
// Helper functions for subclasses to produce reductions for a node.
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
@@ -74,6 +67,11 @@ class AdvancedReducer : public Reducer {
virtual void Replace(Node* node, Node* replacement) = 0;
// Revisit the {node} again later.
virtual void Revisit(Node* node) = 0;
+ // Replace value uses of {node} with {value} and effect uses of {node} with
+ // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // control uses will be relaxed assuming {node} cannot throw.
+ virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) = 0;
};
explicit AdvancedReducer(Editor* editor) : editor_(editor) {}
@@ -91,6 +89,24 @@ class AdvancedReducer : public Reducer {
DCHECK_NOT_NULL(editor_);
editor_->Revisit(node);
}
+ void ReplaceWithValue(Node* node, Node* value, Node* effect = nullptr,
+ Node* control = nullptr) {
+ DCHECK_NOT_NULL(editor_);
+ editor_->ReplaceWithValue(node, value, effect, control);
+ }
+
+ // Relax the effects of {node} by immediately replacing effect and control
+ // uses of {node} with the effect and control input to {node}.
+ // TODO(turbofan): replace the effect input to {node} with {graph->start()}.
+ void RelaxEffectsAndControls(Node* node) {
+ ReplaceWithValue(node, node, nullptr, nullptr);
+ }
+
+ // Relax the control uses of {node} by immediately replacing them with the
+ // control input to {node}.
+ void RelaxControls(Node* node) {
+ ReplaceWithValue(node, node, node, nullptr);
+ }
private:
Editor* const editor_;
@@ -98,10 +114,10 @@ class AdvancedReducer : public Reducer {
// Performs an iterative reduction of a node graph.
-class GraphReducer final : public AdvancedReducer::Editor {
+class GraphReducer : public AdvancedReducer::Editor {
public:
- GraphReducer(Graph* graph, Zone* zone);
- ~GraphReducer() final;
+ GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
+ ~GraphReducer();
Graph* graph() const { return graph_; }
@@ -126,6 +142,13 @@ class GraphReducer final : public AdvancedReducer::Editor {
// Replace {node} with {replacement}.
void Replace(Node* node, Node* replacement) final;
+
+ // Replace value uses of {node} with {value} and effect uses of {node} with
+ // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // control uses will be relaxed assuming {node} cannot throw.
+ void ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) final;
+
// Replace all uses of {node} with {replacement} if the id of {replacement} is
// less than or equal to {max_id}. Otherwise, replace all uses of {node} whose
// id is less than or equal to {max_id} with the {replacement}.
@@ -140,6 +163,7 @@ class GraphReducer final : public AdvancedReducer::Editor {
void Revisit(Node* node) final;
Graph* const graph_;
+ Node* const dead_;
NodeMarker<State> state_;
ZoneVector<Reducer*> reducers_;
ZoneStack<Node*> revisit_;
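
For illustration, a reducer built on the new editor hooks. This is a sketch relying on V8-internal headers, not the actual phi reducer: ReplaceWithValue reroutes value uses to the given value and, with the nullptr defaults, relaxes effect and control uses to the node's own effect/control inputs.

// Sketch: an AdvancedReducer that forwards a phi whose value inputs are
// all identical. Illustrative only; relies on graph-reducer.h and node.h.
#include "src/compiler/graph-reducer.h"
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"

namespace v8 { namespace internal { namespace compiler {

class RedundantPhiReducer final : public AdvancedReducer {
 public:
  explicit RedundantPhiReducer(Editor* editor) : AdvancedReducer(editor) {}

  Reduction Reduce(Node* node) final {
    if (node->opcode() != IrOpcode::kPhi) return NoChange();
    Node* const first = node->InputAt(0);
    int const value_inputs = node->op()->ValueInputCount();
    for (int i = 1; i < value_inputs; ++i) {
      if (node->InputAt(i) != first) return NoChange();
    }
    ReplaceWithValue(node, first);  // Reroute uses via the editor.
    return Replace(first);
  }
};

}}}  // namespace v8::internal::compiler
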
diff --git a/deps/v8/src/compiler/graph-trimmer.cc b/deps/v8/src/compiler/graph-trimmer.cc
new file mode 100644
index 0000000000..5fae425e1e
--- /dev/null
+++ b/deps/v8/src/compiler/graph-trimmer.cc
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-trimmer.h"
+
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphTrimmer::GraphTrimmer(Zone* zone, Graph* graph)
+ : graph_(graph), is_live_(graph, 2), live_(zone) {
+ live_.reserve(graph->NodeCount());
+}
+
+
+GraphTrimmer::~GraphTrimmer() {}
+
+
+void GraphTrimmer::TrimGraph() {
+ // Mark end node as live.
+ MarkAsLive(graph()->end());
+ // Compute transitive closure of live nodes.
+ for (size_t i = 0; i < live_.size(); ++i) {
+ for (Node* const input : live_[i]->inputs()) MarkAsLive(input);
+ }
+ // Remove dead->live edges.
+ for (Node* const live : live_) {
+ DCHECK(IsLive(live));
+ for (Edge edge : live->use_edges()) {
+ Node* const user = edge.from();
+ if (!IsLive(user)) {
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "DeadLink: " << *user << "(" << edge.index() << ") -> " << *live
+ << std::endl;
+ }
+ edge.UpdateTo(nullptr);
+ }
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
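
The closure loop above relies on live_ growing while it is being indexed, which turns the vector into a worklist. A standalone model of that idiom (the tiny adjacency-list graph is made up):

// mark_live_sketch.cc -- worklist-based liveness marking, as in TrimGraph().
// Build: g++ -std=c++14 mark_live_sketch.cc
#include <cassert>
#include <vector>

int main() {
  // A tiny graph as adjacency lists: node -> inputs. Node 3 is unreachable
  // from the end node (node 0).
  std::vector<std::vector<int>> inputs = {{1, 2}, {2}, {}, {2}};
  std::vector<bool> is_live(inputs.size(), false);
  std::vector<int> live;

  auto mark = [&](int n) {
    if (!is_live[n]) { is_live[n] = true; live.push_back(n); }
  };

  mark(0);  // Mark the end node as live.
  // Compute the transitive closure; live.size() grows during the loop.
  for (size_t i = 0; i < live.size(); ++i) {
    for (int input : inputs[live[i]]) mark(input);
  }
  assert(is_live[0] && is_live[1] && is_live[2] && !is_live[3]);
}
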
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
new file mode 100644
index 0000000000..d8258becc8
--- /dev/null
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_TRIMMER_H_
+#define V8_COMPILER_GRAPH_TRIMMER_H_
+
+#include "src/compiler/node-marker.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+
+
+// Trims dead nodes from the node graph.
+class GraphTrimmer final {
+ public:
+ GraphTrimmer(Zone* zone, Graph* graph);
+ ~GraphTrimmer();
+
+ // Trim nodes in the {graph} that are not reachable from {graph->end()}.
+ void TrimGraph();
+
+ // Trim nodes in the {graph} that are not reachable from either {graph->end()}
+  // or any of the roots in the sequence [{begin},{end}).
+ template <typename ForwardIterator>
+ void TrimGraph(ForwardIterator begin, ForwardIterator end) {
+ while (begin != end) MarkAsLive(*begin++);
+ TrimGraph();
+ }
+
+ private:
+ V8_INLINE bool IsLive(Node* const node) { return is_live_.Get(node); }
+ V8_INLINE void MarkAsLive(Node* const node) {
+ if (!node->IsDead() && !IsLive(node)) {
+ is_live_.Set(node, true);
+ live_.push_back(node);
+ }
+ }
+
+ Graph* graph() const { return graph_; }
+
+ Graph* const graph_;
+ NodeMarker<bool> is_live_;
+ NodeVector live_;
+
+ DISALLOW_COPY_AND_ASSIGN(GraphTrimmer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_GRAPH_TRIMMER_H_
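
A hedged usage sketch (TrimWithRoots is a made-up wrapper; only the GraphTrimmer API from this header is real): the templated overload keeps extra roots live besides graph->end(). It compiles only inside the V8 tree.

// Sketch: trimming while keeping extra roots alive.
#include <vector>
#include "src/compiler/graph.h"
#include "src/compiler/graph-trimmer.h"

void TrimWithRoots(v8::internal::Zone* zone,
                   v8::internal::compiler::Graph* graph,
                   const std::vector<v8::internal::compiler::Node*>& roots) {
  v8::internal::compiler::GraphTrimmer trimmer(zone, graph);
  // Everything unreachable from end() or any of {roots} loses its uses.
  trimmer.TrimGraph(roots.begin(), roots.end());
}
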
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 8fb14653e1..313edb9b65 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -122,8 +122,7 @@ class JSONGraphNodeWriter {
os_ << ",\"rankInputs\":[0]";
}
SourcePosition position = positions_->GetSourcePosition(node);
- if (!position.IsUnknown()) {
- DCHECK(!position.IsInvalid());
+ if (position.IsKnown()) {
os_ << ",\"pos\":" << position.raw();
}
os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
@@ -650,8 +649,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
}
if (positions != NULL) {
SourcePosition position = positions->GetSourcePosition(node);
- if (!position.IsUnknown()) {
- DCHECK(!position.IsInvalid());
+ if (position.IsKnown()) {
os_ << " pos:" << position.raw();
}
}
@@ -828,6 +826,6 @@ std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
}
return os;
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index 193861187b..00074b5513 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -22,9 +22,9 @@ Graph::Graph(Zone* zone)
decorators_(zone) {}
-void Graph::Decorate(Node* node, bool incomplete) {
+void Graph::Decorate(Node* node) {
for (auto const decorator : decorators_) {
- decorator->Decorate(node, incomplete);
+ decorator->Decorate(node);
}
}
@@ -46,14 +46,22 @@ Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
DCHECK_LE(op->ValueInputCount(), input_count);
Node* const node =
Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
- Decorate(node, incomplete);
+ Decorate(node);
return node;
}
+Node* Graph::CloneNode(const Node* node) {
+ DCHECK_NOT_NULL(node);
+ Node* const clone = Node::Clone(zone(), NextNodeId(), node);
+ Decorate(clone);
+ return clone;
+}
+
+
NodeId Graph::NextNodeId() {
NodeId const id = next_node_id_;
- CHECK(!base::bits::SignedAddOverflow32(id, 1, &next_node_id_));
+ CHECK(!base::bits::UnsignedAddOverflow32(id, 1, &next_node_id_));
return id;
}
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index d71bb84ac7..cb073b312a 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -26,7 +26,7 @@ typedef uint32_t Mark;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
-typedef int32_t NodeId;
+typedef uint32_t NodeId;
class Graph : public ZoneObject {
@@ -74,6 +74,14 @@ class Graph : public ZoneObject {
Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8};
return NewNode(op, arraysize(nodes), nodes);
}
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+
+ // Clone the {node}, and assign a new node id to the copy.
+ Node* CloneNode(const Node* node);
template <class Visitor>
inline void VisitNodeInputsFromEnd(Visitor* visitor);
@@ -85,9 +93,9 @@ class Graph : public ZoneObject {
void SetStart(Node* start) { start_ = start; }
void SetEnd(Node* end) { end_ = end; }
- int NodeCount() const { return next_node_id_; }
+ size_t NodeCount() const { return next_node_id_; }
- void Decorate(Node* node, bool incomplete);
+ void Decorate(Node* node);
void AddDecorator(GraphDecorator* decorator);
void RemoveDecorator(GraphDecorator* decorator);
@@ -112,7 +120,7 @@ class Graph : public ZoneObject {
class GraphDecorator : public ZoneObject {
public:
virtual ~GraphDecorator() {}
- virtual void Decorate(Node* node, bool incomplete) = 0;
+ virtual void Decorate(Node* node) = 0;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/greedy-allocator.cc b/deps/v8/src/compiler/greedy-allocator.cc
new file mode 100644
index 0000000000..8d658c39ff
--- /dev/null
+++ b/deps/v8/src/compiler/greedy-allocator.cc
@@ -0,0 +1,350 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+namespace {
+
+
+void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
+ int reg_id = range->assigned_register();
+ range->SetUseHints(reg_id);
+ if (range->is_phi()) {
+ data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
+ }
+}
+
+
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+ LifetimePosition pos) {
+ DCHECK(range->Start() < pos && pos < range->End());
+ DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+ (data->code()
+ ->GetInstructionBlock(pos.ToInstructionIndex())
+ ->last_instruction_index() != pos.ToInstructionIndex()));
+ LiveRange* result = data->NewChildRangeFor(range);
+ range->SplitAt(pos, result, data->allocation_zone());
+ return result;
+}
+
+
+// TODO(mtrofin): explain why splitting in gap START is always OK.
+LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ const InstructionSequence* code,
+ int instruction_index) {
+ LifetimePosition ret = LifetimePosition::Invalid();
+
+ ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
+ }
+ return ret;
+}
+
+
+int GetFirstGapIndex(const UseInterval* interval) {
+ LifetimePosition start = interval->start();
+ int ret = start.ToInstructionIndex();
+ return ret;
+}
+
+
+int GetLastGapIndex(const UseInterval* interval) {
+ LifetimePosition end = interval->end();
+ return end.ToInstructionIndex();
+}
+
+
+// Basic heuristic for advancing the algorithm, if any other splitting heuristic
+// failed.
+LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
+ const InstructionSequence* code) {
+ if (range->first_interval()->next() != nullptr) {
+ return range->first_interval()->next()->start();
+ }
+
+ UseInterval* interval = range->first_interval();
+ int first = GetFirstGapIndex(interval);
+ int last = GetLastGapIndex(interval);
+ if (first == last) return LifetimePosition::Invalid();
+
+  // TODO(mtrofin): determine why we can't just split somewhere arbitrary
+  // within the range, e.g. its middle.
+ for (UsePosition* pos = range->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() != UsePositionType::kRequiresRegister) continue;
+ LifetimePosition before = GetSplitPositionForInstruction(
+ range, code, pos->pos().ToInstructionIndex());
+ if (before.IsValid()) return before;
+ LifetimePosition after = GetSplitPositionForInstruction(
+ range, code, pos->pos().ToInstructionIndex() + 1);
+ if (after.IsValid()) return after;
+ }
+ return LifetimePosition::Invalid();
+}
+
+
+bool IsProgressPossible(const LiveRange* range,
+ const InstructionSequence* code) {
+ return range->CanBeSpilled(range->Start()) ||
+ GetLastResortSplitPosition(range, code).IsValid();
+}
+} // namespace
+
+
+AllocationCandidate AllocationScheduler::GetNext() {
+ DCHECK(!queue_.empty());
+ AllocationCandidate ret = queue_.top();
+ queue_.pop();
+ return ret;
+}
+
+
+void AllocationScheduler::Schedule(LiveRange* range) {
+ TRACE("Scheduling live range %d.\n", range->id());
+ queue_.push(AllocationCandidate(range));
+}
+
+GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
+ RegisterKind kind, Zone* local_zone)
+ : RegisterAllocator(data, kind),
+ local_zone_(local_zone),
+ allocations_(local_zone),
+ scheduler_(local_zone) {}
+
+
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
+ TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id),
+ range->id());
+
+ DCHECK(!range->HasRegisterAssigned());
+
+ current_allocations(reg_id)->AllocateRange(range);
+
+ TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
+ range->set_assigned_register(reg_id);
+
+ DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
+}
+
+
+void GreedyAllocator::PreallocateFixedRanges() {
+ allocations_.resize(num_registers());
+ for (int i = 0; i < num_registers(); i++) {
+ allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
+ }
+
+ for (LiveRange* fixed_range : GetFixedRegisters()) {
+ if (fixed_range != nullptr) {
+ DCHECK_EQ(mode(), fixed_range->kind());
+ DCHECK(fixed_range->IsFixed());
+
+ int reg_nr = fixed_range->assigned_register();
+ EnsureValidRangeWeight(fixed_range);
+ current_allocations(reg_nr)->AllocateRange(fixed_range);
+ }
+ }
+}
+
+
+void GreedyAllocator::ScheduleAllocationCandidates() {
+ for (auto range : data()->live_ranges()) {
+ if (CanProcessRange(range) && !range->spilled()) {
+ scheduler().Schedule(range);
+ }
+ }
+}
+
+
+void GreedyAllocator::TryAllocateCandidate(
+ const AllocationCandidate& candidate) {
+ // At this point, this is just a live range. TODO: groups.
+ TryAllocateLiveRange(candidate.live_range());
+}
+
+
+void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
+ // TODO(mtrofin): once we introduce groups, we'll want to first try and
+ // allocate at the preferred register.
+ TRACE("Attempting to allocate live range %d\n", range->id());
+ int free_reg = -1;
+ int evictable_reg = -1;
+ EnsureValidRangeWeight(range);
+ DCHECK(range->weight() != LiveRange::kInvalidWeight);
+
+ float smallest_weight = LiveRange::kMaxWeight;
+
+ // Seek either the first free register, or, from the set of registers
+ // where the maximum conflict is lower than the candidate's weight, the one
+ // with the smallest such weight.
+ for (int i = 0; i < num_registers(); i++) {
+ float max_conflict_weight =
+ current_allocations(i)->GetMaximumConflictingWeight(range);
+ if (max_conflict_weight == LiveRange::kInvalidWeight) {
+ free_reg = i;
+ break;
+ }
+ if (max_conflict_weight < range->weight() &&
+ max_conflict_weight < smallest_weight) {
+ smallest_weight = max_conflict_weight;
+ evictable_reg = i;
+ }
+ }
+
+ // We have a free register, so we use it.
+ if (free_reg >= 0) {
+ TRACE("Found free register %s for live range %d\n", RegisterName(free_reg),
+ range->id());
+ AssignRangeToRegister(free_reg, range);
+ return;
+ }
+
+ // We found a register to perform evictions, so we evict and allocate our
+ // candidate.
+ if (evictable_reg >= 0) {
+ TRACE("Found evictable register %s for live range %d\n",
+ RegisterName(free_reg), range->id());
+ current_allocations(evictable_reg)
+ ->EvictAndRescheduleConflicts(range, &scheduler());
+ AssignRangeToRegister(evictable_reg, range);
+ return;
+ }
+
+ // The range needs to be split or spilled.
+ SplitOrSpillBlockedRange(range);
+}
+
+
+void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
+ size_t initial_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < initial_range_count; ++i) {
+ auto range = data()->live_ranges()[i];
+ if (!CanProcessRange(range)) continue;
+ if (range->HasNoSpillType()) continue;
+
+ LifetimePosition start = range->Start();
+ TRACE("Live range %d is defined by a spill operand.\n", range->id());
+ auto next_pos = start;
+ if (next_pos.IsGapPosition()) {
+ next_pos = next_pos.NextStart();
+ }
+ auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ // If the range already has a spill operand and it doesn't need a
+ // register immediately, split it and spill the first part of the range.
+ if (pos == nullptr) {
+ Spill(range);
+ } else if (pos->pos() > range->Start().NextStart()) {
+ // Do not spill live range eagerly if use position that can benefit from
+ // the register is too close to the start of live range.
+ auto split_pos = pos->pos();
+ if (data()->IsBlockBoundary(split_pos.Start())) {
+ split_pos = split_pos.Start();
+ } else {
+ split_pos = split_pos.PrevStart().End();
+ }
+ Split(range, data(), split_pos);
+ Spill(range);
+ }
+ }
+}
+
+
+void GreedyAllocator::AllocateRegisters() {
+ CHECK(scheduler().empty());
+ CHECK(allocations_.empty());
+
+ TRACE("Begin allocating function %s with the Greedy Allocator\n",
+ data()->debug_name());
+
+ SplitAndSpillRangesDefinedByMemoryOperand();
+ PreallocateFixedRanges();
+ ScheduleAllocationCandidates();
+
+ while (!scheduler().empty()) {
+ AllocationCandidate candidate = scheduler().GetNext();
+ TryAllocateCandidate(candidate);
+ }
+
+
+ // We do not rely on the hint mechanism used by LinearScan, so no need to
+ // actively update/reset operands until the end.
+ for (auto range : data()->live_ranges()) {
+ if (CanProcessRange(range) && range->HasRegisterAssigned()) {
+ UpdateOperands(range, data());
+ }
+ }
+
+ for (size_t i = 0; i < allocations_.size(); ++i) {
+ if (!allocations_[i]->empty()) {
+ data()->MarkAllocated(mode(), static_cast<int>(i));
+ }
+ }
+ allocations_.clear();
+
+ TRACE("End allocating function %s with the Greedy Allocator\n",
+ data()->debug_name());
+}
+
+
+void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
+ // The live range weight will be invalidated when ranges are created or split.
+ // Otherwise, it is consistently updated when the range is allocated or
+ // unallocated.
+ if (range->weight() != LiveRange::kInvalidWeight) return;
+
+ if (range->IsFixed()) {
+ range->set_weight(LiveRange::kMaxWeight);
+ return;
+ }
+ if (!IsProgressPossible(range, code())) {
+ range->set_weight(LiveRange::kMaxWeight);
+ return;
+ }
+
+ float use_count = 0.0;
+ for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+ ++use_count;
+ }
+ range->set_weight(use_count / static_cast<float>(range->GetSize()));
+}
+
+
+void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
+ LifetimePosition start = range->Start();
+ CHECK(range->CanBeSpilled(start));
+
+ DCHECK(range->NextRegisterPosition(start) == nullptr);
+ Spill(range);
+}
+
+
+void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
+ // TODO(mtrofin): replace the call below with the entry point selecting
+ // heuristics, once they exist, out of which GLRSP is the last one.
+ auto pos = GetLastResortSplitPosition(range, code());
+ if (pos.IsValid()) {
+ LiveRange* tail = Split(range, data(), pos);
+ DCHECK(tail != range);
+ scheduler().Schedule(tail);
+ scheduler().Schedule(range);
+ return;
+ }
+ SpillRangeAsLastResort(range);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
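
The weight computed in EnsureValidRangeWeight is use density, use_count divided by the range's size, with kMaxWeight reserved for fixed ranges and ranges that could not otherwise make progress. A standalone numeric sketch (the sizes and counts below are made up):

// range_weight_sketch.cc -- the use-density weight from EnsureValidRangeWeight.
// Build: g++ -std=c++14 range_weight_sketch.cc
#include <cstdio>

float RangeWeight(int use_count, int range_size) {
  return static_cast<float>(use_count) / static_cast<float>(range_size);
}

int main() {
  // A short, heavily used range outweighs a long, sparsely used one, so the
  // long range is the one evicted when they compete for a register.
  float hot = RangeWeight(4, 8);    // 0.50
  float cold = RangeWeight(2, 40);  // 0.05
  std::printf("hot=%.2f cold=%.2f -> evict %s\n", hot, cold,
              hot > cold ? "cold" : "hot");
}
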
diff --git a/deps/v8/src/compiler/greedy-allocator.h b/deps/v8/src/compiler/greedy-allocator.h
new file mode 100644
index 0000000000..3ec180b2ba
--- /dev/null
+++ b/deps/v8/src/compiler/greedy-allocator.h
@@ -0,0 +1,111 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_GREEDY_ALLOCATOR_H_
+#define V8_GREEDY_ALLOCATOR_H_
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "src/compiler/register-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// The object of allocation scheduling. At minimum, this is a LiveRange, but
+// we may extend this to groups of LiveRanges. It has to be comparable.
+class AllocationCandidate {
+ public:
+ explicit AllocationCandidate(LiveRange* range) : range_(range) {}
+
+ // Strict ordering operators
+ bool operator<(const AllocationCandidate& other) const {
+ return range_->GetSize() < other.range_->GetSize();
+ }
+
+ bool operator>(const AllocationCandidate& other) const {
+ return range_->GetSize() > other.range_->GetSize();
+ }
+
+ LiveRange* live_range() const { return range_; }
+
+ private:
+ LiveRange* range_;
+};
+
+
+// Schedule processing (allocating) of AllocationCandidates.
+class AllocationScheduler final : ZoneObject {
+ public:
+ explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
+ void Schedule(LiveRange* range);
+ AllocationCandidate GetNext();
+ bool empty() const { return queue_.empty(); }
+
+ private:
+ typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
+ ScheduleQueue queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
+};
+
+
+// A variant of the LLVM Greedy Register Allocator. See
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+class GreedyAllocator final : public RegisterAllocator {
+ public:
+ explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
+ Zone* local_zone);
+
+ void AllocateRegisters();
+
+ private:
+ AllocationScheduler& scheduler() { return scheduler_; }
+ CoalescedLiveRanges* current_allocations(unsigned i) {
+ return allocations_[i];
+ }
+ Zone* local_zone() const { return local_zone_; }
+
+ // Insert fixed ranges.
+ void PreallocateFixedRanges();
+
+ // Schedule unassigned live ranges for allocation.
+ // TODO(mtrofin): groups.
+ void ScheduleAllocationCandidates();
+
+ // Find the optimal split for ranges defined by a memory operand, e.g.
+ // constants or function parameters passed on the stack.
+ void SplitAndSpillRangesDefinedByMemoryOperand();
+
+ void TryAllocateCandidate(const AllocationCandidate& candidate);
+ void TryAllocateLiveRange(LiveRange* range);
+
+ bool CanProcessRange(LiveRange* range) const {
+ return range != nullptr && !range->IsEmpty() && range->kind() == mode();
+ }
+
+ // Calculate the weight of a candidate for allocation.
+ void EnsureValidRangeWeight(LiveRange* range);
+
+ // Calculate the new weight of a range that is about to be allocated.
+ float GetAllocatedRangeWeight(float candidate_weight);
+
+ // This is the extension point for splitting heuristics.
+ void SplitOrSpillBlockedRange(LiveRange* range);
+
+ // Necessary heuristic: spill when all else failed.
+ void SpillRangeAsLastResort(LiveRange* range);
+
+ void AssignRangeToRegister(int reg_id, LiveRange* range);
+
+ Zone* local_zone_;
+ ZoneVector<CoalescedLiveRanges*> allocations_;
+ AllocationScheduler scheduler_;
+ DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_GREEDY_ALLOCATOR_H_
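
AllocationCandidate orders by LiveRange::GetSize(), and assuming ZonePriorityQueue behaves like std::priority_queue (a max-heap over operator<), GetNext() hands out the largest range first. A standalone model of that ordering (Candidate is a stand-in type):

// scheduler_order_sketch.cc -- biggest-range-first scheduling order.
// Build: g++ -std=c++14 scheduler_order_sketch.cc
#include <cassert>
#include <queue>

struct Candidate {
  int size;  // Stand-in for LiveRange::GetSize().
  bool operator<(const Candidate& other) const { return size < other.size; }
};

int main() {
  std::priority_queue<Candidate> queue;  // Max-heap over operator<.
  for (int size : {3, 17, 5}) queue.push(Candidate{size});
  assert(queue.top().size == 17);  // Largest candidate is allocated first.
  return 0;
}
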
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 0262c2a732..4690a8cc05 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -290,13 +290,6 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ mov(esp, ebp);
__ pop(ebp);
- int32_t bytes_to_pop =
- descriptor->IsJSFunctionCall()
- ? static_cast<int32_t>(descriptor->JSParameterCount() *
- kPointerSize)
- : 0;
- __ pop(Operand(esp, bytes_to_pop));
- __ add(esp, Immediate(bytes_to_pop));
}
}
@@ -352,6 +345,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
@@ -376,6 +385,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchStackPointer:
__ mov(i.OutputRegister(), esp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), ebp);
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -867,6 +879,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ push(i.InputOperand(0));
}
break;
+ case kIA32Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
+ }
+ break;
+ }
case kIA32StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
@@ -1256,7 +1277,7 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -1310,13 +1331,25 @@ void CodeGenerator::AssembleReturn() {
__ pop(ebp); // Pop caller's frame pointer.
__ ret(0);
}
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Ret(pop_count * kPointerSize, ebx);
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count == 0) {
+ __ ret(0);
+ } else {
+ __ Ret(pop_count * kPointerSize, ebx);
+ }
+ }
} else {
__ ret(0);
}
@@ -1508,7 +1541,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
__ Nop(padding_size);
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 3962040104..4002a6776d 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -91,6 +91,7 @@ namespace compiler {
V(IA32Movsd) \
V(IA32Lea) \
V(IA32Push) \
+ V(IA32Poke) \
V(IA32StoreWriteBarrier) \
V(IA32StackCheck)
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 8343dc1a09..105ca8287b 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -127,15 +127,14 @@ class IA32OperandGenerator final : public OperandGenerator {
namespace {
-void VisitROFloat(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
-void VisitRRFloat(InstructionSelector* selector, Node* node,
- InstructionCode opcode) {
+void VisitRR(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
@@ -648,38 +647,43 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat32ToFloat64);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEInt32ToFloat64);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEUint32ToFloat64);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64ToInt32);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64ToUint32);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64ToFloat32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, node, kArchTruncateDoubleToI);
+ case TruncationMode::kRoundToZero:
+ return VisitRO(this, node, kSSEFloat64ToInt32);
+ }
+ UNREACHABLE();
}
@@ -791,22 +795,22 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- VisitROFloat(this, node, kSSEFloat32Sqrt);
+ VisitRO(this, node, kSSEFloat32Sqrt);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitROFloat(this, node, kSSEFloat64Sqrt);
+ VisitRO(this, node, kSSEFloat64Sqrt);
}
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRRFloat(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRRFloat(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
@@ -830,19 +834,43 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
- // Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
- Emit(kIA32Push, g.NoOutput(), value);
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr, temp_count, temps);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value =
+ g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): handle pushing double parameters.
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
}
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -850,18 +878,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
@@ -879,16 +910,13 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
diff --git a/deps/v8/src/compiler/ia32/linkage-ia32.cc b/deps/v8/src/compiler/ia32/linkage-ia32.cc
index bfe201bf6f..930a86c69f 100644
--- a/deps/v8/src/compiler/ia32/linkage-ia32.cc
+++ b/deps/v8/src/compiler/ia32/linkage-ia32.cc
@@ -22,8 +22,10 @@ struct IA32LinkageHelperTraits {
static RegList CCalleeSaveRegisters() {
return esi.bit() | edi.bit() | ebx.bit();
}
+ static RegList CCalleeSaveFPRegisters() { return 0; }
static Register CRegisterParameter(int i) { return no_reg; }
static int CRegisterParametersLength() { return 0; }
+ static int CStackBackingStoreLength() { return 0; }
};
typedef LinkageHelper<IA32LinkageHelperTraits> LH;
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 7b42eb7784..a9db281fd3 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -21,6 +21,8 @@
#include "src/compiler/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC
#include "src/compiler/ppc/instruction-codes-ppc.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@@ -33,31 +35,34 @@ namespace compiler {
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchTailCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchTailCallJSFunction) \
- V(ArchJmp) \
- V(ArchLookupSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchTruncateDoubleToI) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
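
ARCH_OPCODE_LIST is an X-macro: the same list expands into the enumerators of ArchOpcode here and into printable opcode names elsewhere. A self-contained toy of the pattern, using an invented three-entry list rather than V8's:

#include <cstdio>

#define TOY_OPCODE_LIST(V) \
  V(CallCodeObject)        \
  V(CallCFunction)         \
  V(Jmp)

#define DECLARE_ENUM(name) kToy##name,
enum ToyOpcode { TOY_OPCODE_LIST(DECLARE_ENUM) };
#undef DECLARE_ENUM

#define DECLARE_NAME(name) #name,
const char* const kToyOpcodeNames[] = {TOY_OPCODE_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() { std::printf("%s\n", kToyOpcodeNames[kToyJmp]); }  // prints "Jmp"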
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 9603079444..813da4f132 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -336,8 +336,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
- (callee->opcode() == IrOpcode::kInt32Constant ||
- callee->opcode() == IrOpcode::kInt64Constant))
+ callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
: g.UseRegister(callee));
break;
@@ -373,7 +372,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// not appear as arguments to the call. Everything else ends up
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
- int pushed_count = 0;
+ size_t pushed_count = 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
@@ -393,10 +392,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->instruction_args.push_back(op);
}
}
- CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
- DCHECK(static_cast<size_t>(input_count) ==
- (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
- buffer->frame_state_value_count()));
+ DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
+ buffer->frame_state_value_count());
}
@@ -423,10 +420,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (instructions_.size() == current_node_end) continue;
// Mark source position on first instruction emitted.
SourcePosition source_position = source_positions_->GetSourcePosition(node);
- if (source_position.IsUnknown()) continue;
- DCHECK(!source_position.IsInvalid());
- if (source_position_mode_ == kAllSourcePositions ||
- node->opcode() == IrOpcode::kCall) {
+ if (source_position.IsKnown() &&
+ (source_position_mode_ == kAllSourcePositions ||
+ node->opcode() == IrOpcode::kCall)) {
sequence()->SetSourcePosition(instructions_[current_node_end],
source_position);
}
@@ -678,6 +674,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
+ case IrOpcode::kUint64LessThanOrEqual:
+ return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kChangeFloat32ToFloat64:
@@ -762,6 +760,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
+ case IrOpcode::kLoadFramePointer:
+ return VisitLoadFramePointer(node);
case IrOpcode::kCheckedLoad: {
MachineType rep = OpParameter<MachineType>(node);
MarkAsRepresentation(rep, node);
@@ -779,16 +779,15 @@ void InstructionSelector::VisitNode(Node* node) {
#if V8_TURBOFAN_BACKEND
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ Emit(kArchStackPointer, g.DefineAsRegister(node));
}
-void InstructionSelector::VisitLoadStackPointer(Node* node) {
+void InstructionSelector::VisitLoadFramePointer(Node* node) {
OperandGenerator g(this);
- Emit(kArchStackPointer, g.DefineAsRegister(node));
+ Emit(kArchFramePointer, g.DefineAsRegister(node));
}
@@ -831,7 +830,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
#endif // V8_TURBOFAN_BACKEND
// 32-bit targets do not implement the following instructions.
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+#if !V8_TURBOFAN_BACKEND_64
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
@@ -886,6 +885,11 @@ void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
@@ -924,7 +928,7 @@ void InstructionSelector::VisitParameter(Node* node) {
void InstructionSelector::VisitIfException(Node* node) {
OperandGenerator g(this);
- Node* call = node->InputAt(0);
+ Node* call = node->InputAt(1);
DCHECK_EQ(IrOpcode::kCall, call->opcode());
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
@@ -1029,25 +1033,29 @@ void InstructionSelector::VisitThrow(Node* value) {
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
DCHECK(state->opcode() == IrOpcode::kFrameState);
- DCHECK_EQ(5, state->InputCount());
- DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(0)->opcode());
- DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(1)->opcode());
- DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(2)->opcode());
- FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
+ DCHECK_EQ(kFrameStateInputCount, state->InputCount());
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(state);
- int parameters =
- static_cast<int>(StateValuesAccess(state->InputAt(0)).size());
- int locals = static_cast<int>(StateValuesAccess(state->InputAt(1)).size());
- int stack = static_cast<int>(StateValuesAccess(state->InputAt(2)).size());
+ int parameters = static_cast<int>(
+ StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
+ int locals = static_cast<int>(
+ StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
+ int stack = static_cast<int>(
+ StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
+
+ DCHECK_EQ(parameters, state_info.parameter_count());
+ DCHECK_EQ(locals, state_info.local_count());
FrameStateDescriptor* outer_state = NULL;
- Node* outer_node = state->InputAt(4);
+ Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
if (outer_node->opcode() == IrOpcode::kFrameState) {
outer_state = GetFrameStateDescriptor(outer_node);
}
return new (instruction_zone()) FrameStateDescriptor(
- instruction_zone(), state_info, parameters, locals, stack, outer_state);
+ instruction_zone(), state_info.type(), state_info.bailout_id(),
+ state_info.state_combine(), parameters, locals, stack,
+ state_info.shared_info(), outer_state);
}
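
The kFrameState*Input constants replace the old magic indices 0 through 4; their definition is not part of this hunk. Assuming the order implied by the accesses above (parameters, locals, stack, context, function, outer state), the enum presumably reads:

// Inferred FrameState input layout; the authoritative enum lives elsewhere
// in this commit.
enum {
  kFrameStateParametersInput,  // 0
  kFrameStateLocalsInput,      // 1
  kFrameStateStackInput,       // 2
  kFrameStateContextInput,     // 3
  kFrameStateFunctionInput,    // 4
  kFrameStateOuterStateInput,  // 5
  kFrameStateInputCount        // 6
};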
@@ -1069,18 +1077,16 @@ void InstructionSelector::AddFrameStateInputs(
FrameStateDescriptor* descriptor) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
- if (descriptor->outer_state() != NULL) {
- AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
+ if (descriptor->outer_state()) {
+ AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), inputs,
+ descriptor->outer_state());
}
- Node* parameters = state->InputAt(0);
- Node* locals = state->InputAt(1);
- Node* stack = state->InputAt(2);
- Node* context = state->InputAt(3);
-
- DCHECK_EQ(IrOpcode::kTypedStateValues, parameters->op()->opcode());
- DCHECK_EQ(IrOpcode::kTypedStateValues, locals->op()->opcode());
- DCHECK_EQ(IrOpcode::kTypedStateValues, stack->op()->opcode());
+ Node* parameters = state->InputAt(kFrameStateParametersInput);
+ Node* locals = state->InputAt(kFrameStateLocalsInput);
+ Node* stack = state->InputAt(kFrameStateStackInput);
+ Node* context = state->InputAt(kFrameStateContextInput);
+ Node* function = state->InputAt(kFrameStateFunctionInput);
DCHECK_EQ(descriptor->parameters_count(),
StateValuesAccess(parameters).size());
@@ -1092,6 +1098,8 @@ void InstructionSelector::AddFrameStateInputs(
OperandGenerator g(this);
size_t value_index = 0;
+ inputs->push_back(SlotOrImmediate(&g, function));
+ descriptor->SetType(value_index++, kMachAnyTagged);
for (StateValuesAccess::TypedNode input_node :
StateValuesAccess(parameters)) {
inputs->push_back(SlotOrImmediate(&g, input_node.node));
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index cea74a7012..83b45b39dd 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -403,7 +403,7 @@ void PhiInstruction::SetInput(size_t offset, int virtual_register) {
InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
RpoNumber loop_header, RpoNumber loop_end,
- bool deferred)
+ bool deferred, bool handler)
: successors_(zone),
predecessors_(zone),
phis_(zone),
@@ -414,6 +414,7 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
code_start_(-1),
code_end_(-1),
deferred_(deferred),
+ handler_(handler),
needs_frame_(false),
must_construct_frame_(false),
must_deconstruct_frame_(false) {}
@@ -443,9 +444,11 @@ static RpoNumber GetLoopEndRpo(const BasicBlock* block) {
static InstructionBlock* InstructionBlockFor(Zone* zone,
const BasicBlock* block) {
+ bool is_handler =
+ !block->empty() && block->front()->opcode() == IrOpcode::kIfException;
InstructionBlock* instr_block = new (zone)
InstructionBlock(zone, GetRpo(block), GetRpo(block->loop_header()),
- GetLoopEndRpo(block), block->deferred());
+ GetLoopEndRpo(block), block->deferred(), is_handler);
  // Map successors and predecessors.
instr_block->successors().reserve(block->SuccessorCount());
for (BasicBlock* successor : block->successors()) {
@@ -657,29 +660,31 @@ bool InstructionSequence::GetSourcePosition(const Instruction* instr,
void InstructionSequence::SetSourcePosition(const Instruction* instr,
SourcePosition value) {
- DCHECK(!value.IsInvalid());
- DCHECK(!value.IsUnknown());
source_positions_.insert(std::make_pair(instr, value));
}
FrameStateDescriptor::FrameStateDescriptor(
- Zone* zone, const FrameStateCallInfo& state_info, size_t parameters_count,
- size_t locals_count, size_t stack_count, FrameStateDescriptor* outer_state)
- : type_(state_info.type()),
- bailout_id_(state_info.bailout_id()),
- frame_state_combine_(state_info.state_combine()),
+ Zone* zone, FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine, size_t parameters_count,
+ size_t locals_count, size_t stack_count,
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state)
+ : type_(type),
+ bailout_id_(bailout_id),
+ frame_state_combine_(state_combine),
parameters_count_(parameters_count),
locals_count_(locals_count),
stack_count_(stack_count),
types_(zone),
- outer_state_(outer_state),
- jsfunction_(state_info.jsfunction()) {
+ shared_info_(shared_info),
+ outer_state_(outer_state) {
types_.resize(GetSize(), kMachNone);
}
+
size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
- size_t size = parameters_count() + locals_count() + stack_count() +
+ size_t size = 1 + parameters_count() + locals_count() + stack_count() +
(HasContext() ? 1 : 0);
switch (combine.kind()) {
case OutputFrameStateCombine::kPushOutput:
@@ -716,7 +721,7 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
size_t count = 0;
for (const FrameStateDescriptor* iter = this; iter != NULL;
iter = iter->outer_state_) {
- if (iter->type_ == JS_FRAME) {
+ if (iter->type_ == FrameStateType::kJavaScriptFunction) {
++count;
}
}
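
The new leading `1 +` in GetSize accounts for the closure that AddFrameStateInputs now pushes first. For a JavaScript frame with 2 parameters, 3 locals and 1 stack slot, the base size is 1 + 2 + 3 + 1 + 1 (context) = 8 before any OutputFrameStateCombine adjustment; a minimal check of that arithmetic:

#include <cassert>
#include <cstddef>

// Sketch of the base-size computation for a kJavaScriptFunction frame,
// which always has a context slot.
size_t FrameStateBaseSize(size_t parameters, size_t locals, size_t stack,
                          bool has_context) {
  return 1 /* function */ + parameters + locals + stack +
         (has_context ? 1 : 0);
}

int main() { assert(FrameStateBaseSize(2, 3, 1, true) == 8); }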
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 39fdb2aab3..a87ef7dc9c 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -862,10 +862,12 @@ class Constant final {
class FrameStateDescriptor : public ZoneObject {
public:
- FrameStateDescriptor(Zone* zone, const FrameStateCallInfo& state_info,
+ FrameStateDescriptor(Zone* zone, FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine,
size_t parameters_count, size_t locals_count,
size_t stack_count,
- FrameStateDescriptor* outer_state = NULL);
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state = nullptr);
FrameStateType type() const { return type_; }
BailoutId bailout_id() const { return bailout_id_; }
@@ -873,9 +875,11 @@ class FrameStateDescriptor : public ZoneObject {
size_t parameters_count() const { return parameters_count_; }
size_t locals_count() const { return locals_count_; }
size_t stack_count() const { return stack_count_; }
+ MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateDescriptor* outer_state() const { return outer_state_; }
- MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
- bool HasContext() const { return type_ == JS_FRAME; }
+ bool HasContext() const {
+ return type_ == FrameStateType::kJavaScriptFunction;
+ }
size_t GetSize(OutputFrameStateCombine combine =
OutputFrameStateCombine::Ignore()) const;
@@ -894,8 +898,8 @@ class FrameStateDescriptor : public ZoneObject {
size_t locals_count_;
size_t stack_count_;
ZoneVector<MachineType> types_;
+ MaybeHandle<SharedFunctionInfo> const shared_info_;
FrameStateDescriptor* outer_state_;
- MaybeHandle<JSFunction> jsfunction_;
};
std::ostream& operator<<(std::ostream& os, const Constant& constant);
@@ -928,7 +932,7 @@ class PhiInstruction final : public ZoneObject {
class InstructionBlock final : public ZoneObject {
public:
InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
- RpoNumber loop_end, bool deferred);
+ RpoNumber loop_end, bool deferred, bool handler);
// Instruction indexes (used by the register allocator).
int first_instruction_index() const {
@@ -951,6 +955,7 @@ class InstructionBlock final : public ZoneObject {
void set_code_end(int32_t end) { code_end_ = end; }
bool IsDeferred() const { return deferred_; }
+ bool IsHandler() const { return handler_; }
RpoNumber ao_number() const { return ao_number_; }
RpoNumber rpo_number() const { return rpo_number_; }
@@ -998,6 +1003,7 @@ class InstructionBlock final : public ZoneObject {
int32_t code_start_; // start index of arch-specific code.
int32_t code_end_; // end index of arch-specific code.
const bool deferred_; // Block contains deferred code.
+ const bool handler_; // Block is a handler entry point.
bool needs_frame_;
bool must_construct_frame_;
bool must_deconstruct_frame_;
@@ -1150,6 +1156,9 @@ class InstructionSequence final : public ZoneObject {
StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
int GetFrameStateDescriptorCount();
+ DeoptimizationVector const& frame_state_descriptors() const {
+ return deoptimization_entries_;
+ }
RpoNumber InputRpo(Instruction* instr, size_t index);
@@ -1157,11 +1166,17 @@ class InstructionSequence final : public ZoneObject {
SourcePosition* result) const;
void SetSourcePosition(const Instruction* instr, SourcePosition value);
+ bool ContainsCall() const {
+ for (Instruction* instr : instructions_) {
+ if (instr->IsCall()) return true;
+ }
+ return false;
+ }
+
private:
friend std::ostream& operator<<(std::ostream& os,
const PrintableInstructionSequence& code);
- typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
Isolate* isolate_;
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 12b0e2f6cc..9c45a043ee 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -14,17 +14,6 @@ namespace internal {
namespace compiler {
-// Helper method that assumes replacement nodes are pure values that don't
-// produce an effect. Replaces {node} with {reduction} and relaxes effects.
-static Reduction ReplaceWithPureReduction(Node* node, Reduction reduction) {
- if (reduction.Changed()) {
- NodeProperties::ReplaceWithValue(node, reduction.replacement());
- return reduction;
- }
- return Reducer::NoChange();
-}
-
-
// Helper class to access JSCallFunction nodes that are potential candidates
// for reduction when they have a BuiltinFunctionId associated with them.
class JSCallReduction {
@@ -35,7 +24,7 @@ class JSCallReduction {
// constant callee being a well-known builtin with a BuiltinFunctionId.
bool HasBuiltinFunctionId() {
if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
- HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
return function->shared()->HasBuiltinFunctionId();
@@ -44,7 +33,7 @@ class JSCallReduction {
// Retrieves the BuiltinFunctionId as described above.
BuiltinFunctionId GetBuiltinFunctionId() {
DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
- HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
return function->shared()->builtin_function_id();
}
@@ -96,8 +85,10 @@ class JSCallReduction {
};
-JSBuiltinReducer::JSBuiltinReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ simplified_(jsgraph->zone()) {}
// ECMA-262, section 15.8.2.11.
@@ -153,21 +144,30 @@ Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
Reduction JSBuiltinReducer::Reduce(Node* node) {
+ Reduction reduction = NoChange();
JSCallReduction r(node);
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
case kMathMax:
- return ReplaceWithPureReduction(node, ReduceMathMax(node));
+ reduction = ReduceMathMax(node);
+ break;
case kMathImul:
- return ReplaceWithPureReduction(node, ReduceMathImul(node));
+ reduction = ReduceMathImul(node);
+ break;
case kMathFround:
- return ReplaceWithPureReduction(node, ReduceMathFround(node));
+ reduction = ReduceMathFround(node);
+ break;
default:
break;
}
- return NoChange();
+
+  // Replace the builtin call, assuming replacement nodes are pure values
+  // that don't produce an effect: replaces {node} with the reduction's
+  // replacement and relaxes effects.
+ if (reduction.Changed()) ReplaceWithValue(node, reduction.replacement());
+
+ return reduction;
}
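
The rewritten Reduce computes at most one reduction and performs the effect-relaxing replacement exactly once on the way out, instead of routing each case through the deleted static helper. A toy sketch of that dispatch shape (invented types, not the real reducer API):

// Toy shape of the "reduce, then replace once" dispatch above.
struct ToyReduction {
  int* replacement = nullptr;
  bool Changed() const { return replacement != nullptr; }
};

ToyReduction ReduceToy(int* node, int builtin_id) {
  ToyReduction reduction;  // NoChange() by default.
  switch (builtin_id) {
    case 0: reduction.replacement = node; break;  // e.g. Math.max(x) => x
    default: break;
  }
  if (reduction.Changed()) {
    // ReplaceWithValue(node, reduction.replacement) would rewire value,
    // effect and control uses here, exactly once for every case.
  }
  return reduction;
}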
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 14bf67595b..66b5723246 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -18,9 +18,9 @@ class JSGraph;
class MachineOperatorBuilder;
-class JSBuiltinReducer final : public Reducer {
+class JSBuiltinReducer final : public AdvancedReducer {
public:
- explicit JSBuiltinReducer(JSGraph* jsgraph);
+ explicit JSBuiltinReducer(Editor* editor, JSGraph* jsgraph);
~JSBuiltinReducer() final {}
Reduction Reduce(Node* node) final;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 1bf19ac7d6..e4d4d80f52 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -5,29 +5,53 @@
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/contexts.h"
namespace v8 {
namespace internal {
namespace compiler {
-Reduction JSContextSpecializer::Reduce(Node* node) {
- if (node->opcode() == IrOpcode::kJSLoadContext) {
- return ReduceJSLoadContext(node);
+Reduction JSContextSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kParameter:
+ return ReduceParameter(node);
+ case IrOpcode::kJSLoadContext:
+ return ReduceJSLoadContext(node);
+ case IrOpcode::kJSStoreContext:
+ return ReduceJSStoreContext(node);
+ default:
+ break;
}
- if (node->opcode() == IrOpcode::kJSStoreContext) {
- return ReduceJSStoreContext(node);
+ return NoChange();
+}
+
+
+Reduction JSContextSpecialization::ReduceParameter(Node* node) {
+ DCHECK_EQ(IrOpcode::kParameter, node->opcode());
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ Handle<Context> context_constant;
+ if (context().ToHandle(&context_constant)) {
+ return Replace(jsgraph()->Constant(context_constant));
+ }
}
return NoChange();
}
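
Concretely, for a function with two declared parameters Start has value outputs closure(-1), receiver(0), p0(1), p1(2), context(3): ValueOutputCount() is 5 and the context parameter index is 5 - 2 = 3. A tiny sanity check of that arithmetic:

#include <cassert>

// Start's value outputs for an N-parameter function are
// closure, receiver, p0 ... pN-1, context  =>  N + 3 outputs,
// so the context Parameter has index ValueOutputCount() - 2.
int ContextParameterIndex(int value_output_count) {
  return value_output_count - 2;
}

int main() {
  const int params = 2;
  const int outputs = params + 3;  // closure + receiver + params + context
  assert(ContextParameterIndex(outputs) == 3);
}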
-Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
+Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
- HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
// If the context is not constant, no reduction can occur.
if (!m.HasValue()) {
return NoChange();
@@ -36,9 +60,9 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
const ContextAccess& access = ContextAccessOf(node->op());
// Find the right parent context.
- Context* context = *m.Value().handle();
+ Handle<Context> context = Handle<Context>::cast(m.Value().handle());
for (size_t i = access.depth(); i > 0; --i) {
- context = context->previous();
+ context = handle(context->previous(), isolate());
}
// If the access itself is mutable, only fold-in the parent.
@@ -50,13 +74,11 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
const Operator* op = jsgraph_->javascript()->LoadContext(
0, access.index(), access.immutable());
node->set_op(op);
- Handle<Object> context_handle =
- Handle<Object>(context, jsgraph_->isolate());
- node->ReplaceInput(0, jsgraph_->Constant(context_handle));
+ node->ReplaceInput(0, jsgraph_->Constant(context));
return Changed(node);
}
- Handle<Object> value = Handle<Object>(
- context->get(static_cast<int>(access.index())), jsgraph_->isolate());
+ Handle<Object> value =
+ handle(context->get(static_cast<int>(access.index())), isolate());
// Even though the context slot is immutable, the context might have escaped
// before the function to which it belongs has initialized the slot.
@@ -70,15 +92,15 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
// TODO(titzer): record the specialization for sharing code across multiple
// contexts that have the same value in the corresponding context slot.
Node* constant = jsgraph_->Constant(value);
- NodeProperties::ReplaceWithValue(node, constant);
+ ReplaceWithValue(node, constant);
return Replace(constant);
}
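
The depth walk now stays on the handle layer, re-wrapping each parent in a fresh Handle instead of chasing raw Context* pointers across possible GC points. The walk itself is plain parent-chain traversal; a toy version over an invented linked chain:

// Toy context chain: each context links to its parent ("previous").
struct ToyContext {
  ToyContext* previous;
};

// Walk up `depth` parents, mirroring the loop above.
ToyContext* WalkToParent(ToyContext* context, unsigned depth) {
  for (unsigned i = depth; i > 0; --i) context = context->previous;
  return context;
}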
-Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
+Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
- HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
// If the context is not constant, no reduction can occur.
if (!m.HasValue()) {
return NoChange();
@@ -92,20 +114,26 @@ Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
}
// Find the right parent context.
- Context* context = *m.Value().handle();
+ Handle<Context> context = Handle<Context>::cast(m.Value().handle());
for (size_t i = access.depth(); i > 0; --i) {
- context = context->previous();
+ context = handle(context->previous(), isolate());
}
- const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
- node->set_op(op);
- Handle<Object> new_context_handle =
- Handle<Object>(context, jsgraph_->isolate());
- node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
-
+ node->set_op(javascript()->StoreContext(0, access.index()));
+ node->ReplaceInput(0, jsgraph_->Constant(context));
return Changed(node);
}
+
+Isolate* JSContextSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+JSOperatorBuilder* JSContextSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 4a2ce9829a..2ede6b5e17 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -6,30 +6,44 @@
#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/contexts.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class JSGraph;
+class JSOperatorBuilder;
+
+
// Specializes a given JSGraph to a given context, potentially constant folding
// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
-class JSContextSpecializer : public Reducer {
+class JSContextSpecialization final : public AdvancedReducer {
public:
- explicit JSContextSpecializer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+ JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
+ MaybeHandle<Context> context)
+ : AdvancedReducer(editor), jsgraph_(jsgraph), context_(context) {}
- Reduction Reduce(Node* node) override;
+ Reduction Reduce(Node* node) final;
- // Visible for unit testing.
+ private:
+ Reduction ReduceParameter(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
- private:
- JSGraph* jsgraph_;
+ Isolate* isolate() const;
+ JSOperatorBuilder* javascript() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ MaybeHandle<Context> context() const { return context_; }
+
+ JSGraph* const jsgraph_;
+ MaybeHandle<Context> context_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
new file mode 100644
index 0000000000..98b1827492
--- /dev/null
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -0,0 +1,69 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-frame-specialization.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSFrameSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kOsrValue:
+ return ReduceOsrValue(node);
+ case IrOpcode::kParameter:
+ return ReduceParameter(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
+ DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
+ DisallowHeapAllocation no_gc;
+ Object* object;
+ int const index = OpParameter<int>(node);
+ int const parameters_count = frame()->ComputeParametersCount() + 1;
+ if (index == Linkage::kOsrContextSpillSlotIndex) {
+ object = frame()->context();
+ } else if (index >= parameters_count) {
+ object = frame()->GetExpression(index - parameters_count);
+ } else {
+ // The OsrValue index 0 is the receiver.
+ object = index ? frame()->GetParameter(index - 1) : frame()->receiver();
+ }
+ return Replace(jsgraph()->Constant(handle(object, isolate())));
+}
+
+
+Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
+ DCHECK_EQ(IrOpcode::kParameter, node->opcode());
+ DisallowHeapAllocation no_gc;
+ Object* object;
+ int const index = ParameterIndexOf(node->op());
+ int const parameters_count = frame()->ComputeParametersCount() + 1;
+ if (index == Linkage::kJSFunctionCallClosureParamIndex) {
+ object = frame()->function();
+ } else if (index == parameters_count) {
+ // The Parameter index (arity + 1) is the context.
+ object = frame()->context();
+ } else {
+ // The Parameter index 0 is the receiver.
+ object = index ? frame()->GetParameter(index - 1) : frame()->receiver();
+ }
+ return Replace(jsgraph()->Constant(handle(object, isolate())));
+}
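
Both reducers use the same frame layout convention: slot 0 is the receiver, slots 1..n hold the declared parameters, and index n + 1 (arity plus one) denotes the context. A compact sketch of that mapping against an invented frame type:

#include <cassert>
#include <vector>

// Toy frame: [receiver, p0, ..., pN-1], plus a context cell on the side.
struct ToyFrame {
  std::vector<int> params_with_receiver;  // index 0 is the receiver
  int context;
};

int LookupToyParameter(const ToyFrame& frame, int index) {
  const int parameters_count =
      static_cast<int>(frame.params_with_receiver.size());
  if (index == parameters_count) return frame.context;  // arity + 1 slot
  return frame.params_with_receiver[index];             // 0 is the receiver
}

int main() {
  ToyFrame f{{7, 1, 2}, 42};  // receiver = 7, p0 = 1, p1 = 2
  assert(LookupToyParameter(f, 0) == 7);
  assert(LookupToyParameter(f, 3) == 42);
}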
+
+
+Isolate* JSFrameSpecialization::isolate() const { return jsgraph()->isolate(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
new file mode 100644
index 0000000000..c6fc561c5c
--- /dev/null
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -0,0 +1,44 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_FRAME_SPECIALIZATION_H_
+#define V8_COMPILER_JS_FRAME_SPECIALIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+
+class JSFrameSpecialization final : public Reducer {
+ public:
+ JSFrameSpecialization(JavaScriptFrame const* frame, JSGraph* jsgraph)
+ : frame_(frame), jsgraph_(jsgraph) {}
+ ~JSFrameSpecialization() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceOsrValue(Node* node);
+ Reduction ReduceParameter(Node* node);
+
+ Isolate* isolate() const;
+ JavaScriptFrame const* frame() const { return frame_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+
+ JavaScriptFrame const* const frame_;
+ JSGraph* const jsgraph_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSFrameSpecialization);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_FRAME_SPECIALIZATION_H_
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index c60af1d467..da42aba523 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -17,6 +17,19 @@ namespace v8 {
namespace internal {
namespace compiler {
+static CallDescriptor::Flags AdjustFrameStatesForCall(Node* node) {
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ if (count > 1) {
+ int index = NodeProperties::FirstFrameStateIndex(node) + 1;
+ do {
+ node->RemoveInput(index);
+ } while (--count > 1);
+ }
+ return count > 0 ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
+}
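
AdjustFrameStatesForCall keeps only the first frame state input on the node, drops the rest, and reports whether one remains so the caller can set kNeedsFrameState. A standalone sketch of the same trimming over a plain vector of inputs:

#include <vector>

// Keep the first frame state, erase the rest; return whether the lowered
// call still needs a frame state (mirrors the helper above).
bool TrimFrameStates(std::vector<int>* inputs, int first_frame_state_index,
                     int frame_state_count) {
  while (frame_state_count > 1) {
    inputs->erase(inputs->begin() + first_frame_state_index + 1);
    --frame_state_count;
  }
  return frame_state_count > 0;
}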
+
+
JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
: is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
@@ -41,7 +54,6 @@ Reduction JSGenericLowering::Reduce(Node* node) {
Node* test = graph()->NewNode(machine()->WordEqual(), condition,
jsgraph()->TrueConstant());
node->ReplaceInput(0, test);
- break;
}
// Fall-through.
default:
@@ -52,11 +64,13 @@ Reduction JSGenericLowering::Reduce(Node* node) {
}
-#define REPLACE_BINARY_OP_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token, \
- OpParameter<LanguageMode>(node)), \
- CallDescriptor::kPatchableCallSiteWithNop); \
+#define REPLACE_BINARY_OP_IC_CALL(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
+ ReplaceWithStubCall(node, CodeFactory::BinaryOpIC( \
+ isolate(), token, \
+ strength(OpParameter<LanguageMode>(node))), \
+ CallDescriptor::kPatchableCallSiteWithNop | flags); \
}
REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
@@ -116,11 +130,8 @@ static CallDescriptor::Flags FlagsForNode(Node* node) {
void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
- Callable callable = CodeFactory::CompareIC(isolate(), token);
- CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0,
- CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
- Operator::kNoProperties, kMachIntPtr);
+ Callable callable = CodeFactory::CompareIC(
+ isolate(), token, strength(OpParameter<LanguageMode>(node)));
// Create a new call node asking a CompareIC for help.
NodeVector inputs(zone());
@@ -140,6 +151,10 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
inputs.push_back(NodeProperties::GetEffectInput(node));
inputs.push_back(NodeProperties::GetControlInput(node));
}
+ CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0,
+ CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
+ Operator::kNoProperties, kMachIntPtr);
Node* compare =
graph()->NewNode(common()->Call(desc_compare),
static_cast<int>(inputs.size()), &inputs.front());
@@ -179,10 +194,7 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
Node* booleanize = graph()->NewNode(op, compare, jsgraph()->ZeroConstant());
// Finally patch the original node to select a boolean.
- NodeProperties::ReplaceWithValue(node, node, compare);
- // TODO(mstarzinger): Just a work-around because SelectLowering might
- // otherwise introduce a Phi without any uses, making Scheduler unhappy.
- if (node->UseCount() == 0) return;
+ NodeProperties::ReplaceUses(node, node, compare, compare, compare);
node->TrimInputCount(3);
node->ReplaceInput(0, booleanize);
node->ReplaceInput(1, true_value);
@@ -194,52 +206,40 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
Operator::Properties properties = node->op()->properties();
- flags |= FlagsForNode(node);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, flags, properties);
- const Operator* new_op = common()->Call(desc);
-
- // Take care of frame states.
- int old_frame_state_count =
- OperatorProperties::GetFrameStateInputCount(node->op());
- int new_frame_state_count =
- (flags & CallDescriptor::kNeedsFrameState) ? 1 : 0;
- DCHECK_GE(old_frame_state_count, new_frame_state_count);
- // If there are extra frame states, get rid of them.
- for (int i = new_frame_state_count; i < old_frame_state_count; i++) {
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) +
- new_frame_state_count);
- }
-
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
- node->set_op(new_op);
+ node->set_op(common()->Call(desc));
}
void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
Builtins::JavaScript id,
int nargs) {
+ Node* context_input = NodeProperties::GetContextInput(node);
+ Node* effect_input = NodeProperties::GetEffectInput(node);
+
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Operator::Properties properties = node->op()->properties();
Callable callable =
CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
- CallDescriptor* desc =
- Linkage::GetStubCallDescriptor(isolate(), zone(), callable.descriptor(),
- nargs, FlagsForNode(node), properties);
- Node* global_object = graph()->NewNode(
- machine()->Load(kMachAnyTagged), NodeProperties::GetContextInput(node),
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- NodeProperties::GetEffectInput(node), graph()->start());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), nargs, flags, properties);
+ Node* global_object =
+ graph()->NewNode(machine()->Load(kMachAnyTagged), context_input,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
+ effect_input, graph()->start());
Node* builtins_object = graph()->NewNode(
machine()->Load(kMachAnyTagged), global_object,
jsgraph()->IntPtrConstant(GlobalObject::kBuiltinsOffset - kHeapObjectTag),
- NodeProperties::GetEffectInput(node), graph()->start());
+ effect_input, graph()->start());
Node* function = graph()->NewNode(
machine()->Load(kMachAnyTagged), builtins_object,
jsgraph()->IntPtrConstant(JSBuiltinsObject::OffsetOfFunctionWithId(id) -
kHeapObjectTag),
- NodeProperties::GetEffectInput(node), graph()->start());
+ effect_input, graph()->start());
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 1, function);
@@ -265,28 +265,34 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
void JSGenericLowering::LowerJSUnaryNot(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::ToBoolean(
isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSTypeOf(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::Typeof(isolate());
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags);
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSToBoolean(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable =
CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSToNumber(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::ToNumber(isolate());
- ReplaceWithStubCall(node, callable, FlagsForNode(node));
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -306,51 +312,84 @@ void JSGenericLowering::LowerJSToObject(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
- Callable callable =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
- if (FLAG_vector_ics) {
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->InsertInput(zone(), 3,
- jsgraph()->HeapConstant(p.feedback().vector()));
- }
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- Callable callable =
- p.load_ic() == NAMED
- ? CodeFactory::LoadICInOptimizedCode(isolate(), p.contextual_mode(),
- UNINITIALIZED)
- : CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.contextual_mode(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_ics) {
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->InsertInput(zone(), 3,
- jsgraph()->HeapConstant(p.feedback().vector()));
- }
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
+}
+
+
+void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ const LoadNamedParameters& p = LoadGlobalParametersOf(node->op());
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.contextual_mode(), SLOPPY, UNINITIALIZED);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ const StorePropertyParameters& p = StorePropertyParametersOf(node->op());
LanguageMode language_mode = OpParameter<LanguageMode>(node);
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), language_mode, UNINITIALIZED);
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ if (FLAG_vector_stores) {
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ } else {
+ node->RemoveInput(3);
+ }
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- Callable callable = p.store_ic() == NAMED
- ? CodeFactory::StoreIC(isolate(), p.language_mode())
- : CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ if (FLAG_vector_stores) {
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ } else {
+ node->RemoveInput(3);
+ }
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
+}
+
+
+void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ const StoreNamedParameters& p = StoreGlobalParametersOf(node->op());
+ Callable callable = CodeFactory::StoreIC(isolate(), p.language_mode());
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ if (FLAG_vector_stores) {
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ } else {
+ node->RemoveInput(3);
+ }
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
@@ -367,16 +406,12 @@ void JSGenericLowering::LowerJSHasProperty(Node* node) {
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
- InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ InstanceofStub::Flags stub_flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kReturnTrueFalseObject |
InstanceofStub::kArgsInRegisters);
- InstanceofStub stub(isolate(), flags);
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(isolate(), zone(), d, 0,
- FlagsForNode(node));
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- node->InsertInput(zone(), 0, stub_code);
- node->set_op(common()->Call(desc));
+ Callable callable = CodeFactory::Instanceof(isolate(), stub_flags);
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -417,6 +452,31 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
}
+void JSGenericLowering::LowerJSLoadDynamicGlobal(Node* node) {
+ const DynamicGlobalAccess& access = DynamicGlobalAccessOf(node->op());
+ Runtime::FunctionId function_id =
+ (access.mode() == CONTEXTUAL) ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ Node* projection = graph()->NewNode(common()->Projection(0), node);
+ NodeProperties::ReplaceUses(node, projection, node, node, node);
+ node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
+ node->RemoveInput(NodeProperties::FirstValueIndex(node));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
+ ReplaceWithRuntimeCall(node, function_id);
+ projection->ReplaceInput(0, node);
+}
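
The Projection(0) maneuver lets a node that is being turned into a pair-returning runtime call keep its single-value users: value uses are first redirected to the projection, the node is mutated into the call, and the projection is finally pointed back at it. A toy of the value-level idea, with an invented pair-returning stand-in:

#include <cassert>
#include <utility>

// Invented stand-in for a runtime call returning {value, extra}.
std::pair<int, int> ToyRuntimeLoadLookupSlot() { return {7, 0}; }

// Projection(0) extracts the first result; old single-value users of the
// node now read through this instead.
int Projection0(const std::pair<int, int>& results) { return results.first; }

int main() { assert(Projection0(ToyRuntimeLoadLookupSlot()) == 7); }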
+
+
+void JSGenericLowering::LowerJSLoadDynamicContext(Node* node) {
+ const DynamicContextAccess& access = DynamicContextAccessOf(node->op());
+ Node* projection = graph()->NewNode(common()->Projection(0), node);
+ NodeProperties::ReplaceUses(node, projection, node, node, node);
+ node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
+ ReplaceWithRuntimeCall(node, Runtime::kLoadLookupSlot);
+ projection->ReplaceInput(0, node);
+}
+
+
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters p = CreateClosureParametersOf(node->op());
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.shared_info()));
@@ -450,8 +510,9 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
int arity = OpParameter<int>(node);
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), d, arity, FlagsForNode(node));
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor* desc =
+ Linkage::GetStubCallDescriptor(isolate(), zone(), d, arity, flags);
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
Node* construct = NodeProperties::GetValueInput(node, 0);
node->InsertInput(zone(), 0, stub_code);
@@ -462,59 +523,17 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
}
-bool JSGenericLowering::TryLowerDirectJSCall(Node* node) {
- // Lower to a direct call to a constant JSFunction if legal.
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- int arg_count = static_cast<int>(p.arity() - 2);
-
- // Check the function is a constant and is really a JSFunction.
- HeapObjectMatcher<Object> function_const(node->InputAt(0));
- if (!function_const.HasValue()) return false; // not a constant.
- Handle<Object> func = function_const.Value().handle();
- if (!func->IsJSFunction()) return false; // not a function.
- Handle<JSFunction> function = Handle<JSFunction>::cast(func);
- if (arg_count != function->shared()->internal_formal_parameter_count()) {
- return false;
- }
-
- // Check the receiver doesn't need to be wrapped.
- Node* receiver = node->InputAt(1);
- if (!NodeProperties::IsTyped(receiver)) return false;
- Type* ok_receiver = Type::Union(Type::Undefined(), Type::Receiver(), zone());
- if (!NodeProperties::GetBounds(receiver).upper->Is(ok_receiver)) return false;
-
- int index = NodeProperties::FirstContextIndex(node);
-
- // TODO(titzer): total hack to share function context constants.
- // Remove this when the JSGraph canonicalizes heap constants.
- Node* context = node->InputAt(index);
- HeapObjectMatcher<Context> context_const(context);
- if (!context_const.HasValue() ||
- *(context_const.Value().handle()) != function->context()) {
- context = jsgraph()->HeapConstant(Handle<Context>(function->context()));
- }
- node->ReplaceInput(index, context);
- CallDescriptor::Flags flags = FlagsForNode(node);
- if (is_strict(p.language_mode())) flags |= CallDescriptor::kSupportsTailCalls;
- CallDescriptor* desc =
- Linkage::GetJSCallDescriptor(zone(), false, 1 + arg_count, flags);
- node->set_op(common()->Call(desc));
- return true;
-}
-
-
void JSGenericLowering::LowerJSCallFunction(Node* node) {
- // Fast case: call function directly.
- if (TryLowerDirectJSCall(node)) return;
-
- // General case: CallFunctionStub.
const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
int arg_count = static_cast<int>(p.arity() - 2);
CallFunctionStub stub(isolate(), arg_count, p.flags());
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ if (p.AllowTailCalls()) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), d, static_cast<int>(p.arity() - 1),
- FlagsForNode(node));
+ isolate(), zone(), d, static_cast<int>(p.arity() - 1), flags);
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
node->InsertInput(zone(), 0, stub_code);
node->set_op(common()->Call(desc));
@@ -527,6 +546,212 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
}
+void JSGenericLowering::LowerJSForInDone(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kForInDone);
+}
+
+
+void JSGenericLowering::LowerJSForInNext(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kForInNext);
+}
+
+
+void JSGenericLowering::LowerJSForInPrepare(Node* node) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+
+ // Get the set of properties to enumerate.
+ Runtime::Function const* function =
+ Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
+ CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function->function_id, 1, Operator::kNoProperties);
+ Node* cache_type = effect = graph()->NewNode(
+ common()->Call(descriptor),
+ jsgraph()->CEntryStubConstant(function->result_size), object,
+ jsgraph()->ExternalConstant(function->function_id),
+ jsgraph()->Int32Constant(1), context, frame_state, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), cache_type);
+
+ Node* object_map = effect = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), object,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ effect, control);
+ Node* cache_type_map = effect = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), cache_type,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ effect, control);
+ Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
+
+ // If we got a map from the GetPropertyNamesFast runtime call, we can do a
+ // fast modification check. Otherwise, we got a fixed array, and we have to
+ // perform a slow check on every iteration.
+ Node* check0 =
+ graph()->NewNode(machine()->WordEqual(), cache_type_map, meta_map);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* cache_array_true0;
+ Node* cache_length_true0;
+ Node* cache_type_true0;
+ Node* etrue0;
+ {
+ // Enum cache case.
+ Node* cache_type_enum_length = etrue0 = graph()->NewNode(
+ machine()->Load(kMachUint32), cache_type,
+ jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
+ effect, if_true0);
+ cache_type_enum_length =
+ graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
+ jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
+
+ Node* check1 =
+ graph()->NewNode(machine()->Word32Equal(), cache_type_enum_length,
+ jsgraph()->Int32Constant(0));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* cache_array_true1;
+ Node* etrue1;
+ {
+ // No properties to enumerate.
+ cache_array_true1 =
+ jsgraph()->HeapConstant(isolate()->factory()->empty_fixed_array());
+ etrue1 = etrue0;
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* cache_array_false1;
+ Node* efalse1;
+ {
+ // Load the enumeration cache from the instance descriptors of {object}.
+ Node* object_map_descriptors = efalse1 = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), object_map,
+ jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
+ etrue0, if_false1);
+ Node* object_map_enum_cache = efalse1 = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), object_map_descriptors,
+ jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
+ kHeapObjectTag),
+ efalse1, if_false1);
+ cache_array_false1 = efalse1 = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), object_map_enum_cache,
+ jsgraph()->IntPtrConstant(
+ DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
+ efalse1, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ cache_array_true0 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true1,
+ cache_array_false1, if_true0);
+
+ cache_length_true0 = graph()->NewNode(
+ machine()->WordShl(),
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ cache_type_enum_length)
+ : cache_type_enum_length,
+ jsgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize));
+ cache_type_true0 = cache_type;
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* cache_array_false0;
+ Node* cache_length_false0;
+ Node* cache_type_false0;
+ Node* efalse0;
+ {
+ // FixedArray case.
+ Node* object_instance_type = efalse0 = graph()->NewNode(
+ machine()->Load(kMachUint8), object_map,
+ jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
+ effect, if_false0);
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ Node* check1 = graph()->NewNode(
+ machine()->Uint32LessThanOrEqual(), object_instance_type,
+ jsgraph()->Uint32Constant(LAST_JS_PROXY_TYPE));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+  Node* cache_type_true1 = jsgraph()->ZeroConstant();  // Zero indicates proxy.
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+  Node* cache_type_false1 = jsgraph()->OneConstant();  // One indicates slow check.
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ cache_type_false0 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_type_true1,
+ cache_type_false1, if_false0);
+
+ cache_array_false0 = cache_type;
+ cache_length_false0 = efalse0 = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), cache_array_false0,
+ jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
+ efalse0, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* cache_array =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true0,
+ cache_array_false0, control);
+ Node* cache_length =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_length_true0,
+ cache_length_false0, control);
+ cache_type = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
+ cache_type_true0, cache_type_false0, control);
+
+ for (auto edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ Node* const use = edge.from();
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(control);
+ use->Kill();
+ } else if (use->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(cache_type_true0);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ Node* const use = edge.from();
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ DCHECK_EQ(IrOpcode::kProjection, use->opcode());
+ switch (ProjectionIndexOf(use->op())) {
+ case 0:
+ use->ReplaceUses(cache_type);
+ break;
+ case 1:
+ use->ReplaceUses(cache_array);
+ break;
+ case 2:
+ use->ReplaceUses(cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ use->Kill();
+ }
+ }
+}
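
The whole lowering is assembled from nested diamonds: a Branch feeding IfTrue/IfFalse arms that reconverge at a Merge, with Phi and EffectPhi selecting the per-arm values and effects. The value-level shape of one diamond, written as ordinary control flow:

// One diamond as plain C++: the returned value plays the role of the Phi
// that merges the two arms (a sketch of the graph shape, not the node API).
int Diamond(bool check, int true_value, int false_value) {
  int phi;
  if (check) {   // Branch + IfTrue
    phi = true_value;
  } else {       // IfFalse
    phi = false_value;
  }
  return phi;    // Merge + Phi
}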
+
+
+void JSGenericLowering::LowerJSForInStep(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kForInStep);
+}
+
+
void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -552,10 +777,19 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
- // Relax controls of {node}, i.e. make it free floating.
- NodeProperties::ReplaceWithValue(node, node, ephi, merge);
+  // Wire the new diamond into the graph; {node} can still throw.
+ NodeProperties::ReplaceUses(node, node, ephi, node, node);
NodeProperties::ReplaceEffectInput(ephi, efalse, 1);
+ // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+ // the node and places it inside the diamond. Come up with a helper method!
+ for (Node* use : node->uses()) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(merge);
+ merge->ReplaceInput(1, use);
+ }
+ }
+
// Turn the stack check into a runtime call.
ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
}
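
The ForInPrepare lowering above turns the untagged enum length into a Smi with a single WordShl by kSmiShiftSize + kSmiTagSize. A minimal standalone C++ sketch of that tagging step, assuming V8's 64-bit Smi layout (kSmiTagSize = 1, kSmiShiftSize = 31, payload in the upper 32 bits); the constants here are stand-ins, not the real headers:

    #include <cassert>
    #include <cstdint>

    // Stand-in constants mirroring V8's 64-bit Smi layout (assumption).
    constexpr int kSmiTagSize = 1;     // one tag bit, 0 for a Smi
    constexpr int kSmiShiftSize = 31;  // payload lives in the upper word

    // Mirrors WordShl(enum_length, kSmiShiftSize + kSmiTagSize) above.
    int64_t TagSmi(uint32_t value) {
      return static_cast<int64_t>(value) << (kSmiShiftSize + kSmiTagSize);
    }

    int main() {
      assert(TagSmi(5) == (int64_t{5} << 32));
      assert((TagSmi(5) & 1) == 0);  // clear tag bit marks a Smi
    }
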
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 89467dbdd2..9811ba8451 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -41,9 +41,6 @@ class JSGenericLowering final : public Reducer {
void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
- // Helper for optimization of JSCallFunction.
- bool TryLowerDirectJSCall(Node* node);
-
Zone* zone() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 9363268513..84fcf82c84 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -183,23 +183,28 @@ Node* JSGraph::ExternalConstant(ExternalReference reference) {
}
+Node* JSGraph::ExternalConstant(Runtime::FunctionId function_id) {
+ return ExternalConstant(ExternalReference(function_id, isolate()));
+}
+
+
Node* JSGraph::EmptyFrameState() {
Node* empty_frame_state = cached_nodes_[kEmptyFrameState];
if (!empty_frame_state || empty_frame_state->IsDead()) {
Node* state_values = graph()->NewNode(common()->StateValues(0));
empty_frame_state = graph()->NewNode(
- common()->FrameState(JS_FRAME, BailoutId::None(),
- OutputFrameStateCombine::Ignore()),
+ common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(), nullptr),
state_values, state_values, state_values, NoContextConstant(),
- UndefinedConstant());
+ UndefinedConstant(), graph()->start());
cached_nodes_[kEmptyFrameState] = empty_frame_state;
}
return empty_frame_state;
}
-Node* JSGraph::DeadControl() {
- return CACHED(kDeadControl, graph()->NewNode(common()->Dead()));
+Node* JSGraph::Dead() {
+ return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 13dc367d4d..c7f07d46db 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -102,6 +102,7 @@ class JSGraph : public ZoneObject {
// Creates an ExternalConstant node, usually canonicalized.
Node* ExternalConstant(ExternalReference ref);
+ Node* ExternalConstant(Runtime::FunctionId function_id);
Node* SmiConstant(int32_t immediate) {
DCHECK(Smi::IsValid(immediate));
@@ -116,8 +117,8 @@ class JSGraph : public ZoneObject {
// cannot deopt.
Node* EmptyFrameState();
- // Create a control node that serves as control dependency for dead nodes.
- Node* DeadControl();
+ // Create a control node that serves as dependency for dead nodes.
+ Node* Dead();
JSOperatorBuilder* javascript() const { return javascript_; }
CommonOperatorBuilder* common() const { return common_; }
@@ -141,7 +142,7 @@ class JSGraph : public ZoneObject {
kOneConstant,
kNaNConstant,
kEmptyFrameState,
- kDeadControl,
+ kDead,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 7e0aa13411..88d91718e5 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -9,7 +9,6 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -59,108 +58,6 @@ class JSCallFunctionAccessor {
};
-namespace {
-
-// A facade on a JSFunction's graph to facilitate inlining. It assumes the
-// that the function graph has only one return statement, and provides
-// {UnifyReturn} to convert a function graph to that end.
-class Inlinee {
- public:
- Inlinee(Node* start, Node* end) : start_(start), end_(end) {}
-
- // Returns the last regular control node, that is
- // the last control node before the end node.
- Node* end_block() { return NodeProperties::GetControlInput(unique_return()); }
-
- // Return the effect output of the graph,
- // that is the effect input of the return statement of the inlinee.
- Node* effect_output() {
- return NodeProperties::GetEffectInput(unique_return());
- }
- // Return the value output of the graph,
- // that is the value input of the return statement of the inlinee.
- Node* value_output() {
- return NodeProperties::GetValueInput(unique_return(), 0);
- }
- // Return the control output of the graph,
- // that is the control input of the return statement of the inlinee.
- Node* control_output() {
- return NodeProperties::GetControlInput(unique_return(), 0);
- }
- // Return the unique return statement of the graph.
- Node* unique_return() {
- Node* unique_return = NodeProperties::GetControlInput(end_);
- DCHECK_EQ(IrOpcode::kReturn, unique_return->opcode());
- return unique_return;
- }
-
- // Counts JSFunction, Receiver, arguments, context but not effect, control.
- size_t total_parameters() { return start_->op()->ValueOutputCount(); }
-
- // Counts only formal parameters.
- size_t formal_parameters() {
- DCHECK_GE(total_parameters(), 3u);
- return total_parameters() - 3;
- }
-
- // Inline this graph at {call}, use {jsgraph} and its zone to create
- // any new nodes.
- Reduction InlineAtCall(JSGraph* jsgraph, Node* call);
-
- // Ensure that only a single return reaches the end node.
- static void UnifyReturn(JSGraph* jsgraph);
-
- private:
- Node* start_;
- Node* end_;
-};
-
-
-void Inlinee::UnifyReturn(JSGraph* jsgraph) {
- Graph* graph = jsgraph->graph();
-
- Node* final_merge = NodeProperties::GetControlInput(graph->end(), 0);
- if (final_merge->opcode() == IrOpcode::kReturn) {
- // nothing to do
- return;
- }
- DCHECK_EQ(IrOpcode::kMerge, final_merge->opcode());
-
- int predecessors = final_merge->op()->ControlInputCount();
-
- const Operator* op_phi = jsgraph->common()->Phi(kMachAnyTagged, predecessors);
- const Operator* op_ephi = jsgraph->common()->EffectPhi(predecessors);
-
- NodeVector values(jsgraph->zone());
- NodeVector effects(jsgraph->zone());
- // Iterate over all control flow predecessors,
- // which must be return statements.
- for (Edge edge : final_merge->input_edges()) {
- Node* input = edge.to();
- switch (input->opcode()) {
- case IrOpcode::kReturn:
- values.push_back(NodeProperties::GetValueInput(input, 0));
- effects.push_back(NodeProperties::GetEffectInput(input));
- edge.UpdateTo(NodeProperties::GetControlInput(input));
- input->NullAllInputs();
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- values.push_back(final_merge);
- effects.push_back(final_merge);
- Node* phi =
- graph->NewNode(op_phi, static_cast<int>(values.size()), &values.front());
- Node* ephi = graph->NewNode(op_ephi, static_cast<int>(effects.size()),
- &effects.front());
- Node* new_return =
- graph->NewNode(jsgraph->common()->Return(), phi, ephi, final_merge);
- graph->end()->ReplaceInput(0, new_return);
-}
-
-
class CopyVisitor {
public:
CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
@@ -218,7 +115,8 @@ class CopyVisitor {
};
-Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
+Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
+ Node* start, Node* end) {
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee, and {effect} becomes
// the effect input of the start of the inlinee.
@@ -226,12 +124,14 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
Node* effect = NodeProperties::GetEffectInput(call);
// Context is last argument.
- int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
+ int const inlinee_context_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 1;
+
// {inliner_inputs} counts JSFunction, Receiver, arguments, but not
// context, effect, control.
int inliner_inputs = call->op()->ValueInputCount();
// Iterate over all uses of the start node.
- for (Edge edge : start_->use_edges()) {
+ for (Edge edge : start->use_edges()) {
Node* use = edge.from();
switch (use->opcode()) {
case IrOpcode::kParameter: {
@@ -239,14 +139,12 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
if (index < inliner_inputs && index < inlinee_context_index) {
// There is an input from the call, and the index is a value
// projection but not the context, so rewire the input.
- NodeProperties::ReplaceWithValue(use, call->InputAt(index));
+ Replace(use, call->InputAt(index));
} else if (index == inlinee_context_index) {
- // TODO(turbofan): We always context specialize inlinees currently, so
- // we should never get here.
- UNREACHABLE();
+ Replace(use, context);
} else if (index < inlinee_context_index) {
// Call has fewer arguments than required, fill with undefined.
- NodeProperties::ReplaceWithValue(use, jsgraph->UndefinedConstant());
+ Replace(use, jsgraph_->UndefinedConstant());
} else {
// We got too many arguments, discard for now.
// TODO(sigurds): Fix to treat arguments array correctly.
@@ -258,6 +156,8 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
edge.UpdateTo(effect);
} else if (NodeProperties::IsControlEdge(edge)) {
edge.UpdateTo(control);
+ } else if (NodeProperties::IsFrameStateEdge(edge)) {
+ edge.UpdateTo(frame_state);
} else {
UNREACHABLE();
}
@@ -265,31 +165,59 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
}
}
- NodeProperties::ReplaceWithValue(call, value_output(), effect_output(),
- control_output());
-
- return Reducer::Replace(value_output());
+ NodeVector values(local_zone_);
+ NodeVector effects(local_zone_);
+ NodeVector controls(local_zone_);
+ for (Node* const input : end->inputs()) {
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ values.push_back(NodeProperties::GetValueInput(input, 0));
+ effects.push_back(NodeProperties::GetEffectInput(input));
+ controls.push_back(NodeProperties::GetControlInput(input));
+ break;
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kThrow:
+ jsgraph_->graph()->end()->AppendInput(jsgraph_->zone(), input);
+ jsgraph_->graph()->end()->set_op(
+ jsgraph_->common()->End(jsgraph_->graph()->end()->InputCount()));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ DCHECK_NE(0u, values.size());
+ DCHECK_EQ(values.size(), effects.size());
+ DCHECK_EQ(values.size(), controls.size());
+ int const input_count = static_cast<int>(controls.size());
+ Node* control_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Merge(input_count), input_count, &controls.front());
+ values.push_back(control_output);
+ effects.push_back(control_output);
+ Node* value_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Phi(kMachAnyTagged, input_count),
+ static_cast<int>(values.size()), &values.front());
+ Node* effect_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->EffectPhi(input_count),
+ static_cast<int>(effects.size()), &effects.front());
+
+ ReplaceWithValue(call, value_output, effect_output, control_output);
+
+ return Changed(value_output);
}
-} // namespace
+Node* JSInliner::CreateArgumentsAdaptorFrameState(
+ JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info,
+ Zone* temp_zone) {
+ const FrameStateFunctionInfo* state_info =
+ jsgraph_->common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kArgumentsAdaptor,
+ static_cast<int>(call->formal_arguments()) + 1, 0, shared_info);
-void JSInliner::AddClosureToFrameState(Node* frame_state,
- Handle<JSFunction> jsfunction) {
- FrameStateCallInfo call_info = OpParameter<FrameStateCallInfo>(frame_state);
const Operator* op = jsgraph_->common()->FrameState(
- FrameStateType::JS_FRAME, call_info.bailout_id(),
- call_info.state_combine(), jsfunction);
- frame_state->set_op(op);
-}
-
-
-Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
- Handle<JSFunction> jsfunction,
- Zone* temp_zone) {
- const Operator* op = jsgraph_->common()->FrameState(
- FrameStateType::ARGUMENTS_ADAPTOR, BailoutId(-1),
- OutputFrameStateCombine::Ignore(), jsfunction);
+ BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
const Operator* op0 = jsgraph_->common()->StateValues(0);
Node* node0 = jsgraph_->graph()->NewNode(op0);
NodeVector params(temp_zone);
@@ -303,7 +231,7 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
op_param, static_cast<int>(params.size()), &params.front());
return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
jsgraph_->UndefinedConstant(),
- call->frame_state());
+ call->jsfunction(), call->frame_state());
}
@@ -311,25 +239,60 @@ Reduction JSInliner::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
JSCallFunctionAccessor call(node);
- HeapObjectMatcher<JSFunction> match(call.jsfunction());
+ HeapObjectMatcher match(call.jsfunction());
if (!match.HasValue()) return NoChange();
- Handle<JSFunction> function = match.Value().handle();
- if (!function->IsJSFunction()) return NoChange();
- if (mode_ == kBuiltinsInlining && !function->shared()->inline_builtin()) {
+ if (!match.Value().handle()->IsJSFunction()) return NoChange();
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(match.Value().handle());
+ if (mode_ == kRestrictedInlining && !function->shared()->force_inline()) {
+ return NoChange();
+ }
+
+ // Disallow cross native-context inlining for now. This means that all parts
+ // of the resulting code will operate on the same global object.
+ // This also prevents cross context leaks for asm.js code, where we could
+ // inline functions from a different context and hold on to that context (and
+ // closure) from the code object.
+ // TODO(turbofan): We might want to revisit this restriction later when we
+ // have a need for this, and we know how to model different native contexts
+ // in the same graph in a compositional way.
+ if (function->context()->native_context() !=
+ info_->context()->native_context()) {
+ TRACE("Not inlining %s into %s because of different native contexts\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+ // TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
+ // not inlining recursive functions. We might want to relax that at some
+ // point.
+ for (Node* frame_state = call.frame_state();
+ frame_state->opcode() == IrOpcode::kFrameState;
+ frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
+ FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info;
+ if (info.shared_info().ToHandle(&shared_info) &&
+ *shared_info == function->shared()) {
+ TRACE("Not inlining %s into %s because call is recursive\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+ }
+
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
+ if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
if (!Compiler::ParseAndAnalyze(info.parse_info())) return NoChange();
if (!Compiler::EnsureDeoptimizationSupport(&info)) return NoChange();
if (info.scope()->arguments() != NULL && is_sloppy(info.language_mode())) {
// For now do not inline functions that use their arguments array.
- TRACE("Not Inlining %s into %s because inlinee uses arguments array\n",
+ TRACE("Not inlining %s into %s because inlinee uses arguments array\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
@@ -342,40 +305,39 @@ Reduction JSInliner::Reduce(Node* node) {
Graph graph(info.zone());
JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
jsgraph_->javascript(), jsgraph_->machine());
+ AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
+ graph_builder.CreateGraph(false);
// The inlinee specializes to the context from the JSFunction object.
// TODO(turbofan): We might want to load the context from the JSFunction at
// runtime in case we only know the SharedFunctionInfo once we have dynamic
// type feedback in the compiler.
- AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
- graph_builder.CreateGraph(true, false);
- JSContextSpecializer context_specializer(&jsgraph);
- GraphReducer graph_reducer(&graph, local_zone_);
- graph_reducer.AddReducer(&context_specializer);
- graph_reducer.ReduceGraph();
- Inlinee::UnifyReturn(&jsgraph);
+ Node* context = jsgraph_->Constant(handle(function->context()));
CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
visitor.CopyGraph();
- Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
+ Node* start = visitor.GetCopy(graph.start());
+ Node* end = visitor.GetCopy(graph.end());
- Node* outer_frame_state = call.frame_state();
+ Node* frame_state = call.frame_state();
+ size_t const inlinee_formal_parameters = start->op()->ValueOutputCount() - 3;
// Insert argument adaptor frame if required.
- if (call.formal_arguments() != inlinee.formal_parameters()) {
- outer_frame_state =
- CreateArgumentsAdaptorFrameState(&call, function, info.zone());
- }
-
- for (Node* node : visitor.copies()) {
- if (node && node->opcode() == IrOpcode::kFrameState) {
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- AddClosureToFrameState(node, function);
- NodeProperties::ReplaceFrameStateInput(node, 0, outer_frame_state);
+ if (call.formal_arguments() != inlinee_formal_parameters) {
+ // In strong mode, in case of too few arguments we need to throw a
+ // TypeError so we must not inline this call.
+ if (is_strong(info.language_mode()) &&
+ call.formal_arguments() < inlinee_formal_parameters) {
+ return NoChange();
}
+ frame_state = CreateArgumentsAdaptorFrameState(&call, info.shared_info(),
+ info.zone());
}
- return inlinee.InlineAtCall(jsgraph_, node);
+ // Remember that we inlined this function.
+ info_->AddInlinedFunction(info.shared_info());
+
+ return InlineCall(node, context, frame_state, start, end);
}
} // namespace compiler
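
The new recursion guard in JSInliner::Reduce walks the frame-state chain outward and refuses to inline when any enclosing frame already belongs to the candidate function. A simplified sketch of that walk (placeholder types; the real loop stops when the input is no longer a kFrameState, modeled here as nullptr):

    #include <cstdio>

    struct SharedFunctionInfo {};
    struct FrameState {
      const SharedFunctionInfo* shared;
      const FrameState* outer;  // nullptr once the chain ends
    };

    // Returns true if {callee} already appears somewhere up the chain.
    bool IsRecursiveCall(const FrameState* fs, const SharedFunctionInfo* callee) {
      for (; fs != nullptr; fs = fs->outer) {
        if (fs->shared == callee) return true;
      }
      return false;
    }

    int main() {
      SharedFunctionInfo f, g;
      FrameState outer{&f, nullptr}, inner{&g, &outer};
      std::printf("%d\n", IsRecursiveCall(&inner, &f));  // prints 1
    }
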
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 174b1e9a7a..b075024dd3 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -14,13 +14,17 @@ namespace compiler {
class JSCallFunctionAccessor;
-class JSInliner final : public Reducer {
+class JSInliner final : public AdvancedReducer {
public:
- enum Mode { kBuiltinsInlining, kGeneralInlining };
+ enum Mode { kRestrictedInlining, kGeneralInlining };
- JSInliner(Mode mode, Zone* local_zone, CompilationInfo* info,
+ JSInliner(Editor* editor, Mode mode, Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph)
- : mode_(mode), local_zone_(local_zone), info_(info), jsgraph_(jsgraph) {}
+ : AdvancedReducer(editor),
+ mode_(mode),
+ local_zone_(local_zone),
+ info_(info),
+ jsgraph_(jsgraph) {}
Reduction Reduce(Node* node) final;
@@ -31,9 +35,11 @@ class JSInliner final : public Reducer {
JSGraph* jsgraph_;
Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
- Handle<JSFunction> jsfunction,
+ Handle<SharedFunctionInfo> shared_info,
Zone* temp_zone);
- void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
+
+ Reduction InlineCall(Node* call, Node* context, Node* frame_state,
+ Node* start, Node* end);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index b299e8bb0e..8f04fc1bcd 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -16,8 +16,12 @@ namespace v8 {
namespace internal {
namespace compiler {
-JSIntrinsicLowering::JSIntrinsicLowering(JSGraph* jsgraph)
- : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
+ DeoptimizationMode mode)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ mode_(mode),
+ simplified_(jsgraph->zone()) {}
Reduction JSIntrinsicLowering::Reduce(Node* node) {
@@ -28,6 +32,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineConstructDouble:
return ReduceConstructDouble(node);
+ case Runtime::kInlineDateField:
+ return ReduceDateField(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
case Runtime::kInlineDoubleHi:
@@ -40,6 +46,10 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIncrementStatsCounter(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
+ case Runtime::kInlineIsDate:
+ return ReduceIsInstanceType(node, JS_DATE_TYPE);
+ case Runtime::kInlineIsTypedArray:
+ return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsFunction:
return ReduceIsInstanceType(node, JS_FUNCTION_TYPE);
case Runtime::kInlineIsNonNegativeSmi:
@@ -74,6 +84,20 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceUnLikely(node, BranchHint::kFalse);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
+ case Runtime::kInlineIsMinusZero:
+ return ReduceIsMinusZero(node);
+ case Runtime::kInlineFixedArrayGet:
+ return ReduceFixedArrayGet(node);
+ case Runtime::kInlineFixedArraySet:
+ return ReduceFixedArraySet(node);
+ case Runtime::kInlineGetTypeFeedbackVector:
+ return ReduceGetTypeFeedbackVector(node);
+ case Runtime::kInlineGetCallerJSFunction:
+ return ReduceGetCallerJSFunction(node);
+ case Runtime::kInlineThrowNotDateError:
+ return ReduceThrowNotDateError(node);
+ case Runtime::kInlineCallFunction:
+ return ReduceCallFunction(node);
default:
break;
}
@@ -89,41 +113,43 @@ Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
graph()->NewNode(machine()->Float64InsertLowWord32(),
jsgraph()->Constant(0), low),
high);
- NodeProperties::ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value);
return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
- // TODO(jarin): This should not depend on the global flag.
- if (!FLAG_turbo_deoptimization) return NoChange();
-
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
-
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // We are making the continuation after the call dead. To
- // model this, we generate if (true) statement with deopt
- // in the true branch and continuation in the false branch.
- Node* branch =
- graph()->NewNode(common()->Branch(), jsgraph()->TrueConstant(), control);
-
- // False branch - the original continuation.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- NodeProperties::ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect,
- if_false);
-
- // True branch: deopt.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* deopt =
- graph()->NewNode(common()->Deoptimize(), frame_state, effect, if_true);
+Reduction JSIntrinsicLowering::ReduceDateField(Node* node) {
+ Node* const value = NodeProperties::GetValueInput(node, 0);
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ NumberMatcher mindex(index);
+ if (mindex.Is(JSDate::kDateValue)) {
+ return Change(
+ node,
+ simplified()->LoadField(AccessBuilder::ForJSDateField(
+ static_cast<JSDate::FieldIndex>(static_cast<int>(mindex.Value())))),
+ value, effect, control);
+ }
+ // TODO(turbofan): Optimize more patterns.
+ return NoChange();
+}
- // Connect the deopt to the merge exiting the graph.
- NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- return Changed(deopt);
+Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
+ if (mode() != kDeoptimizationEnabled) return NoChange();
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+
+ // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ node->set_op(common()->Dead());
+ node->TrimInputCount(0);
+ return Changed(node);
}
@@ -148,11 +174,12 @@ Reduction JSIntrinsicLowering::ReduceHeapObjectGetMap(Node* node) {
Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
if (!FLAG_native_code_counters) return ChangeToUndefined(node);
- HeapObjectMatcher<String> m(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
if (!m.HasValue() || !m.Value().handle()->IsString()) {
return ChangeToUndefined(node);
}
- SmartArrayPointer<char> name = m.Value().handle()->ToCString();
+ SmartArrayPointer<char> name =
+ Handle<String>::cast(m.Value().handle())->ToCString();
StatsCounter counter(jsgraph()->isolate(), name.get());
if (!counter.Enabled()) return ChangeToUndefined(node);
@@ -203,7 +230,7 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
// Replace all effect uses of {node} with the {ephi}.
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
- NodeProperties::ReplaceWithValue(node, node, ephi);
+ ReplaceWithValue(node, node, ephi);
// Turn the {node} into a Phi.
return Change(node, common()->Phi(type, 2), vtrue, vfalse, merge);
@@ -245,8 +272,8 @@ Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
- if (!machine()->HasFloat64RoundDown()) return NoChange();
- return Change(node, machine()->Float64RoundDown());
+ if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
+ return Change(node, machine()->Float64RoundDown().op());
}
@@ -264,7 +291,7 @@ Reduction JSIntrinsicLowering::ReduceSeqStringGetChar(
node->ReplaceInput(2, effect);
node->ReplaceInput(3, control);
node->TrimInputCount(4);
- NodeProperties::ReplaceWithValue(node, node, node);
+ RelaxControls(node);
return Changed(node);
}
@@ -286,7 +313,7 @@ Reduction JSIntrinsicLowering::ReduceSeqStringSetChar(
node->ReplaceInput(4, control);
node->TrimInputCount(5);
NodeProperties::RemoveBounds(node);
- NodeProperties::ReplaceWithValue(node, string, node);
+ ReplaceWithValue(node, string, node);
return Changed(node);
}
@@ -319,7 +346,7 @@ Reduction JSIntrinsicLowering::ReduceUnLikely(Node* node, BranchHint hint) {
}
  // Apart from adding hints to branch nodes, this is the identity function.
Node* value = NodeProperties::GetValueInput(node, 0);
- NodeProperties::ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value);
return Changed(value);
}
@@ -380,7 +407,7 @@ Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// Replace all effect uses of {node} with the {ephi0}.
Node* ephi0 = graph()->NewNode(ephi_op, etrue0, efalse0, merge0);
- NodeProperties::ReplaceWithValue(node, node, ephi0);
+ ReplaceWithValue(node, node, ephi0);
// Turn the {node} into a Phi.
return Change(node, phi_op, vtrue0, vfalse0, merge0);
@@ -389,7 +416,7 @@ Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Replace all effect uses of {node} with the effect dependency.
- NodeProperties::ReplaceWithValue(node, node);
+ RelaxEffectsAndControls(node);
// Remove the inputs corresponding to context, effect and control.
NodeProperties::RemoveNonValueInputs(node);
// Finally update the operator to the new one.
@@ -398,6 +425,135 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
}
+Reduction JSIntrinsicLowering::ReduceIsMinusZero(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Node* double_lo =
+ graph()->NewNode(machine()->Float64ExtractLowWord32(), value);
+ Node* check1 = graph()->NewNode(machine()->Word32Equal(), double_lo,
+ jsgraph()->ZeroConstant());
+
+ Node* double_hi =
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value);
+ Node* check2 = graph()->NewNode(
+ machine()->Word32Equal(), double_hi,
+ jsgraph()->Int32Constant(static_cast<int32_t>(0x80000000)));
+
+ ReplaceWithValue(node, node, effect);
+
+ Node* and_result = graph()->NewNode(machine()->Word32And(), check1, check2);
+
+ return Change(node, machine()->Word32Equal(), and_result,
+ jsgraph()->Int32Constant(1));
+}
+
+
+Reduction JSIntrinsicLowering::ReduceFixedArrayGet(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(
+ node, simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ base, index, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceFixedArraySet(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* store = (graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()), base,
+ index, value, effect, control));
+ ReplaceWithValue(node, value, store);
+ return Changed(store);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceGetTypeFeedbackVector(Node* node) {
+ Node* func = node->InputAt(0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ FieldAccess access = AccessBuilder::ForJSFunctionSharedFunctionInfo();
+ Node* load =
+ graph()->NewNode(simplified()->LoadField(access), func, effect, control);
+ access = AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector();
+ return Change(node, simplified()->LoadField(access), load, load, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceGetCallerJSFunction(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* outer_frame = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_frame->opcode() == IrOpcode::kFrameState) {
+ // Use the runtime implementation to throw the appropriate error if the
+ // containing function is inlined.
+ return NoChange();
+ }
+
+ // TODO(danno): This implementation forces intrinsic lowering to happen after
+ // inlining, which is fine for now, but eventually the frame-querying logic
+ // probably should go later, e.g. in instruction selection, so that there is
+ // no phase-ordering dependency.
+ FieldAccess access = AccessBuilder::ForFrameCallerFramePtr();
+ Node* fp = graph()->NewNode(machine()->LoadFramePointer());
+ Node* next_fp =
+ graph()->NewNode(simplified()->LoadField(access), fp, effect, control);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForFrameMarker()),
+ next_fp, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceThrowNotDateError(Node* node) {
+ if (mode() != kDeoptimizationEnabled) return NoChange();
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+
+ // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ node->set_op(common()->Dead());
+ node->TrimInputCount(0);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceCallFunction(Node* node) {
+ CallRuntimeParameters params = OpParameter<CallRuntimeParameters>(node->op());
+ size_t arity = params.arity();
+ node->set_op(javascript()->CallFunction(arity, NO_CALL_FUNCTION_FLAGS, STRICT,
+ VectorSlotPair(), ALLOW_TAIL_CALLS));
+ Node* function = node->InputAt(static_cast<int>(arity - 1));
+ while (--arity != 0) {
+ node->ReplaceInput(static_cast<int>(arity),
+ node->InputAt(static_cast<int>(arity - 1)));
+ }
+ node->ReplaceInput(0, function);
+ return Changed(node);
+}
+
+
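
ReduceCallFunction above reshuffles %_CallFunction's inputs: the intrinsic passes the callee last, while JSCallFunction expects it first, so the loop shifts every argument one slot to the right and drops the callee into slot 0. The same shuffle expressed as a one-step right rotation over a plain vector (a hedged sketch with arbitrary stand-in values):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> inputs = {10, 20, 30, 99};  // 99 stands in for the callee
      // Rotate right by one: the callee moves from the back to the front.
      std::rotate(inputs.rbegin(), inputs.rbegin() + 1, inputs.rend());
      assert((inputs == std::vector<int>{99, 10, 20, 30}));
    }
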
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
+ Node* b) {
+ node->set_op(op);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->TrimInputCount(2);
+ RelaxControls(node);
+ return Changed(node);
+}
+
+
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b, Node* c) {
node->set_op(op);
@@ -405,14 +561,26 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
node->ReplaceInput(1, b);
node->ReplaceInput(2, c);
node->TrimInputCount(3);
- NodeProperties::ReplaceWithValue(node, node, node);
+ RelaxControls(node);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
+ Node* b, Node* c, Node* d) {
+ node->set_op(op);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->ReplaceInput(2, c);
+ node->ReplaceInput(3, d);
+ node->TrimInputCount(4);
+ RelaxControls(node);
return Changed(node);
}
Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
- NodeProperties::ReplaceWithValue(node, jsgraph()->UndefinedConstant(),
- effect);
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect);
return Changed(node);
}
@@ -424,6 +592,10 @@ CommonOperatorBuilder* JSIntrinsicLowering::common() const {
return jsgraph()->common();
}
+JSOperatorBuilder* JSIntrinsicLowering::javascript() const {
+ return jsgraph_->javascript();
+}
+
MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
return jsgraph()->machine();
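
ReduceIsMinusZero above lowers the intrinsic to two word comparisons on the double's halves: -0.0 is the only value whose low word is zero and whose high word is 0x80000000. A self-contained sketch of that exact bit test:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // Float64Extract{Low,High}Word32
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return lo == 0 && hi == 0x80000000u;
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0));   // +0.0 has a clear sign bit
      assert(!IsMinusZero(-1.0));  // sign bit set, but exponent bits too
    }
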
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index a0e773ddb2..816defbf58 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -15,25 +15,31 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+class JSOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
// Lowers certain JS-level runtime calls.
-class JSIntrinsicLowering final : public Reducer {
+class JSIntrinsicLowering final : public AdvancedReducer {
public:
- explicit JSIntrinsicLowering(JSGraph* jsgraph);
+ enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
+
+ JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
+ DeoptimizationMode mode);
~JSIntrinsicLowering() final {}
Reduction Reduce(Node* node) final;
private:
Reduction ReduceConstructDouble(Node* node);
+ Reduction ReduceDateField(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceDoubleHi(Node* node);
Reduction ReduceDoubleLo(Node* node);
Reduction ReduceHeapObjectGetMap(Node* node);
Reduction ReduceIncrementStatsCounter(Node* node);
+ Reduction ReduceIsMinusZero(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsNonNegativeSmi(Node* node);
Reduction ReduceIsSmi(Node* node);
@@ -47,18 +53,30 @@ class JSIntrinsicLowering final : public Reducer {
Reduction ReduceStringGetLength(Node* node);
Reduction ReduceUnLikely(Node* node, BranchHint hint);
Reduction ReduceValueOf(Node* node);
+ Reduction ReduceFixedArrayGet(Node* node);
+ Reduction ReduceFixedArraySet(Node* node);
+ Reduction ReduceGetTypeFeedbackVector(Node* node);
+ Reduction ReduceGetCallerJSFunction(Node* node);
+ Reduction ReduceThrowNotDateError(Node* node);
+ Reduction ReduceCallFunction(Node* node);
Reduction Change(Node* node, const Operator* op);
+ Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
+ Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
+ Node* d);
Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
MachineOperatorBuilder* machine() const;
+ DeoptimizationMode mode() const { return mode_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- JSGraph* jsgraph_;
+ JSGraph* const jsgraph_;
+ DeoptimizationMode const mode_;
SimplifiedOperatorBuilder simplified_;
};
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index f737b40865..1966724a86 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -14,8 +14,27 @@ namespace v8 {
namespace internal {
namespace compiler {
+bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return lhs.slot() == rhs.slot() && lhs.vector() == rhs.vector();
+}
+
+
+bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(VectorSlotPair const& p) {
+ return base::hash_combine(p.slot(), p.vector());
+}
+
+
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
- return os << p.arity() << ", " << p.flags() << ", " << p.language_mode();
+ os << p.arity() << ", " << p.flags() << ", " << p.language_mode();
+ if (p.AllowTailCalls()) {
+ os << ", ALLOW_TAIL_CALLS";
+ }
+ return os;
}
@@ -91,22 +110,94 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
}
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return lhs.slot().ToInt() == rhs.slot().ToInt() &&
- lhs.vector().is_identical_to(rhs.vector());
+DynamicGlobalAccess::DynamicGlobalAccess(const Handle<String>& name,
+ uint32_t check_bitset,
+ const VectorSlotPair& feedback,
+ ContextualMode mode)
+ : name_(name),
+ check_bitset_(check_bitset),
+ feedback_(feedback),
+ mode_(mode) {
+ DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
}
-size_t hash_value(VectorSlotPair const& p) {
- // TODO(mvstanton): include the vector in the hash.
- base::hash<int> h;
- return h(p.slot().ToInt());
+bool operator==(DynamicGlobalAccess const& lhs,
+ DynamicGlobalAccess const& rhs) {
+ UNIMPLEMENTED();
+ return true;
+}
+
+
+bool operator!=(DynamicGlobalAccess const& lhs,
+ DynamicGlobalAccess const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(DynamicGlobalAccess const& access) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+std::ostream& operator<<(std::ostream& os, DynamicGlobalAccess const& access) {
+ return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
+ << access.mode();
+}
+
+
+DynamicGlobalAccess const& DynamicGlobalAccessOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamicGlobal, op->opcode());
+ return OpParameter<DynamicGlobalAccess>(op);
+}
+
+
+DynamicContextAccess::DynamicContextAccess(const Handle<String>& name,
+ uint32_t check_bitset,
+ const ContextAccess& context_access)
+ : name_(name),
+ check_bitset_(check_bitset),
+ context_access_(context_access) {
+ DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
+}
+
+
+bool operator==(DynamicContextAccess const& lhs,
+ DynamicContextAccess const& rhs) {
+ UNIMPLEMENTED();
+ return true;
+}
+
+
+bool operator!=(DynamicContextAccess const& lhs,
+ DynamicContextAccess const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(DynamicContextAccess const& access) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+std::ostream& operator<<(std::ostream& os, DynamicContextAccess const& access) {
+ return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
+ << access.context_access();
+}
+
+
+DynamicContextAccess const& DynamicContextAccessOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamicContext, op->opcode());
+ return OpParameter<DynamicContextAccess>(op);
}
bool operator==(LoadNamedParameters const& lhs,
LoadNamedParameters const& rhs) {
return lhs.name() == rhs.name() &&
+ lhs.language_mode() == rhs.language_mode() &&
lhs.contextual_mode() == rhs.contextual_mode() &&
lhs.feedback() == rhs.feedback();
}
@@ -119,24 +210,26 @@ bool operator!=(LoadNamedParameters const& lhs,
size_t hash_value(LoadNamedParameters const& p) {
- return base::hash_combine(p.name(), p.contextual_mode(), p.feedback());
+ return base::hash_combine(p.name(), p.language_mode(), p.contextual_mode(),
+ p.feedback());
}
std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
- return os << Brief(*p.name().handle()) << ", " << p.contextual_mode();
+ return os << Brief(*p.name().handle()) << ", " << p.language_mode() << ", "
+ << p.contextual_mode();
}
std::ostream& operator<<(std::ostream& os, LoadPropertyParameters const& p) {
- // Nothing special to print.
- return os;
+ return os << p.language_mode();
}
bool operator==(LoadPropertyParameters const& lhs,
LoadPropertyParameters const& rhs) {
- return lhs.feedback() == rhs.feedback();
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.feedback() == rhs.feedback();
}
@@ -153,7 +246,7 @@ const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op) {
size_t hash_value(LoadPropertyParameters const& p) {
- return hash_value(p.feedback());
+ return base::hash_combine(p.language_mode(), p.feedback());
}
@@ -163,9 +256,16 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
}
+const LoadNamedParameters& LoadGlobalParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, op->opcode());
+ return OpParameter<LoadNamedParameters>(op);
+}
+
+
bool operator==(StoreNamedParameters const& lhs,
StoreNamedParameters const& rhs) {
- return lhs.language_mode() == rhs.language_mode() && lhs.name() == rhs.name();
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.name() == rhs.name() && lhs.feedback() == rhs.feedback();
}
@@ -176,7 +276,7 @@ bool operator!=(StoreNamedParameters const& lhs,
size_t hash_value(StoreNamedParameters const& p) {
- return base::hash_combine(p.language_mode(), p.name());
+ return base::hash_combine(p.language_mode(), p.name(), p.feedback());
}
@@ -191,6 +291,41 @@ const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
}
+const StoreNamedParameters& StoreGlobalParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, op->opcode());
+ return OpParameter<StoreNamedParameters>(op);
+}
+
+
+bool operator==(StorePropertyParameters const& lhs,
+ StorePropertyParameters const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(StorePropertyParameters const& lhs,
+ StorePropertyParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(StorePropertyParameters const& p) {
+ return base::hash_combine(p.language_mode(), p.feedback());
+}
+
+
+std::ostream& operator<<(std::ostream& os, StorePropertyParameters const& p) {
+ return os << p.language_mode();
+}
+
+
+const StorePropertyParameters& StorePropertyParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
+ return OpParameter<StorePropertyParameters>(op);
+}
+
+
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
@@ -238,6 +373,10 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
V(StackCheck, Operator::kNoProperties, 0, 0) \
V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
V(CreateWithContext, Operator::kNoProperties, 2, 1) \
@@ -261,8 +400,7 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
V(Subtract, Operator::kNoProperties, 2, 1) \
V(Multiply, Operator::kNoProperties, 2, 1) \
V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(StoreProperty, Operator::kNoProperties, 3, 0)
+ V(Modulus, Operator::kNoProperties, 2, 1)
struct JSOperatorGlobalCache final {
@@ -338,8 +476,11 @@ CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
const Operator* JSOperatorBuilder::CallFunction(size_t arity,
CallFunctionFlags flags,
- LanguageMode language_mode) {
- CallFunctionParameters parameters(arity, flags, language_mode);
+ LanguageMode language_mode,
+ VectorSlotPair const& feedback,
+ TailCallMode tail_call_mode) {
+ CallFunctionParameters parameters(arity, flags, language_mode, feedback,
+ tail_call_mode);
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
"JSCallFunction", // name
@@ -372,40 +513,50 @@ const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
const Operator* JSOperatorBuilder::LoadNamed(const Unique<Name>& name,
const VectorSlotPair& feedback,
- ContextualMode contextual_mode,
- PropertyICMode load_ic) {
- LoadNamedParameters parameters(name, feedback, contextual_mode, load_ic);
+ LanguageMode language_mode) {
+ LoadNamedParameters parameters(name, feedback, language_mode, NOT_CONTEXTUAL);
return new (zone()) Operator1<LoadNamedParameters>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
- 1, 1, 1, 1, 1, 2, // counts
+ 2, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
-const Operator* JSOperatorBuilder::LoadProperty(
- const VectorSlotPair& feedback) {
- LoadPropertyParameters parameters(feedback);
+const Operator* JSOperatorBuilder::LoadProperty(const VectorSlotPair& feedback,
+ LanguageMode language_mode) {
+ LoadPropertyParameters parameters(feedback, language_mode);
return new (zone()) Operator1<LoadPropertyParameters>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
- 2, 1, 1, 1, 1, 2, // counts
+ 3, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
const Unique<Name>& name,
- PropertyICMode store_ic) {
- StoreNamedParameters parameters(language_mode, name, store_ic);
+ const VectorSlotPair& feedback) {
+ StoreNamedParameters parameters(language_mode, feedback, name);
return new (zone()) Operator1<StoreNamedParameters>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
- 2, 1, 1, 0, 1, 2, // counts
+ 3, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
+const Operator* JSOperatorBuilder::StoreProperty(
+ LanguageMode language_mode, const VectorSlotPair& feedback) {
+ StorePropertyParameters parameters(language_mode, feedback);
+ return new (zone()) Operator1<StorePropertyParameters>( // --
+ IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
+ "JSStoreProperty", // name
+ 4, 1, 1, 0, 1, 2, // counts
+ parameters); // parameter
+}
+
+
const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
return new (zone()) Operator1<LanguageMode>( // --
IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
@@ -415,6 +566,30 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
}
+const Operator* JSOperatorBuilder::LoadGlobal(const Unique<Name>& name,
+ const VectorSlotPair& feedback,
+ ContextualMode contextual_mode) {
+ LoadNamedParameters parameters(name, feedback, SLOPPY, contextual_mode);
+ return new (zone()) Operator1<LoadNamedParameters>( // --
+ IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
+ "JSLoadGlobal", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
+ const Unique<Name>& name,
+ const VectorSlotPair& feedback) {
+ StoreNamedParameters parameters(language_mode, feedback, name);
+ return new (zone()) Operator1<StoreNamedParameters>( // --
+ IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
+ "JSStoreGlobal", // name
+ 3, 1, 1, 0, 1, 2, // counts
+ parameters); // parameter
+}
+
+
const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
bool immutable) {
ContextAccess access(depth, index, immutable);
@@ -438,6 +613,31 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
}
+const Operator* JSOperatorBuilder::LoadDynamicGlobal(
+ const Handle<String>& name, uint32_t check_bitset,
+ const VectorSlotPair& feedback, ContextualMode mode) {
+ DynamicGlobalAccess access(name, check_bitset, feedback, mode);
+ return new (zone()) Operator1<DynamicGlobalAccess>( // --
+ IrOpcode::kJSLoadDynamicGlobal, Operator::kNoProperties, // opcode
+ "JSLoadDynamicGlobal", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ access); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadDynamicContext(
+ const Handle<String>& name, uint32_t check_bitset, size_t depth,
+ size_t index) {
+ ContextAccess context_access(depth, index, false);
+ DynamicContextAccess access(name, check_bitset, context_access);
+ return new (zone()) Operator1<DynamicContextAccess>( // --
+ IrOpcode::kJSLoadDynamicContext, Operator::kNoProperties, // opcode
+ "JSLoadDynamicContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ access); // parameter
+}
+
+
const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
CreateClosureParameters parameters(shared_info, pretenure);
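
Both dynamic-access parameter classes added here guard {check_bitset} with the same invariant: either the all-ones sentinel kFullCheckRequired, or a value below 0x80000000 covering at most kMaxCheckDepth contexts. A hedged sketch of that invariant; reading each bit as one context depth to check inline is an assumption suggested by kMaxCheckDepth, not spelled out in the patch:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kFullCheckRequired = static_cast<uint32_t>(-1);
    constexpr int kMaxCheckDepth = 30;
    static_assert(kMaxCheckDepth < 32, "bitset must fit in a uint32_t");

    // Mirrors the DCHECK in both constructors above.
    bool IsValidBitset(uint32_t check_bitset) {
      return check_bitset == kFullCheckRequired || check_bitset < 0x80000000u;
    }

    int main() {
      uint32_t bitset = (1u << 0) | (1u << 3);    // check depths 0 and 3 inline
      assert(IsValidBitset(bitset));
      assert(IsValidBitset(kFullCheckRequired));  // sentinel: no inline checks
    }
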
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index dc002e1547..d70c8e2096 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -17,31 +17,69 @@ class Operator;
struct JSOperatorGlobalCache;
+// Defines a pair of {TypeFeedbackVector} and {FeedbackVectorICSlot}, which
+// is used to access the type feedback for a certain {Node}.
+class VectorSlotPair {
+ public:
+ VectorSlotPair() : slot_(FeedbackVectorICSlot::Invalid()) {}
+ VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ : vector_(vector), slot_(slot) {}
+
+ bool IsValid() const { return !vector_.is_null(); }
+
+ MaybeHandle<TypeFeedbackVector> vector() const { return vector_; }
+ FeedbackVectorICSlot slot() const { return slot_; }
+
+ int index() const {
+ Handle<TypeFeedbackVector> vector;
+ return vector_.ToHandle(&vector) ? vector->GetIndex(slot_) : -1;
+ }
+
+ private:
+ const MaybeHandle<TypeFeedbackVector> vector_;
+ const FeedbackVectorICSlot slot_;
+};
+
+bool operator==(VectorSlotPair const&, VectorSlotPair const&);
+bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
+
+size_t hash_value(VectorSlotPair const&);
+
+enum TailCallMode { NO_TAIL_CALLS, ALLOW_TAIL_CALLS };
+
// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCallFunction operators.
class CallFunctionParameters final {
public:
CallFunctionParameters(size_t arity, CallFunctionFlags flags,
- LanguageMode language_mode)
+ LanguageMode language_mode,
+ VectorSlotPair const& feedback,
+ TailCallMode tail_call_mode)
: bit_field_(ArityField::encode(arity) | FlagsField::encode(flags) |
- LanguageModeField::encode(language_mode)) {}
+ LanguageModeField::encode(language_mode)),
+ feedback_(feedback),
+ tail_call_mode_(tail_call_mode) {}
size_t arity() const { return ArityField::decode(bit_field_); }
CallFunctionFlags flags() const { return FlagsField::decode(bit_field_); }
LanguageMode language_mode() const {
return LanguageModeField::decode(bit_field_);
}
+ VectorSlotPair const& feedback() const { return feedback_; }
bool operator==(CallFunctionParameters const& that) const {
- return this->bit_field_ == that.bit_field_;
+ return this->bit_field_ == that.bit_field_ &&
+ this->feedback_ == that.feedback_;
}
bool operator!=(CallFunctionParameters const& that) const {
return !(*this == that);
}
+ bool AllowTailCalls() const { return tail_call_mode_ == ALLOW_TAIL_CALLS; }
+
private:
friend size_t hash_value(CallFunctionParameters const& p) {
- return p.bit_field_;
+ return base::hash_combine(p.bit_field_, p.feedback_);
}
typedef BitField<size_t, 0, 28> ArityField;
@@ -49,6 +87,8 @@ class CallFunctionParameters final {
typedef BitField<LanguageMode, 30, 2> LanguageModeField;
const uint32_t bit_field_;
+ const VectorSlotPair feedback_;
+ bool tail_call_mode_;
};
size_t hash_value(CallFunctionParameters const&);
@@ -112,53 +152,109 @@ std::ostream& operator<<(std::ostream&, ContextAccess const&);
ContextAccess const& ContextAccessOf(Operator const*);
-class VectorSlotPair {
+// Defines the name for a dynamic variable lookup. The {check_bitset} allows
+// inlining checks of whether the lookup yields a global variable. This is
+// used as a parameter by JSLoadDynamicGlobal and JSStoreDynamicGlobal
+// operators.
+class DynamicGlobalAccess final {
public:
- VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
- : vector_(vector), slot_(slot) {}
+ DynamicGlobalAccess(const Handle<String>& name, uint32_t check_bitset,
+ const VectorSlotPair& feedback, ContextualMode mode);
- Handle<TypeFeedbackVector> vector() const { return vector_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ const Handle<String>& name() const { return name_; }
+ uint32_t check_bitset() const { return check_bitset_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+ ContextualMode mode() const { return mode_; }
+
+ // Indicates that an inline check is disabled.
+ bool RequiresFullCheck() const {
+ return check_bitset() == kFullCheckRequired;
+ }
- int index() const { return vector_->GetIndex(slot_); }
+  // Maximum context chain depth for which an inline check is possible.
+ static const int kMaxCheckDepth = 30;
+
+ // Sentinel for {check_bitset} disabling inline checks.
+ static const uint32_t kFullCheckRequired = -1;
private:
- const Handle<TypeFeedbackVector> vector_;
- const FeedbackVectorICSlot slot_;
+ const Handle<String> name_;
+ const uint32_t check_bitset_;
+ const VectorSlotPair feedback_;
+ const ContextualMode mode_;
};
+size_t hash_value(DynamicGlobalAccess const&);
+
+bool operator==(DynamicGlobalAccess const&, DynamicGlobalAccess const&);
+bool operator!=(DynamicGlobalAccess const&, DynamicGlobalAccess const&);
+
+std::ostream& operator<<(std::ostream&, DynamicGlobalAccess const&);
+
+DynamicGlobalAccess const& DynamicGlobalAccessOf(Operator const*);
+
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs);
+// Defines the name for a dynamic variable lookup. The {check_bitset} allows
+// inlining checks of whether the lookup yields a context variable. This is
+// used as a parameter by JSLoadDynamicContext and JSStoreDynamicContext
+// operators.
+class DynamicContextAccess final {
+ public:
+ DynamicContextAccess(const Handle<String>& name, uint32_t check_bitset,
+ const ContextAccess& context_access);
+
+ const Handle<String>& name() const { return name_; }
+ uint32_t check_bitset() const { return check_bitset_; }
+ const ContextAccess& context_access() const { return context_access_; }
+
+ // Indicates that an inline check is disabled.
+ bool RequiresFullCheck() const {
+ return check_bitset() == kFullCheckRequired;
+ }
+  // Maximum context chain depth for which an inline check is possible.
+ static const int kMaxCheckDepth = 30;
+
+ // Sentinel for {check_bitset} disabling inline checks.
+ static const uint32_t kFullCheckRequired = -1;
+
+ private:
+ const Handle<String> name_;
+ const uint32_t check_bitset_;
+ const ContextAccess context_access_;
+};
+
+size_t hash_value(DynamicContextAccess const&);
+
+bool operator==(DynamicContextAccess const&, DynamicContextAccess const&);
+bool operator!=(DynamicContextAccess const&, DynamicContextAccess const&);
+
+std::ostream& operator<<(std::ostream&, DynamicContextAccess const&);
+
+DynamicContextAccess const& DynamicContextAccessOf(Operator const*);
-// For (Load|Store)Named operators, the mode of the IC that needs
-// to be called. This is needed because (Load|Store)Property nodes can be
-// reduced to named versions, but still need to call the correct original
-// IC mode because of the layout of feedback vectors.
-enum PropertyICMode { NAMED, KEYED };
// Defines the property being loaded from an object by a named load. This is
-// used as a parameter by JSLoadNamed operators.
+// used as a parameter by JSLoadNamed and JSLoadGlobal operators.
class LoadNamedParameters final {
public:
LoadNamedParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
- ContextualMode contextual_mode, PropertyICMode load_ic)
+ LanguageMode language_mode,
+ ContextualMode contextual_mode)
: name_(name),
feedback_(feedback),
- contextual_mode_(contextual_mode),
- load_ic_(load_ic) {}
+ language_mode_(language_mode),
+ contextual_mode_(contextual_mode) {}
const Unique<Name>& name() const { return name_; }
+ LanguageMode language_mode() const { return language_mode_; }
ContextualMode contextual_mode() const { return contextual_mode_; }
- PropertyICMode load_ic() const { return load_ic_; }
const VectorSlotPair& feedback() const { return feedback_; }
private:
const Unique<Name> name_;
const VectorSlotPair feedback_;
+ const LanguageMode language_mode_;
const ContextualMode contextual_mode_;
- const PropertyICMode load_ic_;
};
bool operator==(LoadNamedParameters const&, LoadNamedParameters const&);
@@ -170,18 +266,24 @@ std::ostream& operator<<(std::ostream&, LoadNamedParameters const&);
const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
+const LoadNamedParameters& LoadGlobalParametersOf(const Operator* op);
+
// Defines the property being loaded from an object. This is
// used as a parameter by JSLoadProperty operators.
class LoadPropertyParameters final {
public:
- explicit LoadPropertyParameters(const VectorSlotPair& feedback)
- : feedback_(feedback) {}
+ explicit LoadPropertyParameters(const VectorSlotPair& feedback,
+ LanguageMode language_mode)
+ : feedback_(feedback), language_mode_(language_mode) {}
const VectorSlotPair& feedback() const { return feedback_; }
+ LanguageMode language_mode() const { return language_mode_; }
+
private:
const VectorSlotPair feedback_;
+ const LanguageMode language_mode_;
};
bool operator==(LoadPropertyParameters const&, LoadPropertyParameters const&);
@@ -195,21 +297,21 @@ const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
// Defines the property being stored to an object by a named store. This is
-// used as a parameter by JSStoreNamed operators.
+// used as a parameter by JSStoreNamed and JSStoreGlobal operators.
class StoreNamedParameters final {
public:
- StoreNamedParameters(LanguageMode language_mode, const Unique<Name>& name,
- PropertyICMode store_ic)
- : language_mode_(language_mode), name_(name), store_ic_(store_ic) {}
+ StoreNamedParameters(LanguageMode language_mode,
+ const VectorSlotPair& feedback, const Unique<Name>& name)
+ : language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
const Unique<Name>& name() const { return name_; }
- PropertyICMode store_ic() const { return store_ic_; }
private:
const LanguageMode language_mode_;
const Unique<Name> name_;
- const PropertyICMode store_ic_;
+ const VectorSlotPair feedback_;
};
bool operator==(StoreNamedParameters const&, StoreNamedParameters const&);
@@ -221,6 +323,34 @@ std::ostream& operator<<(std::ostream&, StoreNamedParameters const&);
const StoreNamedParameters& StoreNamedParametersOf(const Operator* op);
+const StoreNamedParameters& StoreGlobalParametersOf(const Operator* op);
+
+
+// Defines the property being stored to an object. This is used as a parameter
+// by JSStoreProperty operators.
+class StorePropertyParameters final {
+ public:
+ StorePropertyParameters(LanguageMode language_mode,
+ const VectorSlotPair& feedback)
+ : language_mode_(language_mode), feedback_(feedback) {}
+
+ LanguageMode language_mode() const { return language_mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ const LanguageMode language_mode_;
+ const VectorSlotPair feedback_;
+};
+
+bool operator==(StorePropertyParameters const&, StorePropertyParameters const&);
+bool operator!=(StorePropertyParameters const&, StorePropertyParameters const&);
+
+size_t hash_value(StorePropertyParameters const&);
+
+std::ostream& operator<<(std::ostream&, StorePropertyParameters const&);
+
+const StorePropertyParameters& StorePropertyParametersOf(const Operator* op);
+
// Defines shared information for the closure that should be created. This is
// used as a parameter by JSCreateClosure operators.
@@ -289,33 +419,56 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* CreateLiteralArray(int literal_flags);
const Operator* CreateLiteralObject(int literal_flags);
- const Operator* CallFunction(size_t arity, CallFunctionFlags flags,
- LanguageMode language_mode);
+ const Operator* CallFunction(
+ size_t arity, CallFunctionFlags flags, LanguageMode language_mode,
+ VectorSlotPair const& feedback = VectorSlotPair(),
+ TailCallMode tail_call_mode = NO_TAIL_CALLS);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallConstruct(int arguments);
- const Operator* LoadProperty(const VectorSlotPair& feedback);
+ const Operator* LoadProperty(const VectorSlotPair& feedback,
+ LanguageMode language_mode);
const Operator* LoadNamed(const Unique<Name>& name,
const VectorSlotPair& feedback,
- ContextualMode contextual_mode = NOT_CONTEXTUAL,
- PropertyICMode load_ic = NAMED);
+ LanguageMode language_mode);
- const Operator* StoreProperty(LanguageMode language_mode);
+ const Operator* StoreProperty(LanguageMode language_mode,
+ const VectorSlotPair& feedback);
const Operator* StoreNamed(LanguageMode language_mode,
const Unique<Name>& name,
- PropertyICMode store_ic = NAMED);
+ const VectorSlotPair& feedback);
const Operator* DeleteProperty(LanguageMode language_mode);
const Operator* HasProperty();
+ const Operator* LoadGlobal(const Unique<Name>& name,
+ const VectorSlotPair& feedback,
+ ContextualMode contextual_mode = NOT_CONTEXTUAL);
+ const Operator* StoreGlobal(LanguageMode language_mode,
+ const Unique<Name>& name,
+ const VectorSlotPair& feedback);
+
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
+ const Operator* LoadDynamicGlobal(const Handle<String>& name,
+ uint32_t check_bitset,
+ const VectorSlotPair& feedback,
+ ContextualMode mode);
+ const Operator* LoadDynamicContext(const Handle<String>& name,
+ uint32_t check_bitset, size_t depth,
+ size_t index);
+
const Operator* TypeOf();
const Operator* InstanceOf();
+ const Operator* ForInDone();
+ const Operator* ForInNext();
+ const Operator* ForInPrepare();
+ const Operator* ForInStep();
+
const Operator* StackCheck();
// TODO(titzer): nail down the static parts of each of these context flavors.
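
As a usage sketch, the reworked signatures thread IC feedback and language mode through every property access. A reducer or graph builder would construct operators roughly as follows (the variables are illustrative stand-ins; the calls match the declarations above):

    // Illustrative only: 'js' is a JSOperatorBuilder*, 'feedback' a
    // VectorSlotPair and 'name' a Unique<Name> obtained elsewhere.
    const Operator* load = js->LoadProperty(feedback, SLOPPY);
    const Operator* named = js->LoadNamed(name, feedback, SLOPPY);
    const Operator* store = js->StoreProperty(STRICT, feedback);
    const Operator* global = js->LoadGlobal(name, feedback);  // NOT_CONTEXTUAL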
diff --git a/deps/v8/src/compiler/js-type-feedback.cc b/deps/v8/src/compiler/js-type-feedback.cc
index 749eebab29..432e2d0366 100644
--- a/deps/v8/src/compiler/js-type-feedback.cc
+++ b/deps/v8/src/compiler/js-type-feedback.cc
@@ -13,6 +13,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/frame-states.h"
#include "src/compiler/node-aux-data.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -24,54 +25,39 @@ namespace compiler {
enum LoadOrStore { LOAD, STORE };
-#define EAGER_DEOPT_LOCATIONS_FOR_PROPERTY_ACCESS_ARE_CORRECT false
+// TODO(turbofan): fix deoptimization problems
+#define ENABLE_FAST_PROPERTY_LOADS false
+#define ENABLE_FAST_PROPERTY_STORES false
JSTypeFeedbackTable::JSTypeFeedbackTable(Zone* zone)
- : map_(TypeFeedbackIdMap::key_compare(),
- TypeFeedbackIdMap::allocator_type(zone)) {}
+ : type_feedback_id_map_(TypeFeedbackIdMap::key_compare(),
+ TypeFeedbackIdMap::allocator_type(zone)),
+ feedback_vector_ic_slot_map_(TypeFeedbackIdMap::key_compare(),
+ TypeFeedbackIdMap::allocator_type(zone)) {}
void JSTypeFeedbackTable::Record(Node* node, TypeFeedbackId id) {
- map_.insert(std::make_pair(node->id(), id));
+ type_feedback_id_map_.insert(std::make_pair(node->id(), id));
+}
+
+
+void JSTypeFeedbackTable::Record(Node* node, FeedbackVectorICSlot slot) {
+ feedback_vector_ic_slot_map_.insert(std::make_pair(node->id(), slot));
}
Reduction JSTypeFeedbackSpecializer::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kJSLoadProperty: {
- HeapObjectMatcher<Name> match(node->InputAt(1));
- if (match.HasValue() && match.Value().handle()->IsName()) {
- // LoadProperty(o, "constant") => LoadNamed["constant"](o).
- Unique<Name> name = match.Value();
- const VectorSlotPair& feedback =
- LoadPropertyParametersOf(node->op()).feedback();
- node->set_op(jsgraph()->javascript()->LoadNamed(name, feedback,
- NOT_CONTEXTUAL, KEYED));
- node->RemoveInput(1);
- return ReduceJSLoadNamed(node);
- }
+ case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
- }
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSLoadGlobal:
+ return ReduceJSLoadGlobal(node);
case IrOpcode::kJSStoreNamed:
return ReduceJSStoreNamed(node);
- case IrOpcode::kJSStoreProperty: {
- HeapObjectMatcher<Name> match(node->InputAt(1));
- if (match.HasValue() && match.Value().handle()->IsName()) {
- // StoreProperty(o, "constant", v) => StoreNamed["constant"](o, v).
- Unique<Name> name = match.Value();
- LanguageMode language_mode = OpParameter<LanguageMode>(node);
- // StoreProperty has 2 frame state inputs, but StoreNamed only 1.
- DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
- node->set_op(
- jsgraph()->javascript()->StoreNamed(language_mode, name, KEYED));
- node->RemoveInput(1);
- return ReduceJSStoreNamed(node);
- }
+ case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
- }
default:
break;
}
@@ -140,6 +126,10 @@ static bool GetInObjectFieldAccess(LoadOrStore mode, Handle<Map> map,
FieldIndex field_index = FieldIndex::ForPropertyIndex(*map, index, is_double);
if (field_index.is_inobject()) {
+ if (is_double && !map->IsUnboxedDoubleField(field_index)) {
+ // TODO(turbofan): support for out-of-line (MutableHeapNumber) loads.
+ return false;
+ }
access->offset = field_index.offset();
return true;
}
@@ -149,36 +139,29 @@ static bool GetInObjectFieldAccess(LoadOrStore mode, Handle<Map> map,
}
-static bool IsGlobalObject(Node* node) {
- return NodeProperties::IsTyped(node) &&
- NodeProperties::GetBounds(node).upper->Is(Type::GlobalObject());
-}
-
-
Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed);
- Node* receiver = node->InputAt(0);
- if (IsGlobalObject(receiver)) {
- return ReduceJSLoadNamedForGlobalVariable(node);
- }
+ if (mode() != kDeoptimizationEnabled) return NoChange();
+ Node* frame_state_before = GetFrameStateBefore(node);
+ if (frame_state_before == nullptr) return NoChange();
- if (!FLAG_turbo_deoptimization) return NoChange();
- // TODO(titzer): deopt locations are wrong for property accesses
- if (!EAGER_DEOPT_LOCATIONS_FOR_PROPERTY_ACCESS_ARE_CORRECT) return NoChange();
+ const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
+ Handle<Name> name = p.name().handle();
+ SmallMapList maps;
- // TODO(turbofan): handle vector-based type feedback.
- TypeFeedbackId id = js_type_feedback_->find(node);
- if (id.IsNone() || oracle()->LoadInlineCacheState(id) == UNINITIALIZED) {
+ FeedbackVectorICSlot slot = js_type_feedback_->FindFeedbackVectorICSlot(node);
+ if (slot.IsInvalid() ||
+ oracle()->LoadInlineCacheState(slot) == UNINITIALIZED) {
+ // No feedback vector IC slot, or the load is uninitialized.
return NoChange();
}
+ oracle()->PropertyReceiverTypes(slot, name, &maps);
- const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- SmallMapList maps;
- Handle<Name> name = p.name().handle();
+ Node* receiver = node->InputAt(0);
Node* effect = NodeProperties::GetEffectInput(node);
- GatherReceiverTypes(receiver, effect, id, name, &maps);
if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
+ if (!ENABLE_FAST_PROPERTY_LOADS) return NoChange();
Handle<Map> map = maps.first();
FieldAccess field_access;
@@ -197,27 +180,25 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
effect, check_success);
// TODO(turbofan): handle slow case instead of deoptimizing.
- // TODO(titzer): frame state should be from before the load.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state, effect,
- check_failed);
+ Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state_before,
+ effect, check_failed);
NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- NodeProperties::ReplaceWithValue(node, load, load, check_success);
+ ReplaceWithValue(node, load, load, check_success);
return Replace(load);
}
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamedForGlobalVariable(
- Node* node) {
+Reduction JSTypeFeedbackSpecializer::ReduceJSLoadGlobal(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadGlobal);
Handle<String> name =
- Handle<String>::cast(LoadNamedParametersOf(node->op()).name().handle());
+ Handle<String>::cast(LoadGlobalParametersOf(node->op()).name().handle());
// Try to optimize loads from the global object.
Handle<Object> constant_value =
jsgraph()->isolate()->factory()->GlobalConstantFor(name);
if (!constant_value.is_null()) {
// Always optimize global constants.
Node* constant = jsgraph()->Constant(constant_value);
- NodeProperties::ReplaceWithValue(node, constant);
+ ReplaceWithValue(node, constant);
return Replace(constant);
}
@@ -226,7 +207,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamedForGlobalVariable(
return NoChange();
}
- if (FLAG_turbo_deoptimization) {
+ if (mode() == kDeoptimizationEnabled) {
// Handle lookups in the script context.
{
Handle<ScriptContextTable> script_contexts(
@@ -253,7 +234,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamedForGlobalVariable(
String::Flatten(Handle<String>::cast(constant_value));
}
Node* constant = jsgraph()->Constant(constant_value);
- NodeProperties::ReplaceWithValue(node, constant);
+ ReplaceWithValue(node, constant);
return Replace(constant);
} else {
// Load directly from the property cell.
@@ -262,7 +243,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamedForGlobalVariable(
Node* load_field = graph()->NewNode(
simplified()->LoadField(access), jsgraph()->Constant(cell),
NodeProperties::GetEffectInput(node), control);
- NodeProperties::ReplaceWithValue(node, load_field, load_field, control);
+ ReplaceWithValue(node, load_field, load_field, control);
return Replace(load_field);
}
}
@@ -282,21 +263,28 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadProperty(Node* node) {
Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
DCHECK(node->opcode() == IrOpcode::kJSStoreNamed);
- // TODO(titzer): deopt locations are wrong for property accesses
- if (!EAGER_DEOPT_LOCATIONS_FOR_PROPERTY_ACCESS_ARE_CORRECT) return NoChange();
-
- TypeFeedbackId id = js_type_feedback_->find(node);
- if (id.IsNone() || oracle()->StoreIsUninitialized(id)) return NoChange();
+ Node* frame_state_before = GetFrameStateBefore(node);
+ if (frame_state_before == nullptr) return NoChange();
const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- SmallMapList maps;
Handle<Name> name = p.name().handle();
+ SmallMapList maps;
+ TypeFeedbackId id = js_type_feedback_->FindTypeFeedbackId(node);
+ if (id.IsNone() || oracle()->StoreIsUninitialized(id) == UNINITIALIZED) {
+ // No type feedback id, or the store is uninitialized.
+ // TODO(titzer): no feedback from vector ICs for stores.
+ return NoChange();
+ } else {
+ oracle()->AssignmentReceiverTypes(id, name, &maps);
+ }
+
Node* receiver = node->InputAt(0);
Node* effect = NodeProperties::GetEffectInput(node);
- GatherReceiverTypes(receiver, effect, id, name, &maps);
if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
+ if (!ENABLE_FAST_PROPERTY_STORES) return NoChange();
+
Handle<Map> map = maps.first();
FieldAccess field_access;
if (!GetInObjectFieldAccess(STORE, map, name, &field_access)) {
@@ -315,12 +303,10 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
receiver, value, effect, check_success);
// TODO(turbofan): handle slow case instead of deoptimizing.
- // TODO(titzer): frame state should be from before the store.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state, effect,
- check_failed);
+ Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state_before,
+ effect, check_failed);
NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- NodeProperties::ReplaceWithValue(node, store, store, check_success);
+ ReplaceWithValue(node, store, store, check_success);
return Replace(store);
}
@@ -360,18 +346,21 @@ void JSTypeFeedbackSpecializer::BuildMapCheck(Node* receiver, Handle<Map> map,
}
-void JSTypeFeedbackSpecializer::GatherReceiverTypes(Node* receiver,
- Node* effect,
- TypeFeedbackId id,
- Handle<Name> name,
- SmallMapList* maps) {
- // TODO(turbofan): filter maps by initial receiver map if known
- // TODO(turbofan): filter maps by native context (if specializing)
- // TODO(turbofan): filter maps by effect chain
- oracle()->PropertyReceiverTypes(id, name, maps);
+// Get the frame state before an operation if it exists and has a valid
+// bailout id.
+Node* JSTypeFeedbackSpecializer::GetFrameStateBefore(Node* node) {
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ DCHECK_LE(count, 2);
+ if (count == 2) {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ if (frame_state->opcode() == IrOpcode::kFrameState) {
+ BailoutId id = OpParameter<FrameStateInfo>(node).bailout_id();
+ if (id != BailoutId::None()) return frame_state;
+ }
+ }
+ return nullptr;
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-type-feedback.h b/deps/v8/src/compiler/js-type-feedback.h
index 51faee3a4f..84060f8096 100644
--- a/deps/v8/src/compiler/js-type-feedback.h
+++ b/deps/v8/src/compiler/js-type-feedback.h
@@ -27,47 +27,65 @@ class JSTypeFeedbackTable : public ZoneObject {
public:
explicit JSTypeFeedbackTable(Zone* zone);
- // TODO(titzer): support recording the feedback vector slot.
-
void Record(Node* node, TypeFeedbackId id);
+ void Record(Node* node, FeedbackVectorICSlot slot);
private:
friend class JSTypeFeedbackSpecializer;
typedef std::map<NodeId, TypeFeedbackId, std::less<NodeId>,
zone_allocator<TypeFeedbackId> > TypeFeedbackIdMap;
+ typedef std::map<NodeId, FeedbackVectorICSlot, std::less<NodeId>,
+ zone_allocator<FeedbackVectorICSlot> >
+ FeedbackVectorICSlotMap;
+
+ TypeFeedbackIdMap type_feedback_id_map_;
+ FeedbackVectorICSlotMap feedback_vector_ic_slot_map_;
+
+ TypeFeedbackId FindTypeFeedbackId(Node* node) {
+ TypeFeedbackIdMap::const_iterator it =
+ type_feedback_id_map_.find(node->id());
+ return it == type_feedback_id_map_.end() ? TypeFeedbackId::None()
+ : it->second;
+ }
- TypeFeedbackIdMap map_;
-
- TypeFeedbackId find(Node* node) {
- TypeFeedbackIdMap::const_iterator it = map_.find(node->id());
- return it == map_.end() ? TypeFeedbackId::None() : it->second;
+ FeedbackVectorICSlot FindFeedbackVectorICSlot(Node* node) {
+ FeedbackVectorICSlotMap::const_iterator it =
+ feedback_vector_ic_slot_map_.find(node->id());
+ return it == feedback_vector_ic_slot_map_.end()
+ ? FeedbackVectorICSlot::Invalid()
+ : it->second;
}
};
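
A conceptual sketch of the intended flow (not compilable as-is, since the find helpers are private to the specializer; the zone and node pointers are placeholders):

    // Record both kinds of feedback keys during graph building, then resolve
    // them by node id during specialization.
    JSTypeFeedbackTable table(zone);
    table.Record(load_node, FeedbackVectorICSlot(0));  // vector-based IC
    table.Record(store_node, TypeFeedbackId(1));       // classic IC id
    FeedbackVectorICSlot slot = table.FindFeedbackVectorICSlot(load_node);
    if (!slot.IsInvalid()) {
      // consult the TypeFeedbackOracle for this slot
    }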
// Specializes a graph to the type feedback recorded in the
// {js_type_feedback} provided to the constructor.
-class JSTypeFeedbackSpecializer : public Reducer {
+class JSTypeFeedbackSpecializer : public AdvancedReducer {
public:
- JSTypeFeedbackSpecializer(JSGraph* jsgraph,
+ enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
+
+ JSTypeFeedbackSpecializer(Editor* editor, JSGraph* jsgraph,
JSTypeFeedbackTable* js_type_feedback,
TypeFeedbackOracle* oracle,
Handle<GlobalObject> global_object,
+ DeoptimizationMode mode,
CompilationDependencies* dependencies)
- : jsgraph_(jsgraph),
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
simplified_(jsgraph->graph()->zone()),
js_type_feedback_(js_type_feedback),
oracle_(oracle),
global_object_(global_object),
+ mode_(mode),
dependencies_(dependencies) {
- CHECK(js_type_feedback);
+ CHECK_NOT_NULL(js_type_feedback);
}
Reduction Reduce(Node* node) override;
// Visible for unit testing.
+ Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
- Reduction ReduceJSLoadNamedForGlobalVariable(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
@@ -78,19 +96,20 @@ class JSTypeFeedbackSpecializer : public Reducer {
JSTypeFeedbackTable* js_type_feedback_;
TypeFeedbackOracle* oracle_;
Handle<GlobalObject> global_object_;
+ DeoptimizationMode const mode_;
CompilationDependencies* dependencies_;
TypeFeedbackOracle* oracle() { return oracle_; }
Graph* graph() { return jsgraph_->graph(); }
JSGraph* jsgraph() { return jsgraph_; }
CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ DeoptimizationMode mode() const { return mode_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
void BuildMapCheck(Node* receiver, Handle<Map> map, bool smi_check,
Node* effect, Node* control, Node** success, Node** fail);
- void GatherReceiverTypes(Node* receiver, Node* effect, TypeFeedbackId id,
- Handle<Name> property, SmallMapList* maps);
+ Node* GetFrameStateBefore(Node* node);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 74ebfa02ee..051009dd6e 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -21,28 +21,8 @@ namespace compiler {
// - relax effects from generic but not-side-effecting operations
-// Relax the effects of {node} by immediately replacing effect and control uses
-// of {node} with the effect and control input to {node}.
-// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
-// TODO(titzer): move into a GraphEditor?
-static void RelaxEffectsAndControls(Node* node) {
- NodeProperties::ReplaceWithValue(node, node, NULL);
-}
-
-
-// Relax the control uses of {node} by immediately replacing them with the
-// control input to {node}.
-// TODO(titzer): move into a GraphEditor?
-static void RelaxControls(Node* node) {
- NodeProperties::ReplaceWithValue(node, node, node);
-}
-
-
-JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
- : jsgraph_(jsgraph), simplified_(graph()->zone()) {
- zero_range_ = Type::Range(0.0, 0.0, graph()->zone());
- one_range_ = Type::Range(1.0, 1.0, graph()->zone());
- zero_thirtyone_range_ = Type::Range(0.0, 31.0, graph()->zone());
+JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor), jsgraph_(jsgraph), simplified_(graph()->zone()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
double max = kMaxInt / (1 << k);
@@ -51,12 +31,6 @@ JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
}
-Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
- NodeProperties::ReplaceWithValue(old, node, node);
- return Changed(node);
-}
-
-
// A helper class to construct inline allocations on the simplified operator
// level. This keeps track of the effect chain for initial stores on a newly
// allocated object and also provides helpers for commonly allocated objects.
@@ -121,11 +95,6 @@ class JSBinopReduction final {
JSBinopReduction(JSTypedLowering* lowering, Node* node)
: lowering_(lowering), node_(node) {}
- void ConvertPrimitiveInputsToNumber() {
- node_->ReplaceInput(0, ConvertPrimitiveToNumber(left()));
- node_->ReplaceInput(1, ConvertPrimitiveToNumber(right()));
- }
-
void ConvertInputsToNumber(Node* frame_state) {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
@@ -135,17 +104,25 @@ class JSBinopReduction final {
// already converted values from full code. This way we are sure that we
// will not re-do any of the side effects.
- Node* left_input =
- left_type()->Is(Type::PlainPrimitive())
- ? ConvertPrimitiveToNumber(left())
- : ConvertToNumber(left(),
- CreateFrameStateForLeftInput(frame_state));
+ Node* left_input = nullptr;
+ Node* right_input = nullptr;
+ bool left_is_primitive = left_type()->Is(Type::PlainPrimitive());
+ bool right_is_primitive = right_type()->Is(Type::PlainPrimitive());
+ bool handles_exception = NodeProperties::IsExceptionalCall(node_);
- Node* right_input =
- right_type()->Is(Type::PlainPrimitive())
- ? ConvertPrimitiveToNumber(right())
- : ConvertToNumber(right(), CreateFrameStateForRightInput(
+ if (!left_is_primitive && !right_is_primitive && handles_exception) {
+ ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
+ } else {
+ left_input = left_is_primitive
+ ? ConvertPlainPrimitiveToNumber(left())
+ : ConvertSingleInputToNumber(
+ left(), CreateFrameStateForLeftInput(frame_state));
+ right_input = right_is_primitive
+ ? ConvertPlainPrimitiveToNumber(right())
+ : ConvertSingleInputToNumber(
+ right(), CreateFrameStateForRightInput(
frame_state, left_input));
+ }
node_->ReplaceInput(0, left_input);
node_->ReplaceInput(1, right_input);
@@ -162,19 +139,6 @@ class JSBinopReduction final {
node_->ReplaceInput(1, ConvertToString(right()));
}
- // Convert inputs for bitwise shift operation (ES5 spec 11.7).
- void ConvertInputsForShift(Signedness left_signedness) {
- node_->ReplaceInput(
- 0, ConvertToUI32(ConvertPrimitiveToNumber(left()), left_signedness));
- Node* rnum = ConvertToUI32(ConvertPrimitiveToNumber(right()), kUnsigned);
- Type* rnum_type = NodeProperties::GetBounds(rnum).upper;
- if (!rnum_type->Is(lowering_->zero_thirtyone_range_)) {
- rnum = graph()->NewNode(machine()->Word32And(), rnum,
- jsgraph()->Int32Constant(0x1F));
- }
- node_->ReplaceInput(1, rnum);
- }
-
void SwapInputs() {
Node* l = left();
Node* r = right();
@@ -193,7 +157,7 @@ class JSBinopReduction final {
// Remove the effects from the node, and update its effect/control usages.
if (node_->op()->EffectInputCount() > 0) {
- RelaxEffectsAndControls(node_);
+ lowering_->RelaxEffectsAndControls(node_);
}
// Remove the inputs corresponding to context, effect, and control.
NodeProperties::RemoveNonValueInputs(node_);
@@ -253,6 +217,7 @@ class JSBinopReduction final {
JSGraph* jsgraph() { return lowering_->jsgraph(); }
JSOperatorBuilder* javascript() { return lowering_->javascript(); }
MachineOperatorBuilder* machine() { return lowering_->machine(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
Zone* zone() const { return graph()->zone(); }
private:
@@ -270,8 +235,7 @@ class JSBinopReduction final {
}
Node* CreateFrameStateForLeftInput(Node* frame_state) {
- FrameStateCallInfo state_info =
- OpParameter<FrameStateCallInfo>(frame_state);
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
if (state_info.bailout_id() == BailoutId::None()) {
// Dummy frame state => just leave it as is.
@@ -288,17 +252,20 @@ class JSBinopReduction final {
// the stack top. This is the slot that full code uses to store the
// left operand.
const Operator* op = jsgraph()->common()->FrameState(
- state_info.type(), state_info.bailout_id(),
- OutputFrameStateCombine::PokeAt(1));
+ state_info.bailout_id(), OutputFrameStateCombine::PokeAt(1),
+ state_info.function_info());
- return graph()->NewNode(op, frame_state->InputAt(0),
- frame_state->InputAt(1), frame_state->InputAt(2),
- frame_state->InputAt(3), frame_state->InputAt(4));
+ return graph()->NewNode(op,
+ frame_state->InputAt(kFrameStateParametersInput),
+ frame_state->InputAt(kFrameStateLocalsInput),
+ frame_state->InputAt(kFrameStateStackInput),
+ frame_state->InputAt(kFrameStateContextInput),
+ frame_state->InputAt(kFrameStateFunctionInput),
+ frame_state->InputAt(kFrameStateOuterStateInput));
}
Node* CreateFrameStateForRightInput(Node* frame_state, Node* converted_left) {
- FrameStateCallInfo state_info =
- OpParameter<FrameStateCallInfo>(frame_state);
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
if (state_info.bailout_id() == BailoutId::None()) {
// Dummy frame state => just leave it as is.
@@ -308,8 +275,8 @@ class JSBinopReduction final {
// Create a frame state that stores the result of the operation to the
// top of the stack (i.e., the slot used for the right operand).
const Operator* op = jsgraph()->common()->FrameState(
- state_info.type(), state_info.bailout_id(),
- OutputFrameStateCombine::PokeAt(0));
+ state_info.bailout_id(), OutputFrameStateCombine::PokeAt(0),
+ state_info.function_info());
// Change the left operand {converted_left} on the expression stack.
Node* stack = frame_state->InputAt(2);
@@ -328,25 +295,80 @@ class JSBinopReduction final {
Node* new_stack =
graph()->NewNode(stack->op(), stack->InputCount(), &new_values.front());
- return graph()->NewNode(op, frame_state->InputAt(0),
- frame_state->InputAt(1), new_stack,
- frame_state->InputAt(3), frame_state->InputAt(4));
- }
-
- Node* ConvertPrimitiveToNumber(Node* node) {
- return lowering_->ConvertPrimitiveToNumber(node);
+ return graph()->NewNode(
+ op, frame_state->InputAt(kFrameStateParametersInput),
+ frame_state->InputAt(kFrameStateLocalsInput), new_stack,
+ frame_state->InputAt(kFrameStateContextInput),
+ frame_state->InputAt(kFrameStateFunctionInput),
+ frame_state->InputAt(kFrameStateOuterStateInput));
+ }
+
+ Node* ConvertPlainPrimitiveToNumber(Node* node) {
+ DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive()));
+ // Avoid inserting too many eager ToNumber() operations.
+ Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
+ if (reduction.Changed()) return reduction.replacement();
+ // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
+ return graph()->NewNode(
+ javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
+ jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
+ }
+
+ Node* ConvertSingleInputToNumber(Node* node, Node* frame_state) {
+ DCHECK(!NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive()));
+ Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
+ frame_state, effect(), control());
+ NodeProperties::ReplaceUses(node_, node_, node_, n, n);
+ update_effect(n);
+ return n;
}
- Node* ConvertToNumber(Node* node, Node* frame_state) {
- if (NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive())) {
- return ConvertPrimitiveToNumber(node);
- } else {
- Node* const n =
- graph()->NewNode(javascript()->ToNumber(), node, context(),
- frame_state, effect(), control());
- update_effect(n);
- return n;
+ void ConvertBothInputsToNumber(Node** left_result, Node** right_result,
+ Node* frame_state) {
+ Node* projections[2];
+
+ // Find {IfSuccess} and {IfException} continuations of the operation.
+ NodeProperties::CollectControlProjections(node_, projections, 2);
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(projections[1]);
+ Node* if_exception = projections[1];
+ Node* if_success = projections[0];
+
+ // Insert two ToNumber() operations that both potentially throw.
+ Node* left_state = CreateFrameStateForLeftInput(frame_state);
+ Node* left_conv =
+ graph()->NewNode(javascript()->ToNumber(), left(), context(),
+ left_state, effect(), control());
+ Node* left_success = graph()->NewNode(common()->IfSuccess(), left_conv);
+ Node* right_state = CreateFrameStateForRightInput(frame_state, left_conv);
+ Node* right_conv =
+ graph()->NewNode(javascript()->ToNumber(), right(), context(),
+ right_state, left_conv, left_success);
+ Node* left_exception =
+ graph()->NewNode(common()->IfException(hint), left_conv, left_conv);
+ Node* right_exception =
+ graph()->NewNode(common()->IfException(hint), right_conv, right_conv);
+ NodeProperties::ReplaceControlInput(if_success, right_conv);
+ update_effect(right_conv);
+
+ // Wire conversions to existing {IfException} continuation.
+ Node* exception_merge = if_exception;
+ Node* exception_value =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), left_exception,
+ right_exception, exception_merge);
+ Node* exception_effect =
+ graph()->NewNode(common()->EffectPhi(2), left_exception,
+ right_exception, exception_merge);
+ for (Edge edge : exception_merge->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) edge.UpdateTo(exception_effect);
+ if (NodeProperties::IsValueEdge(edge)) edge.UpdateTo(exception_value);
}
+ NodeProperties::RemoveBounds(exception_merge);
+ exception_merge->ReplaceInput(0, left_exception);
+ exception_merge->ReplaceInput(1, right_exception);
+ exception_merge->set_op(common()->Merge(2));
+
+ *left_result = left_conv;
+ *right_result = right_conv;
}
Node* ConvertToUI32(Node* node, Signedness signedness) {
@@ -383,20 +405,31 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
r.ConvertInputsToNumber(frame_state);
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
-#if 0
- // TODO(turbofan): Lowering of StringAdd is disabled for now because:
- // a) The inserted ToString operation screws up valueOf vs. toString order.
- // b) Deoptimization at ToString doesn't have corresponding bailout id.
- // c) Our current StringAddStub is actually non-pure and requires context.
- if ((r.OneInputIs(Type::String()) && !r.IsStrong()) ||
- r.BothInputsAre(Type::String())) {
- // JSAdd(x:string, y:string) => StringAdd(x, y)
- // JSAdd(x:string, y) => StringAdd(x, ToString(y))
- // JSAdd(x, y:string) => StringAdd(ToString(x), y)
- r.ConvertInputsToString();
- return r.ChangeToPureOperator(simplified()->StringAdd());
- }
-#endif
+ if (r.BothInputsAre(Type::String())) {
+ // JSAdd(x:string, y:string) => CallStub[StringAdd](x, y)
+ Callable const callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->set_op(common()->Call(desc));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
+ JSBinopReduction r(this, node);
+ if (r.BothInputsAre(Type::Number())) {
+ // JSModulus(x:number, y:number) => NumberModulus(x, y)
+ return r.ChangeToPureOperator(simplified()->NumberModulus(),
+ Type::Number());
+ }
return NoChange();
}
@@ -404,7 +437,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
const Operator* numberOp) {
JSBinopReduction r(this, node);
- if (r.IsStrong()) {
+ if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(numberOp, Type::Number());
}
@@ -436,12 +469,17 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
Signedness left_signedness,
const Operator* shift_op) {
JSBinopReduction r(this, node);
- Type* reduce_type = r.IsStrong() ? Type::Number() : Type::Primitive();
- if (r.BothInputsAre(reduce_type)) {
- r.ConvertInputsForShift(left_signedness);
- return r.ChangeToPureOperator(shift_op, Type::Integral32());
+ if (r.IsStrong()) {
+ if (r.BothInputsAre(Type::Number())) {
+ r.ConvertInputsToUI32(left_signedness, kUnsigned);
+ return r.ChangeToPureOperator(shift_op);
+ }
+ return NoChange();
}
- return NoChange();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToUI32(left_signedness, kUnsigned);
+ return r.ChangeToPureOperator(shift_op);
}
@@ -470,18 +508,7 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
}
return r.ChangeToPureOperator(stringOp);
}
-#if 0
- // TODO(turbofan): General ToNumber disabled for now because:
- // a) The inserted ToNumber operation screws up observability of valueOf.
- // b) Deoptimization at ToNumber doesn't have corresponding bailout id.
- Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
- if (r.OneInputCannotBe(maybe_string)) {
- // If one input cannot be a string, then emit a number comparison.
- ...
- }
-#endif
- if (r.BothInputsAre(Type::PlainPrimitive()) &&
- r.OneInputCannotBe(Type::StringOrReceiver())) {
+ if (r.OneInputCannotBe(Type::StringOrReceiver())) {
const Operator* less_than;
const Operator* less_than_or_equal;
if (r.BothInputsAre(Type::Unsigned32())) {
@@ -492,7 +519,11 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
less_than_or_equal = machine()->Int32LessThanOrEqual();
} else {
// TODO(turbofan): mixed signed/unsigned int32 comparisons.
- r.ConvertPrimitiveInputsToNumber();
+ if (r.IsStrong() && !r.BothInputsAre(Type::Number())) {
+ return NoChange();
+ }
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
}
@@ -547,14 +578,18 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
if (r.left() == r.right()) {
// x === x is always true if x != NaN
if (!r.left_type()->Maybe(Type::NaN())) {
- return ReplaceEagerly(node, jsgraph()->BooleanConstant(!invert));
+ Node* replacement = jsgraph()->BooleanConstant(!invert);
+ Replace(node, replacement);
+ return Replace(replacement);
}
}
if (r.OneInputCannotBe(Type::NumberOrString())) {
// For values with canonical representation (i.e. not string nor number) an
// empty type intersection means the values cannot be strictly equal.
if (!r.left_type()->Maybe(r.right_type())) {
- return ReplaceEagerly(node, jsgraph()->BooleanConstant(invert));
+ Node* replacement = jsgraph()->BooleanConstant(invert);
+ Replace(node, replacement);
+ return Replace(replacement);
}
}
if (r.OneInputIs(Type::Undefined())) {
@@ -577,6 +612,10 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Receiver()), invert);
}
+ if (r.BothInputsAre(Type::Unique())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Unique()),
+ invert);
+ }
if (r.BothInputsAre(Type::String())) {
return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
@@ -612,7 +651,7 @@ Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
node->set_op(simplified()->NumberEqual());
node->ReplaceInput(0, length);
node->ReplaceInput(1, jsgraph()->ZeroConstant());
- NodeProperties::ReplaceWithValue(node, node, length);
+ ReplaceWithValue(node, node, length);
DCHECK_EQ(2, node->InputCount());
return Changed(node);
}
@@ -685,7 +724,7 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
Node* const input = node->InputAt(0);
Reduction reduction = ReduceJSToNumberInput(input);
if (reduction.Changed()) {
- NodeProperties::ReplaceWithValue(node, reduction.replacement());
+ ReplaceWithValue(node, reduction.replacement());
return reduction;
}
Type* const input_type = NodeProperties::GetBounds(input).upper;
@@ -738,25 +777,21 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
Node* const input = node->InputAt(0);
Reduction reduction = ReduceJSToStringInput(input);
if (reduction.Changed()) {
- NodeProperties::ReplaceWithValue(node, reduction.replacement());
+ ReplaceWithValue(node, reduction.replacement());
return reduction;
}
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
- Node* object = NodeProperties::GetValueInput(node, 0);
- Type* object_type = NodeProperties::GetBounds(object).upper;
- if (object_type->Is(Type::GlobalObject())) {
- // Optimize global constants like "undefined", "Infinity", and "NaN".
- Handle<Name> name = LoadNamedParametersOf(node->op()).name().handle();
- Handle<Object> constant_value = factory()->GlobalConstantFor(name);
- if (!constant_value.is_null()) {
- Node* constant = jsgraph()->Constant(constant_value);
- NodeProperties::ReplaceWithValue(node, constant);
- return Replace(constant);
- }
+Reduction JSTypedLowering::ReduceJSLoadGlobal(Node* node) {
+ // Optimize global constants like "undefined", "Infinity", and "NaN".
+ Handle<Name> name = LoadGlobalParametersOf(node->op()).name().handle();
+ Handle<Object> constant_value = factory()->GlobalConstantFor(name);
+ if (!constant_value.is_null()) {
+ Node* constant = jsgraph()->Constant(constant_value);
+ ReplaceWithValue(node, constant);
+ return Replace(constant);
}
return NoChange();
}
@@ -766,7 +801,7 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
Type* key_type = NodeProperties::GetBounds(key).upper;
- HeapObjectMatcher<Object> mbase(base);
+ HeapObjectMatcher mbase(base);
if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(mbase.Value().handle());
@@ -786,19 +821,20 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Check if we can avoid the bounds check.
- if (key_type->Min() >= 0 &&
- key_type->Max() < array->length()->Number()) {
+ if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
Node* load = graph()->NewNode(
simplified()->LoadElement(
AccessBuilder::ForTypedArrayElement(array->type(), true)),
buffer, key, effect, control);
- return ReplaceEagerly(node, load);
+ ReplaceWithValue(node, load, load);
+ return Replace(load);
}
// Compute byte offset.
Node* offset = Word32Shl(key, static_cast<int>(k));
Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
offset, length, effect, control);
- return ReplaceEagerly(node, load);
+ ReplaceWithValue(node, load, load);
+ return Replace(load);
}
}
}
@@ -812,7 +848,7 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 2);
Type* key_type = NodeProperties::GetBounds(key).upper;
Type* value_type = NodeProperties::GetBounds(value).upper;
- HeapObjectMatcher<Object> mbase(base);
+ HeapObjectMatcher mbase(base);
if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(mbase.Value().handle());
@@ -855,8 +891,7 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
value = graph()->NewNode(simplified()->NumberToUint32(), value);
}
// Check if we can avoid the bounds check.
- if (key_type->Min() >= 0 &&
- key_type->Max() < array->length()->Number()) {
+ if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
node->set_op(simplified()->StoreElement(
AccessBuilder::ForTypedArrayElement(array->type(), true)));
node->ReplaceInput(0, buffer);
@@ -929,6 +964,123 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSLoadDynamicGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamicGlobal, node->opcode());
+ DynamicGlobalAccess const& access = DynamicGlobalAccessOf(node->op());
+ Node* const vector = NodeProperties::GetValueInput(node, 0);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const state1 = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const state2 = NodeProperties::GetFrameStateInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ if (access.RequiresFullCheck()) return NoChange();
+
+ // Check whether the fast mode applies by looking for any extension object
+ // that might shadow the optimistic declaration.
+ uint32_t bitset = access.check_bitset();
+ Node* check_true = control;
+ Node* check_false = graph()->NewNode(common()->Merge(0));
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = graph()->NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ context, context, effect);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()),
+ load, jsgraph()->ZeroConstant());
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ check_true);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ check_false->set_op(common()->Merge(check_false->InputCount() + 1));
+ check_false->AppendInput(graph()->zone(), if_false);
+ check_true = if_true;
+ }
+
+ // Fast case: the variable is not shadowed. Perform a global object load.
+ Unique<Name> name = Unique<Name>::CreateUninitialized(access.name());
+ Node* global = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true), context,
+ context, effect);
+ Node* fast = graph()->NewNode(
+ javascript()->LoadGlobal(name, access.feedback(), access.mode()), global,
+ vector, context, state1, state2, global, check_true);
+
+ // Slow case: the variable is potentially shadowed. Perform a dynamic lookup.
+ uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
+ Node* slow = graph()->NewNode(
+ javascript()->LoadDynamicGlobal(access.name(), check_bitset,
+ access.feedback(), access.mode()),
+ vector, context, context, state1, state2, effect, check_false);
+
+ // Replace value, effect and control uses accordingly.
+ Node* new_control =
+ graph()->NewNode(common()->Merge(2), check_true, check_false);
+ Node* new_effect =
+ graph()->NewNode(common()->EffectPhi(2), fast, slow, new_control);
+ Node* new_value = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fast,
+ slow, new_control);
+ ReplaceWithValue(node, new_value, new_effect, new_control);
+ return Changed(new_value);
+}
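
A sketch of what this reduction targets at the source level (purely illustrative): dynamic global loads arise when a variable would resolve to a global unless an eval-introduced binding shadows it.

    // function f() {
    //   eval(arbitrary);  // may introduce an extension object in the chain
    //   return x;         // 'x' becomes a dynamic global lookup
    // }
    //
    // The reduction emits one branch per context level named in the
    // {check_bitset}; if no checked level acquired an extension object, the
    // fast JSLoadGlobal path runs, otherwise a JSLoadDynamicGlobal with
    // kFullCheckRequired performs the complete runtime lookup.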
+
+
+Reduction JSTypedLowering::ReduceJSLoadDynamicContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamicContext, node->opcode());
+ DynamicContextAccess const& access = DynamicContextAccessOf(node->op());
+ ContextAccess const& context_access = access.context_access();
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ if (access.RequiresFullCheck()) return NoChange();
+
+ // Check whether the fast mode applies by looking for any extension object
+ // that might shadow the optimistic declaration.
+ uint32_t bitset = access.check_bitset();
+ Node* check_true = control;
+ Node* check_false = graph()->NewNode(common()->Merge(0));
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = graph()->NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ context, context, effect);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()),
+ load, jsgraph()->ZeroConstant());
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ check_true);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ check_false->set_op(common()->Merge(check_false->InputCount() + 1));
+ check_false->AppendInput(graph()->zone(), if_false);
+ check_true = if_true;
+ }
+
+ // Fast case: the variable is not shadowed. Perform a context slot load.
+ Node* fast =
+ graph()->NewNode(javascript()->LoadContext(context_access.depth(),
+ context_access.index(), false),
+ context, context, effect);
+
+ // Slow case: the variable is potentially shadowed. Perform a dynamic lookup.
+ uint32_t check_bitset = DynamicContextAccess::kFullCheckRequired;
+ Node* slow =
+ graph()->NewNode(javascript()->LoadDynamicContext(
+ access.name(), check_bitset, context_access.depth(),
+ context_access.index()),
+ context, context, state, effect, check_false);
+
+ // Replace value, effect and control uses accordingly.
+ Node* new_control =
+ graph()->NewNode(common()->Merge(2), check_true, check_false);
+ Node* new_effect =
+ graph()->NewNode(common()->EffectPhi(2), fast, slow, new_control);
+ Node* new_value = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fast,
+ slow, new_control);
+ ReplaceWithValue(node, new_value, new_effect, new_control);
+ return Changed(new_value);
+}
+
+
Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
@@ -957,13 +1109,15 @@ Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
- HeapObjectMatcher<FixedArray> mconst(NodeProperties::GetValueInput(node, 2));
- int length = mconst.Value().handle()->length();
+ HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
+ int length = Handle<FixedArray>::cast(mconst.Value().handle())->length();
int flags = OpParameter<int>(node->op());
// Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
// initial length limit for arrays with "fast" elements kind.
+ // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
if ((flags & ArrayLiteral::kShallowElements) != 0 &&
+ (flags & ArrayLiteral::kIsStrong) == 0 &&
length < JSObject::kInitialMaxFastElementArray) {
Isolate* isolate = jsgraph()->isolate();
Callable callable = CodeFactory::FastCloneShallowArray(isolate);
@@ -985,9 +1139,9 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
- HeapObjectMatcher<FixedArray> mconst(NodeProperties::GetValueInput(node, 2));
+ HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
// Constants are pairs, see ObjectLiteral::properties_count().
- int length = mconst.Value().handle()->length() / 2;
+ int length = Handle<FixedArray>::cast(mconst.Value().handle())->length() / 2;
int flags = OpParameter<int>(node->op());
// Use the FastCloneShallowObjectStub only for shallow boilerplates without
@@ -1036,7 +1190,7 @@ Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
// TODO(mstarzinger): We could mutate {node} into the allocation instead.
NodeProperties::SetBounds(a.allocation(), NodeProperties::GetBounds(node));
- NodeProperties::ReplaceWithValue(node, node, a.effect());
+ ReplaceWithValue(node, node, a.effect());
node->ReplaceInput(0, a.allocation());
node->ReplaceInput(1, a.effect());
node->set_op(common()->Finish(1));
@@ -1050,9 +1204,10 @@ Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
Node* const input = NodeProperties::GetValueInput(node, 0);
- HeapObjectMatcher<ScopeInfo> minput(input);
+ HeapObjectMatcher minput(input);
DCHECK(minput.HasValue()); // TODO(mstarzinger): Make ScopeInfo static.
- int context_length = minput.Value().handle()->ContextLength();
+ int context_length =
+ Handle<ScopeInfo>::cast(minput.Value().handle())->ContextLength();
if (FLAG_turbo_allocate && context_length < kBlockContextAllocationLimit) {
// JSCreateBlockContext(s:scope[length < limit], f)
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -1075,7 +1230,7 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
}
// TODO(mstarzinger): We could mutate {node} into the allocation instead.
NodeProperties::SetBounds(a.allocation(), NodeProperties::GetBounds(node));
- NodeProperties::ReplaceWithValue(node, node, a.effect());
+ ReplaceWithValue(node, node, a.effect());
node->ReplaceInput(0, a.allocation());
node->ReplaceInput(1, a.effect());
node->set_op(common()->Finish(1));
@@ -1086,6 +1241,307 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* const function = NodeProperties::GetValueInput(node, 0);
+ Type* const function_type = NodeProperties::GetBounds(function).upper;
+ Node* const receiver = NodeProperties::GetValueInput(node, 1);
+ Type* const receiver_type = NodeProperties::GetBounds(receiver).upper;
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+
+ // Check that {function} is actually a JSFunction with the correct arity.
+ if (function_type->IsFunction() &&
+ function_type->AsFunction()->Arity() == arity) {
+ // Check that the {receiver} doesn't need to be wrapped.
+ if (receiver_type->Is(Type::ReceiverOrUndefined())) {
+ Node* const context = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
+ function, effect, control);
+ NodeProperties::ReplaceContextInput(node, context);
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (is_strict(p.language_mode())) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+ node->set_op(common()->Call(Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, 1 + arity, flags)));
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInDone, node->opcode());
+ node->set_op(machine()->Word32Equal());
+ node->TrimInputCount(2);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Get the set of properties to enumerate.
+ Node* cache_type = effect = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), receiver,
+ context, frame_state, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), cache_type);
+
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* cache_type_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ cache_type, effect, control);
+ Node* meta_map = jsgraph()->HeapConstant(factory()->meta_map());
+
+ // If we got a map from the GetPropertyNamesFast runtime call, we can do a
+ // fast modification check. Otherwise, we got a fixed array, and we have to
+ // perform a slow check on every iteration.
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ cache_type_map, meta_map);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* cache_array_true0;
+ Node* cache_length_true0;
+ Node* cache_type_true0;
+ Node* etrue0;
+ {
+ // Enum cache case.
+ Node* cache_type_enum_length = etrue0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
+ effect, if_true0);
+ cache_length_true0 =
+ graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
+ jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
+
+ Node* check1 =
+ graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
+ jsgraph()->Int32Constant(0));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* cache_array_true1;
+ Node* etrue1;
+ {
+ // No properties to enumerate.
+ cache_array_true1 =
+ jsgraph()->HeapConstant(factory()->empty_fixed_array());
+ etrue1 = etrue0;
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* cache_array_false1;
+ Node* efalse1;
+ {
+ // Load the enumeration cache from the instance descriptors of {receiver}.
+ Node* receiver_map_descriptors = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
+ receiver_map, etrue0, if_false1);
+ Node* object_map_enum_cache = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
+ receiver_map_descriptors, efalse1, if_false1);
+ cache_array_false1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache()),
+ object_map_enum_cache, efalse1, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ cache_array_true0 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true1,
+ cache_array_false1, if_true0);
+
+ cache_type_true0 = cache_type;
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* cache_array_false0;
+ Node* cache_length_false0;
+ Node* cache_type_false0;
+ Node* efalse0;
+ {
+ // FixedArray case.
+ Node* receiver_instance_type = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, effect, if_false0);
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ cache_type_false0 = graph()->NewNode(
+ common()->Select(kMachAnyTagged, BranchHint::kFalse),
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ receiver_instance_type,
+ jsgraph()->Uint32Constant(LAST_JS_PROXY_TYPE)),
+        jsgraph()->ZeroConstant(),  // Zero indicates a proxy.
+ jsgraph()->OneConstant()); // One means slow check.
+
+ cache_array_false0 = cache_type;
+ cache_length_false0 = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ cache_array_false0, efalse0, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* cache_array =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true0,
+ cache_array_false0, control);
+ Node* cache_length =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_length_true0,
+ cache_length_false0, control);
+ cache_type = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
+ cache_type_true0, cache_type_false0, control);
+
+ for (auto edge : node->use_edges()) {
+ Node* const use = edge.from();
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ Revisit(use);
+ } else {
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ Replace(use, control);
+ } else if (use->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(cache_type_true0);
+ continue;
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ DCHECK_EQ(IrOpcode::kProjection, use->opcode());
+ switch (ProjectionIndexOf(use->op())) {
+ case 0:
+ Replace(use, cache_type);
+ break;
+ case 1:
+ Replace(use, cache_array);
+ break;
+ case 2:
+ Replace(use, cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ use->Kill();
+ }
+ }
+  return NoChange();  // All uses were already replaced above.
+}
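
All projections of the JSForInPrepare node are rewired above, so the node itself is dead afterwards and the reducer deliberately reports NoChange(). Conceptually, the three projections now carry the following state (a sketch with assumed field types):

    // Sketch of the triple produced by the lowered JSForInPrepare.
    struct ForInState {
      intptr_t cache_type;   // receiver map (enum-cache case), 0 for a proxy,
                             // or 1 when a slow per-iteration check is needed
      void* cache_array;     // enum cache array, or the FixedArray of names
      int32_t cache_length;  // number of keys to iterate
    };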
+
+
+Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* cache_array = NodeProperties::GetValueInput(node, 1);
+ Node* cache_type = NodeProperties::GetValueInput(node, 2);
+ Node* index = NodeProperties::GetValueInput(node, 3);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Load the next {key} from the {cache_array}.
+ Node* key = effect = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ cache_array, index, effect, control);
+
+ // Load the map of the {receiver}.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Check if the expected map still matches that of the {receiver}.
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ receiver_map, cache_type);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0;
+ Node* vtrue0;
+ {
+    // No filtering needed, since the expected map still matches that of the
+    // {receiver}.
+ etrue0 = effect;
+ vtrue0 = key;
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0;
+ Node* vfalse0;
+ {
+    // Check if the {cache_type} is zero, which indicates a proxy.
+ Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ cache_type, jsgraph()->ZeroConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1;
+ Node* vtrue1;
+ {
+ // Don't do filtering for proxies.
+ etrue1 = effect;
+ vtrue1 = key;
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1;
+ Node* vfalse1;
+ {
+      // Filter the {key} to check if it's still a valid property of the
+      // {receiver} (this implicitly performs the ToName conversion).
+ vfalse1 = efalse1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kForInFilter, 2), receiver, key,
+ context, frame_state, effect, if_false1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue1,
+ vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ ReplaceWithValue(node, node, effect, control);
+ node->set_op(common()->Phi(kMachAnyTagged, 2));
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vfalse0);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ return Changed(node);
+}
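
The resulting diamond is easier to read as straight-line code; a standalone model under assumed types, where ForInFilter stands in for the Runtime::kForInFilter call:

    #include <cstdint>

    struct Object;
    struct Map;
    Map* MapOf(Object* o);                               // models the map load
    Object* ForInFilter(Object* receiver, Object* key);  // models the runtime call

    Object* ForInNextModel(Object* receiver, Object** cache_array,
                           intptr_t cache_type, int32_t index) {
      Object* key = cache_array[index];
      // Fast path: the receiver's map is unchanged, so no filtering is needed.
      if (reinterpret_cast<intptr_t>(MapOf(receiver)) == cache_type) return key;
      // Proxies (cache_type == 0) are not filtered either.
      if (cache_type == 0) return key;
      // Slow path: re-validate the key (also performs the ToName conversion).
      return ForInFilter(receiver, key);
    }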
+
+
+Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInStep, node->opcode());
+ node->set_op(machine()->Int32Add());
+ node->ReplaceInput(1, jsgraph()->Int32Constant(1));
+ DCHECK_EQ(2, node->InputCount());
+ return Changed(node);
+}
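
The step, likewise, is now a plain machine add; as a one-line model:

    // JSForInStep(index) lowers to Int32Add(index, 1).
    int32_t ForInStepModel(int32_t index) { return index + 1; }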
+
+
Reduction JSTypedLowering::Reduce(Node* node) {
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
@@ -1094,27 +1550,27 @@ Reduction JSTypedLowering::Reduce(Node* node) {
Type* upper = NodeProperties::GetBounds(node).upper;
if (upper->IsConstant()) {
Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::MinusZero())) {
Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::NaN())) {
Node* replacement = jsgraph()->NaNConstant();
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::Null())) {
Node* replacement = jsgraph()->NullConstant();
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::PlainNumber()) && upper->Min() == upper->Max()) {
Node* replacement = jsgraph()->Constant(upper->Min());
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::Undefined())) {
Node* replacement = jsgraph()->UndefinedConstant();
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
}
}
@@ -1139,11 +1595,12 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSBitwiseAnd:
return ReduceInt32Binop(node, machine()->Word32And());
case IrOpcode::kJSShiftLeft:
- return ReduceUI32Shift(node, kSigned, machine()->Word32Shl());
+ return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftLeft());
case IrOpcode::kJSShiftRight:
- return ReduceUI32Shift(node, kSigned, machine()->Word32Sar());
+ return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftRight());
case IrOpcode::kJSShiftRightLogical:
- return ReduceUI32Shift(node, kUnsigned, machine()->Word32Shr());
+ return ReduceUI32Shift(node, kUnsigned,
+ simplified()->NumberShiftRightLogical());
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
case IrOpcode::kJSSubtract:
@@ -1153,7 +1610,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSDivide:
return ReduceNumberBinop(node, simplified()->NumberDivide());
case IrOpcode::kJSModulus:
- return ReduceNumberBinop(node, simplified()->NumberModulus());
+ return ReduceJSModulus(node);
case IrOpcode::kJSUnaryNot:
return ReduceJSUnaryNot(node);
case IrOpcode::kJSToBoolean:
@@ -1162,8 +1619,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
- case IrOpcode::kJSLoadNamed:
- return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSLoadGlobal:
+ return ReduceJSLoadGlobal(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
@@ -1172,6 +1629,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
return ReduceJSStoreContext(node);
+ case IrOpcode::kJSLoadDynamicGlobal:
+ return ReduceJSLoadDynamicGlobal(node);
+ case IrOpcode::kJSLoadDynamicContext:
+ return ReduceJSLoadDynamicContext(node);
case IrOpcode::kJSCreateClosure:
return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateLiteralArray:
@@ -1182,6 +1643,16 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCreateWithContext(node);
case IrOpcode::kJSCreateBlockContext:
return ReduceJSCreateBlockContext(node);
+ case IrOpcode::kJSCallFunction:
+ return ReduceJSCallFunction(node);
+ case IrOpcode::kJSForInDone:
+ return ReduceJSForInDone(node);
+ case IrOpcode::kJSForInNext:
+ return ReduceJSForInNext(node);
+ case IrOpcode::kJSForInPrepare:
+ return ReduceJSForInPrepare(node);
+ case IrOpcode::kJSForInStep:
+ return ReduceJSForInStep(node);
default:
break;
}
@@ -1189,18 +1660,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
}
-Node* JSTypedLowering::ConvertPrimitiveToNumber(Node* input) {
- DCHECK(NodeProperties::GetBounds(input).upper->Is(Type::PlainPrimitive()));
- // Avoid inserting too many eager ToNumber() operations.
- Reduction const reduction = ReduceJSToNumberInput(input);
- if (reduction.Changed()) return reduction.replacement();
- // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
- return graph()->NewNode(
- javascript()->ToNumber(), input, jsgraph()->NoContextConstant(),
- jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
-}
-
-
Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Shl(), lhs,
@@ -1214,6 +1673,9 @@ Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
Graph* JSTypedLowering::graph() const { return jsgraph()->graph(); }
+Isolate* JSTypedLowering::isolate() const { return jsgraph()->isolate(); }
+
+
JSOperatorBuilder* JSTypedLowering::javascript() const {
return jsgraph()->javascript();
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index a41fb516a0..8252093d15 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -26,9 +26,9 @@ class MachineOperatorBuilder;
// Lowers JS-level operators to simplified operators based on types.
-class JSTypedLowering final : public Reducer {
+class JSTypedLowering final : public AdvancedReducer {
public:
- JSTypedLowering(JSGraph* jsgraph, Zone* zone);
+ JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone);
~JSTypedLowering() final {}
Reduction Reduce(Node* node) final;
@@ -36,16 +36,18 @@ class JSTypedLowering final : public Reducer {
private:
friend class JSBinopReduction;
- Reduction ReplaceEagerly(Node* old, Node* node);
Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSModulus(Node* node);
Reduction ReduceJSBitwiseOr(Node* node);
Reduction ReduceJSMultiply(Node* node);
Reduction ReduceJSComparison(Node* node);
- Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
+ Reduction ReduceJSLoadDynamicGlobal(Node* node);
+ Reduction ReduceJSLoadDynamicContext(Node* node);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSUnaryNot(Node* node);
@@ -59,18 +61,22 @@ class JSTypedLowering final : public Reducer {
Reduction ReduceJSCreateLiteralObject(Node* node);
Reduction ReduceJSCreateWithContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceJSCallFunction(Node* node);
+ Reduction ReduceJSForInDone(Node* node);
+ Reduction ReduceJSForInNext(Node* node);
+ Reduction ReduceJSForInPrepare(Node* node);
+ Reduction ReduceJSForInStep(Node* node);
Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
- Node* ConvertPrimitiveToNumber(Node* input);
-
Node* Word32Shl(Node* const lhs, int32_t const rhs);
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
@@ -81,9 +87,6 @@ class JSTypedLowering final : public Reducer {
JSGraph* jsgraph_;
SimplifiedOperatorBuilder simplified_;
- Type* zero_range_;
- Type* one_range_;
- Type* zero_thirtyone_range_;
Type* shifted_int32_ranges_[4];
};
diff --git a/deps/v8/src/compiler/linkage-impl.h b/deps/v8/src/compiler/linkage-impl.h
index 1791a43007..27b0235b97 100644
--- a/deps/v8/src/compiler/linkage-impl.h
+++ b/deps/v8/src/compiler/linkage-impl.h
@@ -70,6 +70,7 @@ class LinkageHelper {
js_parameter_count, // js_parameter_count
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
flags, // flags
"js-call");
}
@@ -115,7 +116,7 @@ class LinkageHelper {
locations.AddParam(regloc(LinkageTraits::ContextReg()));
types.AddParam(kMachAnyTagged);
- CallDescriptor::Flags flags = Linkage::NeedsFrameState(function_id)
+ CallDescriptor::Flags flags = Linkage::FrameStateInputCount(function_id) > 0
? CallDescriptor::kNeedsFrameState
: CallDescriptor::kNoFlags;
@@ -131,6 +132,7 @@ class LinkageHelper {
js_parameter_count, // js_parameter_count
properties, // properties
kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
flags, // flags
function->name); // debug name
}
@@ -143,8 +145,7 @@ class LinkageHelper {
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties, MachineType return_type) {
- const int register_parameter_count =
- descriptor.GetEnvironmentParameterCount();
+ const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int js_parameter_count =
register_parameter_count + stack_parameter_count;
const int context_count = 1;
@@ -163,9 +164,9 @@ class LinkageHelper {
for (int i = 0; i < js_parameter_count; i++) {
if (i < register_parameter_count) {
// The first parameters go in registers.
- Register reg = descriptor.GetEnvironmentParameterRegister(i);
+ Register reg = descriptor.GetRegisterParameter(i);
Representation rep =
- descriptor.GetEnvironmentParameterRepresentation(i);
+ RepresentationFromType(descriptor.GetParameterType(i));
locations.AddParam(regloc(reg));
types.AddParam(reptyp(rep));
} else {
@@ -191,6 +192,7 @@ class LinkageHelper {
js_parameter_count, // js_parameter_count
properties, // properties
kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
flags, // flags
descriptor.DebugName(isolate));
}
@@ -204,27 +206,30 @@ class LinkageHelper {
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(msig->parameter_count());
+ int stack_offset = LinkageTraits::CStackBackingStoreLength();
for (int i = 0; i < parameter_count; i++) {
if (i < LinkageTraits::CRegisterParametersLength()) {
locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
} else {
- locations.AddParam(stackloc(-1 - i));
+ locations.AddParam(stackloc(-1 - stack_offset));
+ stack_offset++;
}
}
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = kMachPtr;
LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallAddress, // kind
- target_type, // target MachineType
- target_loc, // target location
- msig, // machine_sig
- locations.Build(), // location_sig
- 0, // js_parameter_count
- Operator::kNoProperties, // properties
- LinkageTraits::CCalleeSaveRegisters(), // callee-saved registers
- CallDescriptor::kNoFlags, // flags
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig, // machine_sig
+ locations.Build(), // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ LinkageTraits::CCalleeSaveRegisters(), // callee-saved registers
+ LinkageTraits::CCalleeSaveFPRegisters(), // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
"c-call");
}
@@ -272,7 +277,12 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
int parameter_count = static_cast<int>(incoming_->JSParameterCount() - 1);
int first_stack_slot = OsrHelper::FirstStackSlotIndex(parameter_count);
- if (index >= first_stack_slot) {
+ if (index == kOsrContextSpillSlotIndex) {
+ // Context. Use the parameter location of the context spill slot.
+ // Parameter (arity + 1) is special for the context of the function frame.
+ int context_index = 1 + 1 + parameter_count; // target + receiver + params
+ return incoming_->GetInputLocation(context_index);
+ } else if (index >= first_stack_slot) {
// Local variable stored in this (callee) stack.
int spill_index =
LinkageLocation::ANY_REGISTER + 1 + index - first_stack_slot;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 2f22d9a6be..6ef014246d 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -4,6 +4,7 @@
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
#include "src/compiler/pipeline.h"
@@ -48,14 +49,71 @@ bool CallDescriptor::HasSameReturnLocationsAs(
}
+bool CallDescriptor::CanTailCall(const Node* node) const {
+  // Tail calling is currently allowed if the return locations match and all
+  // stack parameters match exactly in number and content; parameters passed
+  // in registers are not otherwise constrained.
+ CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
+ if (!HasSameReturnLocationsAs(other)) return false;
+ size_t current_input = 0;
+ size_t other_input = 0;
+ size_t stack_parameter = 0;
+ while (true) {
+ if (other_input >= other->InputCount()) {
+      while (current_input < InputCount()) {
+ if (!GetInputLocation(current_input).is_register()) {
+ return false;
+ }
+ ++current_input;
+ }
+ return true;
+ }
+ if (current_input >= InputCount()) {
+ while (other_input < other->InputCount()) {
+ if (!other->GetInputLocation(other_input).is_register()) {
+ return false;
+ }
+ ++other_input;
+ }
+ return true;
+ }
+ if (GetInputLocation(current_input).is_register()) {
+ ++current_input;
+ continue;
+ }
+ if (other->GetInputLocation(other_input).is_register()) {
+ ++other_input;
+ continue;
+ }
+ if (GetInputLocation(current_input) !=
+ other->GetInputLocation(other_input)) {
+ return false;
+ }
+ Node* input = node->InputAt(static_cast<int>(other_input));
+ if (input->opcode() != IrOpcode::kParameter) {
+ return false;
+ }
+ size_t param_index = ParameterIndexOf(input->op());
+ if (param_index != stack_parameter) {
+ return false;
+ }
+ ++stack_parameter;
+ ++current_input;
+ ++other_input;
+ }
+ UNREACHABLE();
+ return false;
+}
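
The stack-parameter rule above boils down to this: every stack input of the prospective tail call must be fed by the caller's own parameter, in order, so that no stack shuffling is required. A toy model of that check (hypothetical helper, not part of the patch):

    #include <cstddef>
    #include <vector>

    // param_index_of_stack_input[i] holds the caller parameter index feeding
    // the i-th stack input of the prospective tail call.
    bool CanForwardStackArgs(
        const std::vector<size_t>& param_index_of_stack_input) {
      for (size_t i = 0; i < param_index_of_stack_input.size(); ++i) {
        if (param_index_of_stack_input[i] != i) return false;  // needs a move
      }
      return true;
    }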
+
+
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
if (info->code_stub() != NULL) {
// Use the code stub interface descriptor.
- CallInterfaceDescriptor descriptor =
- info->code_stub()->GetCallInterfaceDescriptor();
- return GetStubCallDescriptor(info->isolate(), zone, descriptor, 0,
- CallDescriptor::kNoFlags,
- Operator::kNoProperties);
+ CodeStub* stub = info->code_stub();
+ CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
+ return GetStubCallDescriptor(
+ info->isolate(), zone, descriptor, stub->GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
}
if (info->function() != NULL) {
// If we already have the function literal, use the number of parameters
@@ -105,18 +163,19 @@ FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
// static
-bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
+int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
// Most runtime functions need a FrameState. A few chosen ones that we know
// not to call into arbitrary JavaScript, not to throw, and not to deoptimize
// are blacklisted here and can be called without a FrameState.
switch (function) {
case Runtime::kAllocateInTargetSpace:
+ case Runtime::kDateField:
case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
- case Runtime::kForInCacheArrayLength:
- case Runtime::kForInInit:
- case Runtime::kForInNext:
+ case Runtime::kForInDone:
+ case Runtime::kForInStep:
+ case Runtime::kGetOriginalConstructor:
case Runtime::kNewArguments:
case Runtime::kNewClosure:
case Runtime::kNewFunctionContext:
@@ -124,20 +183,21 @@ bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
- case Runtime::kSetProperty: // TODO(jarin): Is it safe?
case Runtime::kStringCompareRT:
case Runtime::kStringEquals:
case Runtime::kToFastProperties: // TODO(jarin): Is it safe?
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
- return false;
+ return 0;
case Runtime::kInlineArguments:
case Runtime::kInlineCallFunction:
- case Runtime::kInlineDateField:
- case Runtime::kInlineDeoptimizeNow:
+ case Runtime::kInlineGetCallerJSFunction:
case Runtime::kInlineGetPrototype:
case Runtime::kInlineRegExpExec:
- return true;
+ return 1;
+ case Runtime::kInlineDeoptimizeNow:
+ case Runtime::kInlineThrowNotDateError:
+ return 2;
default:
break;
}
@@ -145,9 +205,9 @@ bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
// Most inlined runtime functions (except the ones listed above) can be called
// without a FrameState or will be lowered by JSIntrinsicLowering internally.
const Runtime::Function* const f = Runtime::FunctionForId(function);
- if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
+ if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return 0;
- return true;
+ return 1;
}
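
Callers now branch on the count rather than on a boolean; the descriptor-building change in linkage-impl.h above is the canonical usage:

    // Usage sketch, mirroring the linkage-impl.h hunk in this patch:
    CallDescriptor::Flags flags =
        Linkage::FrameStateInputCount(function_id) > 0
            ? CallDescriptor::kNeedsFrameState
            : CallDescriptor::kNoFlags;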
@@ -203,6 +263,6 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
return NULL;
}
#endif // !V8_TURBOFAN_BACKEND
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index aa680689d7..31b9faca2a 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -20,6 +20,7 @@ class CallInterfaceDescriptor;
namespace compiler {
+class Node;
class OsrHelper;
// Describes the location for a parameter or a return value to a call.
@@ -73,7 +74,8 @@ class CallDescriptor final : public ZoneObject {
kPatchableCallSite = 1u << 1,
kNeedsNopAfterCall = 1u << 2,
kHasExceptionHandler = 1u << 3,
- kSupportsTailCalls = 1u << 4,
+ kHasLocalCatchHandler = 1u << 4,
+ kSupportsTailCalls = 1u << 5,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
@@ -82,7 +84,8 @@ class CallDescriptor final : public ZoneObject {
const MachineSignature* machine_sig,
LocationSignature* location_sig, size_t js_param_count,
Operator::Properties properties,
- RegList callee_saved_registers, Flags flags,
+ RegList callee_saved_registers,
+ RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "")
: kind_(kind),
target_type_(target_type),
@@ -92,6 +95,7 @@ class CallDescriptor final : public ZoneObject {
js_param_count_(js_param_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
+ callee_saved_fp_registers_(callee_saved_fp_registers),
flags_(flags),
debug_name_(debug_name) {
DCHECK(machine_sig->return_count() == location_sig->return_count());
@@ -101,12 +105,18 @@ class CallDescriptor final : public ZoneObject {
// Returns the kind of this call.
Kind kind() const { return kind_; }
+ // Returns {true} if this descriptor is a call to a C function.
+ bool IsCFunctionCall() const { return kind_ == kCallAddress; }
+
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
// The number of return values from this call.
size_t ReturnCount() const { return machine_sig_->return_count(); }
+ // The number of C parameters to this call.
+ size_t CParameterCount() const { return machine_sig_->parameter_count(); }
+
// The number of JavaScript parameters to this call, including the receiver
// object.
size_t JSParameterCount() const { return js_param_count_; }
@@ -149,12 +159,17 @@ class CallDescriptor final : public ZoneObject {
// Get the callee-saved registers, if any, across this call.
RegList CalleeSavedRegisters() const { return callee_saved_registers_; }
+ // Get the callee-saved FP registers, if any, across this call.
+ RegList CalleeSavedFPRegisters() const { return callee_saved_fp_registers_; }
+
const char* debug_name() const { return debug_name_; }
bool UsesOnlyRegisters() const;
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
+ bool CanTailCall(const Node* call) const;
+
private:
friend class Linkage;
@@ -166,6 +181,7 @@ class CallDescriptor final : public ZoneObject {
const size_t js_param_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
+ const RegList callee_saved_fp_registers_;
const Flags flags_;
const char* const debug_name_;
@@ -244,7 +260,7 @@ class Linkage : public ZoneObject {
// the frame offset, e.g. to index into part of a double slot.
FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0) const;
- static bool NeedsFrameState(Runtime::FunctionId function);
+ static int FrameStateInputCount(Runtime::FunctionId function);
// Get the location where an incoming OSR value is stored.
LinkageLocation GetOsrValueLocation(int index) const;
@@ -252,6 +268,9 @@ class Linkage : public ZoneObject {
// A special parameter index for JSCalls that represents the closure.
static const int kJSFunctionCallClosureParamIndex = -1;
+ // A special {OsrValue} index to indicate the context spill slot.
+ static const int kOsrContextSpillSlotIndex = -1;
+
private:
CallDescriptor* const incoming_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index b76b187d5d..c78a283ca0 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -36,7 +36,7 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
if (object == NodeProperties::GetValueInput(effect, 0) &&
access == FieldAccessOf(effect->op())) {
Node* const value = effect;
- NodeProperties::ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value);
return Replace(value);
}
break;
@@ -45,7 +45,7 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
if (access == FieldAccessOf(effect->op())) {
if (object == NodeProperties::GetValueInput(effect, 0)) {
Node* const value = NodeProperties::GetValueInput(effect, 1);
- NodeProperties::ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value);
return Replace(value);
}
// TODO(turbofan): Alias analysis to the rescue?
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 7f819dc94c..db87d9a082 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -11,9 +11,9 @@ namespace v8 {
namespace internal {
namespace compiler {
-class LoadElimination final : public Reducer {
+class LoadElimination final : public AdvancedReducer {
public:
- LoadElimination() {}
+ explicit LoadElimination(Editor* editor) : AdvancedReducer(editor) {}
~LoadElimination() final;
Reduction Reduce(Node* node) final;
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 77c2a95928..1a06b666dd 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -63,8 +63,7 @@ class LoopTree : public ZoneObject {
// Return the innermost nested loop, if any, that contains {node}.
Loop* ContainingLoop(Node* node) {
- if (node->id() >= static_cast<int>(node_to_loop_num_.size()))
- return nullptr;
+ if (node->id() >= node_to_loop_num_.size()) return nullptr;
int num = node_to_loop_num_[node->id()];
return num > 0 ? &all_loops_[num - 1] : nullptr;
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index ba0b7a1893..3ce59733eb 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -646,14 +646,13 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
Node* const phi = m.node();
DCHECK_EQ(kRepFloat64, RepresentationOf(OpParameter<MachineType>(phi)));
if (phi->OwnedBy(node)) {
- // TruncateFloat64ToInt32(Phi[Float64](x1,...,xn))
- // => Phi[Int32](TruncateFloat64ToInt32(x1),
+ // TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
+ // => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
// ...,
- // TruncateFloat64ToInt32(xn))
+ // TruncateFloat64ToInt32[mode](xn))
const int value_input_count = phi->InputCount() - 1;
for (int i = 0; i < value_input_count; ++i) {
- Node* input = graph()->NewNode(machine()->TruncateFloat64ToInt32(),
- phi->InputAt(i));
+ Node* input = graph()->NewNode(node->op(), phi->InputAt(i));
// TODO(bmeurer): Reschedule input for reduction once we have Revisit()
// instead of recursing into ReduceTruncateFloat64ToInt32() here.
Reduction reduction = ReduceTruncateFloat64ToInt32(input);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 07bcf3f77f..2e2229032c 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -12,6 +12,24 @@ namespace v8 {
namespace internal {
namespace compiler {
+std::ostream& operator<<(std::ostream& os, TruncationMode mode) {
+ switch (mode) {
+ case TruncationMode::kJavaScript:
+ return os << "JavaScript";
+ case TruncationMode::kRoundToZero:
+ return os << "RoundToZero";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+TruncationMode TruncationModeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, op->opcode());
+ return OpParameter<TruncationMode>(op);
+}
+
+
std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
@@ -101,13 +119,14 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Div, Operator::kNoProperties, 2, 0, 1) \
- V(Int64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
@@ -116,24 +135,20 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float64Add, Operator::kCommutative, 2, 0, 1) \
V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
@@ -144,11 +159,17 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
- V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)
+ V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
+
+#define PURE_OPTIONAL_OP_LIST(V) \
+ V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1)
#define MACHINE_TYPE_LIST(V) \
@@ -184,8 +205,22 @@ struct MachineOperatorGlobalCache {
}; \
Name##Operator k##Name;
PURE_OP_LIST(PURE)
+ PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
+ template <TruncationMode kMode>
+ struct TruncateFloat64ToInt32Operator final
+ : public Operator1<TruncationMode> {
+ TruncateFloat64ToInt32Operator()
+ : Operator1<TruncationMode>(IrOpcode::kTruncateFloat64ToInt32,
+ Operator::kPure, "TruncateFloat64ToInt32",
+ 1, 0, 0, 1, 0, 0, kMode) {}
+ };
+ TruncateFloat64ToInt32Operator<TruncationMode::kJavaScript>
+ kTruncateFloat64ToInt32JavaScript;
+ TruncateFloat64ToInt32Operator<TruncationMode::kRoundToZero>
+ kTruncateFloat64ToInt32RoundToZero;
+
#define LOAD(Type) \
struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
Load##Type##Operator() \
@@ -255,6 +290,27 @@ MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
PURE_OP_LIST(PURE)
#undef PURE
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const OptionalOperator MachineOperatorBuilder::Name() { \
+ return OptionalOperator(flags_ & k##Name ? &cache_.k##Name : nullptr); \
+ }
+PURE_OPTIONAL_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
+ TruncationMode mode) {
+ switch (mode) {
+ case TruncationMode::kJavaScript:
+ return &cache_.kTruncateFloat64ToInt32JavaScript;
+ case TruncationMode::kRoundToZero:
+ return &cache_.kTruncateFloat64ToInt32RoundToZero;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
switch (rep) {
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 141586965b..0c055b8732 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -17,6 +17,37 @@ struct MachineOperatorGlobalCache;
class Operator;
+// For operators that are not supported on all platforms.
+class OptionalOperator final {
+ public:
+ explicit OptionalOperator(const Operator* op) : op_(op) {}
+
+ bool IsSupported() const { return op_ != nullptr; }
+ const Operator* op() const {
+ DCHECK_NOT_NULL(op_);
+ return op_;
+ }
+
+ private:
+ const Operator* const op_;
+};
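
Call sites are expected to test for support before taking the operator; a usage sketch, assuming the usual graph()/machine() accessors of a reducer:

    // Only emit the machine operator when the back-end supports it.
    if (machine()->Float64RoundDown().IsSupported()) {
      Node* rounded =
          graph()->NewNode(machine()->Float64RoundDown().op(), input);
      // ... use {rounded} ...
    } else {
      // Fall back, e.g. to a runtime call or an inline lowering.
    }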
+
+
+// Supported float64 to int32 truncation modes.
+enum class TruncationMode : uint8_t {
+ kJavaScript, // ES6 section 7.1.5
+ kRoundToZero // Round towards zero. Implementation defined for NaN and ovf.
+};
+
+V8_INLINE size_t hash_value(TruncationMode mode) {
+ return static_cast<uint8_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream&, TruncationMode);
+
+TruncationMode TruncationModeOf(Operator const*);
+
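+
The mode travels with the operator as an Operator1 parameter, so builders pick it up front and consumers read it back; a usage sketch (assumed graph-building context):

    Node* truncated = graph()->NewNode(
        machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
    DCHECK_EQ(TruncationMode::kJavaScript, TruncationModeOf(truncated->op()));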
+
// Supported write barrier modes.
enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
@@ -74,6 +105,8 @@ class MachineOperatorBuilder final : public ZoneObject {
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
+ // Note that Float*Max behaves like `(a < b) ? b : a`, not like Math.max().
+ // Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
kFloat32Max = 1u << 0,
kFloat32Min = 1u << 1,
kFloat64Max = 1u << 2,
@@ -83,7 +116,10 @@ class MachineOperatorBuilder final : public ZoneObject {
kFloat64RoundTiesAway = 1u << 6,
kInt32DivIsSafe = 1u << 7,
kUint32DivIsSafe = 1u << 8,
- kWord32ShiftIsSafe = 1u << 9
+ kWord32ShiftIsSafe = 1u << 9,
+ kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
+ kFloat64RoundDown | kFloat64RoundTruncate |
+ kFloat64RoundTiesAway
};
typedef base::Flags<Flag, unsigned> Flags;
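
A back-end that implements every optional operation can advertise them all at once; a construction sketch (assuming the three-argument constructor shown in machine-operator.cc above):

    MachineOperatorBuilder machine(
        zone, kMachPtr, MachineOperatorBuilder::kAllOptionalOps);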
@@ -137,6 +173,7 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Int64LessThanOrEqual();
const Operator* Uint64Div();
const Operator* Uint64LessThan();
+ const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
// These operators change the representation of numbers while preserving the
@@ -155,7 +192,7 @@ class MachineOperatorBuilder final : public ZoneObject {
// These operators truncate numbers, both changing the representation of
// the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
- const Operator* TruncateFloat64ToInt32(); // JavaScript semantics.
+ const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
// Floating point operators always operate with IEEE 754 round-to-nearest
@@ -186,16 +223,12 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Float64LessThanOrEqual();
// Floating point min/max complying to IEEE 754 (single-precision).
- const Operator* Float32Max();
- const Operator* Float32Min();
- bool HasFloat32Max() { return flags_ & kFloat32Max; }
- bool HasFloat32Min() { return flags_ & kFloat32Min; }
+ const OptionalOperator Float32Max();
+ const OptionalOperator Float32Min();
// Floating point min/max complying to IEEE 754 (double-precision).
- const Operator* Float64Max();
- const Operator* Float64Min();
- bool HasFloat64Max() { return flags_ & kFloat64Max; }
- bool HasFloat64Min() { return flags_ & kFloat64Min; }
+ const OptionalOperator Float64Max();
+ const OptionalOperator Float64Min();
// Floating point abs complying to IEEE 754 (single-precision).
const Operator* Float32Abs();
@@ -204,12 +237,9 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Float64Abs();
// Floating point rounding.
- const Operator* Float64RoundDown();
- const Operator* Float64RoundTruncate();
- const Operator* Float64RoundTiesAway();
- bool HasFloat64RoundDown() { return flags_ & kFloat64RoundDown; }
- bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
- bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
+ const OptionalOperator Float64RoundDown();
+ const OptionalOperator Float64RoundTruncate();
+ const OptionalOperator Float64RoundTiesAway();
// Floating point bit representation.
const Operator* Float64ExtractLowWord32();
@@ -225,6 +255,7 @@ class MachineOperatorBuilder final : public ZoneObject {
// Access to the machine stack.
const Operator* LoadStackPointer();
+ const Operator* LoadFramePointer();
// checked-load heap, index, length
const Operator* CheckedLoad(CheckedLoadRepresentation);
diff --git a/deps/v8/src/compiler/machine-type.h b/deps/v8/src/compiler/machine-type.h
index 02719f270f..f152611a14 100644
--- a/deps/v8/src/compiler/machine-type.h
+++ b/deps/v8/src/compiler/machine-type.h
@@ -18,28 +18,28 @@ namespace compiler {
// Machine-level types and representations.
// TODO(titzer): Use the real type system instead of MachineType.
-enum MachineType {
+enum MachineType : uint16_t {
// Representations.
- kRepBit = 1 << 0,
- kRepWord8 = 1 << 1,
- kRepWord16 = 1 << 2,
- kRepWord32 = 1 << 3,
- kRepWord64 = 1 << 4,
- kRepFloat32 = 1 << 5,
- kRepFloat64 = 1 << 6,
- kRepTagged = 1 << 7,
+ kRepBit = 1u << 0,
+ kRepWord8 = 1u << 1,
+ kRepWord16 = 1u << 2,
+ kRepWord32 = 1u << 3,
+ kRepWord64 = 1u << 4,
+ kRepFloat32 = 1u << 5,
+ kRepFloat64 = 1u << 6,
+ kRepTagged = 1u << 7,
// Types.
- kTypeBool = 1 << 8,
- kTypeInt32 = 1 << 9,
- kTypeUint32 = 1 << 10,
- kTypeInt64 = 1 << 11,
- kTypeUint64 = 1 << 12,
- kTypeNumber = 1 << 13,
- kTypeAny = 1 << 14,
+ kTypeBool = 1u << 8,
+ kTypeInt32 = 1u << 9,
+ kTypeUint32 = 1u << 10,
+ kTypeInt64 = 1u << 11,
+ kTypeUint64 = 1u << 12,
+ kTypeNumber = 1u << 13,
+ kTypeAny = 1u << 14,
// Machine types.
- kMachNone = 0,
+ kMachNone = 0u,
kMachBool = kRepBit | kTypeBool,
kMachFloat32 = kRepFloat32 | kTypeNumber,
kMachFloat64 = kRepFloat64 | kTypeNumber,
@@ -57,6 +57,10 @@ enum MachineType {
kMachAnyTagged = kRepTagged | kTypeAny
};
+V8_INLINE size_t hash_value(MachineType type) {
+ return static_cast<size_t>(type);
+}
+
std::ostream& operator<<(std::ostream& os, const MachineType& type);
typedef uint16_t MachineTypeUnion;
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 0e45172a5b..5a69658cbf 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -399,10 +399,6 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ LeaveFrame(StackFrame::MANUAL);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
}
}
@@ -463,6 +459,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(at);
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
@@ -487,6 +499,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -622,6 +637,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
+ case kMipsMaxS:
+ __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMinS:
+ __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMipsCmpD:
       // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -663,6 +686,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
+ case kMipsMaxD:
+ __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMinD:
+ __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMipsFloat64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
@@ -876,42 +907,32 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
-
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
// kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
cc = FlagsConditionToConditionOvf(branch->condition);
__ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
-
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
} else if (instr->arch_opcode() == kMipsCmpS) {
if (!convertCondition(branch->condition, cc)) {
       UNSUPPORTED_COND(kMipsCmpS, branch->condition);
}
__ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
i.InputSingleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
} else if (instr->arch_opcode() == kMipsCmpD) {
if (!convertCondition(branch->condition, cc)) {
       UNSUPPORTED_COND(kMipsCmpD, branch->condition);
}
__ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
UNIMPLEMENTED();
}
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
}
@@ -1043,23 +1064,28 @@ void CodeGenerator::AssemblePrologue() {
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
+
const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- // TODO(plind): make callee save size const, possibly DCHECK it.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ MultiPush(saves);
- }
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
+ int register_save_area_size = kNumCalleeSaved * kPointerSize;
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
+    register_save_area_size += kNumCalleeSavedFPU * kDoubleSize;
+
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -1096,22 +1122,36 @@ void CodeGenerator::AssembleReturn() {
if (stack_slots > 0) {
__ Addu(sp, sp, Operand(stack_slots * kPointerSize));
}
- // Restore registers.
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ __ MultiPopFPU(saves_fpu);
+
+ // Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ MultiPop(saves);
- }
+ __ MultiPop(saves);
}
__ mov(sp, fp);
__ Pop(ra, fp);
__ Ret();
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ DropAndRet(pop_count);
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count != 0) {
+ __ DropAndRet(pop_count);
+ } else {
+ __ Ret();
+ }
+ }
} else {
__ Ret();
}
@@ -1322,7 +1362,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
}
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 372b41462f..db8f2511e9 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -42,6 +42,8 @@ namespace compiler {
V(MipsModS) \
V(MipsAbsS) \
V(MipsSqrtS) \
+ V(MipsMaxS) \
+ V(MipsMinS) \
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
@@ -50,6 +52,8 @@ namespace compiler {
V(MipsModD) \
V(MipsAbsD) \
V(MipsSqrtD) \
+ V(MipsMaxD) \
+ V(MipsMinD) \
V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
V(MipsFloat64RoundUp) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index b4e811f0f8..c2420ac0d8 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -391,6 +391,17 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kMipsTruncWD, node);
+ }
+ UNREACHABLE();
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMipsAddS, node);
}
@@ -512,23 +523,44 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
- // Possibly align stack here for functions.
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (Node* node : buffer.pushed_nodes) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ // Possibly align stack here for functions.
+ int push_count = buffer.pushed_nodes.size();
+ if (push_count > 0) {
+ Emit(kMipsStackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ int slot = buffer.pushed_nodes.size() - 1;
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ slot--;
+ }
}
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -536,18 +568,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
@@ -565,16 +600,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, false);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
diff --git a/deps/v8/src/compiler/mips/linkage-mips.cc b/deps/v8/src/compiler/mips/linkage-mips.cc
index 9480b73eae..7b03340a0a 100644
--- a/deps/v8/src/compiler/mips/linkage-mips.cc
+++ b/deps/v8/src/compiler/mips/linkage-mips.cc
@@ -23,11 +23,16 @@ struct MipsLinkageHelperTraits {
return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
s6.bit() | s7.bit();
}
+ static RegList CCalleeSaveFPRegisters() {
+ return f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() |
+ f30.bit();
+ }
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {a0, a1, a2, a3};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 4; }
+ static int CStackBackingStoreLength() { return 0; }
};
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 5fcf95befe..72114215a0 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -310,7 +310,7 @@ FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
__ Daddu(at, i.InputRegister(2), offset); \
__ asm_instr(result, MemOperand(at, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
} \
@@ -328,7 +328,7 @@ FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
__ Daddu(at, i.InputRegister(2), offset); \
__ asm_instr(result, MemOperand(at, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
} \
@@ -346,7 +346,7 @@ FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
__ Daddu(at, i.InputRegister(3), offset); \
__ asm_instr(value, MemOperand(at, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.Input##width##Register(2); \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
@@ -365,7 +365,7 @@ FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
__ Daddu(at, i.InputRegister(3), offset); \
__ asm_instr(value, MemOperand(at, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.InputRegister(2); \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
@@ -399,10 +399,6 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ LeaveFrame(StackFrame::MANUAL);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
}
}
@@ -463,6 +459,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(at);
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
@@ -487,6 +499,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -556,24 +571,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+ int64_t imm = i.InputOperand(1).immediate();
+ __ sll(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
}
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
- __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srl(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
- __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+ int64_t imm = i.InputOperand(1).immediate();
+ __ sra(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
}
break;
case kMips64Ext:
@@ -588,11 +606,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(1)->IsRegister()) {
__ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
+ int64_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
- __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
+ __ dsll(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
} else {
- __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ __ dsll32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm - 32));
}
}
break;
@@ -600,11 +620,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(1)->IsRegister()) {
__ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
+ int64_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
- __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
+ __ dsrl(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
} else {
- __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ __ dsrl32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm - 32));
}
}
break;
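
The imm < 32 split above exists because MIPS64 shift instructions encode only a 5-bit shift amount; shifts of 32 to 63 use the "32" variants, which add 32 to the encoded amount. A sketch of the equivalence being relied on:

    #include <cstdint>

    // Conceptual model of dsll vs. dsll32 (semantics, not encodings).
    uint64_t Dsll(uint64_t x, int imm) { return x << imm; }           // imm in [0, 31]
    uint64_t Dsll32(uint64_t x, int imm) { return x << (imm + 32); }  // imm in [0, 31]

    uint64_t ShiftLeft64(uint64_t x, int imm) {  // imm in [0, 63]
      return imm < 32 ? Dsll(x, imm) : Dsll32(x, imm - 32);
    }
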
@@ -612,7 +634,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(1)->IsRegister()) {
__ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
+ int64_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
__ dsra(i.OutputRegister(), i.InputRegister(0), imm);
} else {
@@ -684,6 +706,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
+ case kMips64MaxS:
+ __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64MinS:
+ __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -725,6 +755,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
+ case kMips64MaxD:
+ __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64MinD:
+ __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMips64Float64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
@@ -945,37 +983,30 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(branch->condition);
-
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMips64CmpS) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpS, branch->condition);
}
__ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
i.InputSingleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
} else if (instr->arch_opcode() == kMips64CmpD) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, branch->condition);
}
__ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
UNIMPLEMENTED();
}
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
}
@@ -1077,7 +1108,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Label here;
__ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
- __ BlockTrampolinePoolFor(case_count * 2 + 7);
+ __ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
// Ensure that dd-ed labels use 8 byte aligned addresses.
if ((masm()->pc_offset() & 7) != 0) {
__ nop();
@@ -1109,23 +1140,28 @@ void CodeGenerator::AssemblePrologue() {
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
+
const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- // TODO(plind): make callee save size const, possibly DCHECK it.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ MultiPush(saves);
- }
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
+ int register_save_area_size = kNumCalleeSaved * kPointerSize;
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
+ register_save_area_size += kNumCalleeSavedFPU * kDoubleSize * kPointerSize;
+
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
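
RegList is a plain bit mask over the register file, which is why the DCHECKs above can compare fixed kNumCalleeSaved* constants against a population count of the mask. A standalone sketch of that accounting; ports differ in exactly how they size the FPU area, so treat the byte math as illustrative:

    #include <cstdint>

    using RegList = uint32_t;

    int CountPopulation32(uint32_t mask) {  // stand-in for base::bits
      int n = 0;
      while (mask != 0) { mask &= mask - 1; ++n; }
      return n;
    }

    // e.g. saves = s0..s7 -> 8 GP registers of kPointerSize bytes each,
    // saves_fpu = f20..f30 (even) -> 6 FPU registers of kDoubleSize bytes.
    int SaveAreaBytes(RegList saves, RegList saves_fpu) {
      constexpr int kPointerSize = 4, kDoubleSize = 8;  // mips32 assumption
      return CountPopulation32(saves) * kPointerSize +
             CountPopulation32(saves_fpu) * kDoubleSize;
    }
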
@@ -1162,22 +1198,36 @@ void CodeGenerator::AssembleReturn() {
if (stack_slots > 0) {
__ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
}
- // Restore registers.
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ __ MultiPopFPU(saves_fpu);
+
+ // Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ MultiPop(saves);
- }
+ __ MultiPop(saves);
}
__ mov(sp, fp);
__ Pop(ra, fp);
__ Ret();
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ DropAndRet(pop_count);
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count != 0) {
+ __ DropAndRet(pop_count);
+ } else {
+ __ Ret();
+ }
+ }
} else {
__ Ret();
}
@@ -1388,7 +1438,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
}
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 7c09a4291d..ce95ad4e37 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -53,6 +53,8 @@ namespace compiler {
V(Mips64ModS) \
V(Mips64AbsS) \
V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
@@ -61,6 +63,8 @@ namespace compiler {
V(Mips64ModD) \
V(Mips64AbsD) \
V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 52da27f3ac..e4d8795f1b 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -393,7 +393,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
- int64_t value = m.right().Value();
+ int32_t value = static_cast<int32_t>(m.right().Value());
if (base::bits::IsPowerOfTwo32(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -539,6 +539,17 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kMips64TruncWD, node);
+ }
+ UNREACHABLE();
+}
+
+
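
The two truncation modes only differ on inputs outside int32 range: kRoundToZero can use the hardware truncate directly, while kJavaScript must implement ECMAScript ToInt32, which wraps modulo 2^32. A sketch of the JS semantics, assuming the usual two's-complement conversion:

    #include <cmath>
    #include <cstdint>

    // ECMAScript ToInt32: NaN/Infinity -> 0, otherwise truncate toward
    // zero and wrap modulo 2^32 into the int32 range.
    int32_t JsToInt32(double d) {
      if (!std::isfinite(d)) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    // JsToInt32(2147483648.0) == INT32_MIN, whereas a bare round-to-zero
    // truncation of the same input gives an out-of-range result that the
    // hardware handles in its own (often saturating) way.
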
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMips64AddS, node);
}
@@ -654,30 +665,50 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
+ InitializeCallBuffer(node, &buffer, true, true);
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (Node* node : buffer.pushed_nodes) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
+ if (push_count > 0) {
+ Emit(kMips64StackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ int32_t slot = push_count - 1;
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ slot--;
+ }
}
  // Pass the label of the exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -685,12 +716,16 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
@@ -714,16 +749,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, false);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -754,12 +785,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, false);
- int push_count = buffer.pushed_nodes.size();
+ const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
if (push_count > 0) {
Emit(kMips64StackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- int slot = buffer.pushed_nodes.size() - 1;
+ int slot = push_count - 1;
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
g.TempImmediate(slot << kPointerSizeLog2));
@@ -1024,6 +1055,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(selector, value, cont);
@@ -1200,6 +1234,12 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat32Compare(this, node, &cont);
diff --git a/deps/v8/src/compiler/mips64/linkage-mips64.cc b/deps/v8/src/compiler/mips64/linkage-mips64.cc
index 6fed0617d0..acfedb715f 100644
--- a/deps/v8/src/compiler/mips64/linkage-mips64.cc
+++ b/deps/v8/src/compiler/mips64/linkage-mips64.cc
@@ -23,11 +23,16 @@ struct MipsLinkageHelperTraits {
return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
s6.bit() | s7.bit();
}
+ static RegList CCalleeSaveFPRegisters() {
+ return f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() |
+ f30.bit();
+ }
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
+ static int CStackBackingStoreLength() { return 0; }
};
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index d55005f768..b869185e60 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -42,7 +42,7 @@ int FindFirstNonEmptySlot(Instruction* instr) {
return i;
}
-} // namepace
+} // namespace
MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
diff --git a/deps/v8/src/compiler/node-marker.cc b/deps/v8/src/compiler/node-marker.cc
index fb7c1e192a..fdfb22b21a 100644
--- a/deps/v8/src/compiler/node-marker.cc
+++ b/deps/v8/src/compiler/node-marker.cc
@@ -5,7 +5,6 @@
#include "src/compiler/node-marker.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
namespace v8 {
namespace internal {
@@ -17,32 +16,6 @@ NodeMarkerBase::NodeMarkerBase(Graph* graph, uint32_t num_states)
DCHECK_LT(mark_min_, mark_max_); // check for wraparound.
}
-
-Mark NodeMarkerBase::Get(Node* node) {
- Mark mark = node->mark();
- if (mark < mark_min_) {
- mark = mark_min_;
- node->set_mark(mark_min_);
- }
- DCHECK_LT(mark, mark_max_);
- return mark - mark_min_;
-}
-
-
-void NodeMarkerBase::Set(Node* node, Mark mark) {
- DCHECK_LT(mark, mark_max_ - mark_min_);
- DCHECK_LT(node->mark(), mark_max_);
- node->set_mark(mark + mark_min_);
-}
-
-
-void NodeMarkerBase::Reset(Graph* graph) {
- uint32_t const num_states = mark_max_ - mark_min_;
- mark_min_ = graph->mark_max_;
- mark_max_ = graph->mark_max_ += num_states;
- DCHECK_LT(mark_min_, mark_max_); // check for wraparound.
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
index 853ba22288..5ef2063f18 100644
--- a/deps/v8/src/compiler/node-marker.h
+++ b/deps/v8/src/compiler/node-marker.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_NODE_MARKER_H_
#define V8_COMPILER_NODE_MARKER_H_
-#include "src/base/macros.h"
+#include "src/compiler/node.h"
namespace v8 {
namespace internal {
@@ -13,13 +13,6 @@ namespace compiler {
// Forward declarations.
class Graph;
-class Node;
-
-
-// Marks are used during traversal of the graph to distinguish states of nodes.
-// Each node has a mark which is a monotonically increasing integer, and a
-// {NodeMarker} has a range of values that indicate states of a node.
-typedef uint32_t Mark;
// Base class for templatized NodeMarkers.
@@ -27,13 +20,24 @@ class NodeMarkerBase {
public:
NodeMarkerBase(Graph* graph, uint32_t num_states);
- Mark Get(Node* node);
- void Set(Node* node, Mark mark);
- void Reset(Graph* graph);
+ V8_INLINE Mark Get(Node* node) {
+ Mark mark = node->mark();
+ if (mark < mark_min_) {
+ mark = mark_min_;
+ node->set_mark(mark_min_);
+ }
+ DCHECK_LT(mark, mark_max_);
+ return mark - mark_min_;
+ }
+ V8_INLINE void Set(Node* node, Mark mark) {
+ DCHECK_LT(mark, mark_max_ - mark_min_);
+ DCHECK_LT(node->mark(), mark_max_);
+ node->set_mark(mark + mark_min_);
+ }
private:
- Mark mark_min_;
- Mark mark_max_;
+ Mark const mark_min_;
+ Mark const mark_max_;
DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
};
@@ -44,14 +48,14 @@ class NodeMarkerBase {
template <typename State>
class NodeMarker : public NodeMarkerBase {
public:
- NodeMarker(Graph* graph, uint32_t num_states)
+ V8_INLINE NodeMarker(Graph* graph, uint32_t num_states)
: NodeMarkerBase(graph, num_states) {}
- State Get(Node* node) {
+ V8_INLINE State Get(Node* node) {
return static_cast<State>(NodeMarkerBase::Get(node));
}
- void Set(Node* node, State state) {
+ V8_INLINE void Set(Node* node, State state) {
NodeMarkerBase::Set(node, static_cast<Mark>(state));
}
};
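
The point of handing out mark ranges from the graph is that "resetting" all marks is O(1): a new marker claims the next unused interval, so anything written by an older marker reads back as state 0 in Get(). A standalone miniature of the trick (all names invented):

    #include <cstdint>

    struct MiniNode  { uint32_t mark = 0; };
    struct MiniGraph { uint32_t mark_max = 1; };  // next free mark value

    class MiniMarker {
     public:
      MiniMarker(MiniGraph* graph, uint32_t num_states)
          : min_(graph->mark_max), max_(graph->mark_max += num_states) {}
      uint32_t Get(const MiniNode* node) const {
        // Marks below min_ were written by an older marker: state 0.
        return node->mark < min_ ? 0 : node->mark - min_;
      }
      void Set(MiniNode* node, uint32_t state) { node->mark = state + min_; }
     private:
      uint32_t min_, max_;  // this marker's claimed interval
    };
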
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index d6e8c6943c..d543425fca 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -152,11 +152,10 @@ typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
// A pattern matcher for heap object constants.
-template <typename T>
struct HeapObjectMatcher final
- : public ValueMatcher<Unique<T>, IrOpcode::kHeapConstant> {
+ : public ValueMatcher<Unique<HeapObject>, IrOpcode::kHeapConstant> {
explicit HeapObjectMatcher(Node* node)
- : ValueMatcher<Unique<T>, IrOpcode::kHeapConstant>(node) {}
+ : ValueMatcher<Unique<HeapObject>, IrOpcode::kHeapConstant>(node) {}
};
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 9e665d10e9..19ca5dd1b6 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -120,8 +120,9 @@ bool NodeProperties::IsControlEdge(Edge edge) {
// static
bool NodeProperties::IsExceptionalCall(Node* node) {
- for (Node* const use : node->uses()) {
- if (use->opcode() == IrOpcode::kIfException) return true;
+ for (Edge const edge : node->use_edges()) {
+ if (!NodeProperties::IsControlEdge(edge)) continue;
+ if (edge.from()->opcode() == IrOpcode::kIfException) return true;
}
return false;
}
@@ -163,40 +164,31 @@ void NodeProperties::RemoveNonValueInputs(Node* node) {
void NodeProperties::MergeControlToEnd(Graph* graph,
CommonOperatorBuilder* common,
Node* node) {
- // Connect the node to the merge exiting the graph.
- Node* end_pred = NodeProperties::GetControlInput(graph->end());
- if (end_pred->opcode() == IrOpcode::kMerge) {
- int inputs = end_pred->op()->ControlInputCount() + 1;
- end_pred->AppendInput(graph->zone(), node);
- end_pred->set_op(common->Merge(inputs));
- } else {
- Node* merge = graph->NewNode(common->Merge(2), end_pred, node);
- NodeProperties::ReplaceControlInput(graph->end(), merge);
- }
+ graph->end()->AppendInput(graph->zone(), node);
+ graph->end()->set_op(common->End(graph->end()->InputCount()));
}
// static
-void NodeProperties::ReplaceWithValue(Node* node, Node* value, Node* effect,
- Node* control) {
- if (!effect && node->op()->EffectInputCount() > 0) {
- effect = NodeProperties::GetEffectInput(node);
- }
- if (control == nullptr && node->op()->ControlInputCount() > 0) {
- control = NodeProperties::GetControlInput(node);
- }
-
+void NodeProperties::ReplaceUses(Node* node, Node* value, Node* effect,
+ Node* success, Node* exception) {
// Requires distinguishing between value, effect and control edges.
for (Edge edge : node->use_edges()) {
if (IsControlEdge(edge)) {
- DCHECK_EQ(IrOpcode::kIfSuccess, edge.from()->opcode());
- DCHECK_NOT_NULL(control);
- edge.from()->ReplaceUses(control);
- edge.UpdateTo(NULL);
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ DCHECK_NOT_NULL(success);
+ edge.UpdateTo(success);
+ } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK_NOT_NULL(exception);
+ edge.UpdateTo(exception);
+ } else {
+ UNREACHABLE();
+ }
} else if (IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
edge.UpdateTo(effect);
} else {
+ DCHECK_NOT_NULL(value);
edge.UpdateTo(value);
}
}
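
Compared with the old ReplaceWithValue, ReplaceUses makes the caller spell out the destination for every use kind, including the IfException projection. A hypothetical call site (all names invented), folding away a call whose result is known:

    // 'call' is replaced by 'constant'; effect uses chain to 'effect',
    // IfSuccess continues at 'control', and passing nullptr for the
    // exception successor asserts (via DCHECK) that no IfException
    // projection exists on this call.
    // NodeProperties::ReplaceUses(call, constant, effect, control, nullptr);
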
@@ -223,7 +215,9 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
std::memset(projections, 0, sizeof(*projections) * projection_count);
#endif
size_t if_value_index = 0;
- for (Node* const use : node->uses()) {
+ for (Edge const edge : node->use_edges()) {
+ if (!IsControlEdge(edge)) continue;
+ Node* use = edge.from();
size_t index;
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -235,11 +229,11 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
index = 1;
break;
case IrOpcode::kIfSuccess:
- DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
index = 0;
break;
case IrOpcode::kIfException:
- DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
index = 1;
break;
case IrOpcode::kIfValue:
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 0f25051e4e..6d11f6cfcc 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -88,11 +88,11 @@ class NodeProperties final {
static void MergeControlToEnd(Graph* graph, CommonOperatorBuilder* common,
Node* node);
- // Replace value uses of {node} with {value} and effect uses of {node} with
- // {effect}. If {effect == NULL}, then use the effect input to {node}. All
- // control uses will be relaxed assuming {node} cannot throw.
- static void ReplaceWithValue(Node* node, Node* value, Node* effect = nullptr,
- Node* control = nullptr);
+ // Replace all uses of {node} with the given replacement nodes. All occurring
+  // use kinds need to be replaced; {NULL} is only valid if a use kind is
+ // guaranteed not to exist.
+ static void ReplaceUses(Node* node, Node* value, Node* effect = nullptr,
+ Node* success = nullptr, Node* exception = nullptr);
// ---------------------------------------------------------------------------
// Miscellaneous utilities.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 724c9f173e..e92dccc739 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -4,37 +4,116 @@
#include "src/compiler/node.h"
-#include <algorithm>
-
namespace v8 {
namespace internal {
namespace compiler {
+Node::OutOfLineInputs* Node::OutOfLineInputs::New(Zone* zone, int capacity) {
+ size_t size =
+ sizeof(OutOfLineInputs) + capacity * (sizeof(Node*) + sizeof(Use));
+ intptr_t raw_buffer = reinterpret_cast<intptr_t>(zone->New(size));
+ Node::OutOfLineInputs* outline =
+ reinterpret_cast<OutOfLineInputs*>(raw_buffer + capacity * sizeof(Use));
+ outline->capacity_ = capacity;
+ outline->count_ = 0;
+ return outline;
+}
+
+
+void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr,
+ int count) {
+ // Extract the inputs from the old use and input pointers and copy them
+  // to this out-of-line storage.
+ Use* new_use_ptr = reinterpret_cast<Use*>(this) - 1;
+ Node** new_input_ptr = inputs_;
+ for (int current = 0; current < count; current++) {
+ new_use_ptr->bit_field_ =
+ Use::InputIndexField::encode(current) | Use::InlineField::encode(false);
+ DCHECK_EQ(old_input_ptr, old_use_ptr->input_ptr());
+ DCHECK_EQ(new_input_ptr, new_use_ptr->input_ptr());
+ Node* old_to = *old_input_ptr;
+ if (old_to) {
+ *old_input_ptr = nullptr;
+ old_to->RemoveUse(old_use_ptr);
+ *new_input_ptr = old_to;
+ old_to->AppendUse(new_use_ptr);
+ } else {
+ *new_input_ptr = nullptr;
+ }
+ old_input_ptr++;
+ new_input_ptr++;
+ old_use_ptr--;
+ new_use_ptr--;
+ }
+ this->count_ = count;
+}
+
+
Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
- Node** inputs, bool has_extensible_inputs) {
- size_t node_size = sizeof(Node) - sizeof(Input);
- int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
- size_t inputs_size = std::max<size_t>(
- (input_count + reserve_input_count) * sizeof(Input), sizeof(InputDeque*));
- size_t uses_size = input_count * sizeof(Use);
- int size = static_cast<int>(node_size + inputs_size + uses_size);
- void* buffer = zone->New(size);
- Node* result = new (buffer) Node(id, op, input_count, reserve_input_count);
- Input* input = result->inputs_.static_;
- Use* use =
- reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+ Node* const* inputs, bool has_extensible_inputs) {
+ Node** input_ptr;
+ Use* use_ptr;
+ Node* node;
+ bool is_inline;
+
+ if (input_count > kMaxInlineCapacity) {
+ // Allocate out-of-line inputs.
+ int capacity =
+ has_extensible_inputs ? input_count + kMaxInlineCapacity : input_count;
+ OutOfLineInputs* outline = OutOfLineInputs::New(zone, capacity);
+
+ // Allocate node.
+ void* node_buffer = zone->New(sizeof(Node));
+ node = new (node_buffer) Node(id, op, kOutlineMarker, 0);
+ node->inputs_.outline_ = outline;
+
+ outline->node_ = node;
+ outline->count_ = input_count;
+
+ input_ptr = outline->inputs_;
+ use_ptr = reinterpret_cast<Use*>(outline);
+ is_inline = false;
+ } else {
+ // Allocate node with inline inputs.
+ int capacity = input_count;
+ if (has_extensible_inputs) {
+ const int max = kMaxInlineCapacity;
+ capacity = std::min(input_count + 3, max);
+ }
+ size_t size = sizeof(Node) + capacity * (sizeof(Node*) + sizeof(Use));
+ intptr_t raw_buffer = reinterpret_cast<intptr_t>(zone->New(size));
+ void* node_buffer =
+ reinterpret_cast<void*>(raw_buffer + capacity * sizeof(Use));
+
+ node = new (node_buffer) Node(id, op, input_count, capacity);
+ input_ptr = node->inputs_.inline_;
+ use_ptr = reinterpret_cast<Use*>(node);
+ is_inline = true;
+ }
+
+ // Initialize the input pointers and the uses.
for (int current = 0; current < input_count; ++current) {
Node* to = *inputs++;
- input->to = to;
- input->use = use;
- use->input_index = current;
- use->from = result;
+ input_ptr[current] = to;
+ Use* use = use_ptr - 1 - current;
+ use->bit_field_ = Use::InputIndexField::encode(current) |
+ Use::InlineField::encode(is_inline);
to->AppendUse(use);
- ++use;
- ++input;
}
- return result;
+ node->Verify();
+ return node;
+}
+
+
+Node* Node::Clone(Zone* zone, NodeId id, const Node* node) {
+ int const input_count = node->InputCount();
+ Node* const* const inputs = node->has_inline_inputs()
+ ? node->inputs_.inline_
+ : node->inputs_.outline_->inputs_;
+ Node* const clone = New(zone, id, node->op(), input_count, inputs, false);
+ clone->set_bounds(node->bounds());
+ return clone;
}
@@ -48,22 +127,47 @@ void Node::Kill() {
void Node::AppendInput(Zone* zone, Node* new_to) {
DCHECK_NOT_NULL(zone);
DCHECK_NOT_NULL(new_to);
- Use* new_use = new (zone) Use;
- Input new_input;
- new_input.to = new_to;
- new_input.use = new_use;
- if (reserved_input_count() > 0) {
- DCHECK(!has_appendable_inputs());
- set_reserved_input_count(reserved_input_count() - 1);
- inputs_.static_[input_count()] = new_input;
+
+ int inline_count = InlineCountField::decode(bit_field_);
+ int inline_capacity = InlineCapacityField::decode(bit_field_);
+ if (inline_count < inline_capacity) {
+ // Append inline input.
+ bit_field_ = InlineCountField::update(bit_field_, inline_count + 1);
+ *GetInputPtr(inline_count) = new_to;
+ Use* use = GetUsePtr(inline_count);
+ use->bit_field_ = Use::InputIndexField::encode(inline_count) |
+ Use::InlineField::encode(true);
+ new_to->AppendUse(use);
} else {
- EnsureAppendableInputs(zone);
- inputs_.appendable_->push_back(new_input);
+ // Append out-of-line input.
+ int input_count = InputCount();
+ OutOfLineInputs* outline = nullptr;
+ if (inline_count != kOutlineMarker) {
+      // Switch to out-of-line inputs.
+ outline = OutOfLineInputs::New(zone, input_count * 2 + 3);
+ outline->node_ = this;
+ outline->ExtractFrom(GetUsePtr(0), GetInputPtr(0), input_count);
+ bit_field_ = InlineCountField::update(bit_field_, kOutlineMarker);
+ inputs_.outline_ = outline;
+ } else {
+ // use current out of line inputs.
+ outline = inputs_.outline_;
+ if (input_count >= outline->capacity_) {
+        // Out of space in the out-of-line inputs; grow them.
+ outline = OutOfLineInputs::New(zone, input_count * 2 + 3);
+ outline->node_ = this;
+ outline->ExtractFrom(GetUsePtr(0), GetInputPtr(0), input_count);
+ inputs_.outline_ = outline;
+ }
+ }
+ outline->count_++;
+ *GetInputPtr(input_count) = new_to;
+ Use* use = GetUsePtr(input_count);
+ use->bit_field_ = Use::InputIndexField::encode(input_count) |
+ Use::InlineField::encode(false);
+ new_to->AppendUse(use);
}
- new_use->input_index = input_count();
- new_use->from = this;
- new_to->AppendUse(new_use);
- set_input_count(input_count() + 1);
+ Verify();
}
@@ -76,6 +180,7 @@ void Node::InsertInput(Zone* zone, int index, Node* new_to) {
ReplaceInput(i, InputAt(i - 1));
}
ReplaceInput(index, new_to);
+ Verify();
}
@@ -86,26 +191,38 @@ void Node::RemoveInput(int index) {
ReplaceInput(index, InputAt(index + 1));
}
TrimInputCount(InputCount() - 1);
+ Verify();
}
-void Node::NullAllInputs() {
- for (Edge edge : input_edges()) edge.UpdateTo(nullptr);
+void Node::ClearInputs(int start, int count) {
+ Node** input_ptr = GetInputPtr(start);
+ Use* use_ptr = GetUsePtr(start);
+ while (count-- > 0) {
+ DCHECK_EQ(input_ptr, use_ptr->input_ptr());
+ Node* input = *input_ptr;
+ *input_ptr = nullptr;
+ if (input) input->RemoveUse(use_ptr);
+ input_ptr++;
+ use_ptr--;
+ }
+ Verify();
}
+void Node::NullAllInputs() { ClearInputs(0, InputCount()); }
+
+
void Node::TrimInputCount(int new_input_count) {
- DCHECK_LE(new_input_count, input_count());
- if (new_input_count == input_count()) return; // Nothing to do.
- for (int index = new_input_count; index < input_count(); ++index) {
- ReplaceInput(index, nullptr);
- }
- if (!has_appendable_inputs()) {
- set_reserved_input_count(std::min<int>(
- ReservedInputCountField::kMax,
- reserved_input_count() + (input_count() - new_input_count)));
+ int current_count = InputCount();
+ DCHECK_LE(new_input_count, current_count);
+ if (new_input_count == current_count) return; // Nothing to do.
+ ClearInputs(new_input_count, current_count - new_input_count);
+ if (has_inline_inputs()) {
+ bit_field_ = InlineCountField::update(bit_field_, new_input_count);
+ } else {
+ inputs_.outline_->count_ = new_input_count;
}
- set_input_count(new_input_count);
}
@@ -125,7 +242,7 @@ void Node::ReplaceUses(Node* that) {
// Update the pointers to {this} to point to {that}.
Use* last_use = nullptr;
for (Use* use = this->first_use_; use; use = use->next) {
- use->from->GetInputRecordPtr(use->input_index)->to = that;
+ *use->input_ptr() = that;
last_use = use;
}
if (last_use) {
@@ -141,9 +258,10 @@ void Node::ReplaceUses(Node* that) {
bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
unsigned mask = 0;
for (Use* use = first_use_; use; use = use->next) {
- if (use->from == owner1) {
+ Node* from = use->from();
+ if (from == owner1) {
mask |= 1;
- } else if (use->from == owner2) {
+ } else if (from == owner2) {
mask |= 2;
} else {
return false;
@@ -153,50 +271,21 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
}
-void Node::Input::Update(Node* new_to) {
- Node* old_to = this->to;
- if (new_to == old_to) return; // Nothing to do.
- // Snip out the use from where it used to be
- if (old_to) {
- old_to->RemoveUse(use);
- }
- to = new_to;
- // And put it into the new node's use list.
- if (new_to) {
- new_to->AppendUse(use);
- } else {
- use->next = nullptr;
- use->prev = nullptr;
- }
-}
-
-
-Node::Node(NodeId id, const Operator* op, int input_count,
- int reserved_input_count)
+Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
: op_(op),
mark_(0),
- id_(id),
- bit_field_(InputCountField::encode(input_count) |
- ReservedInputCountField::encode(reserved_input_count) |
- HasAppendableInputsField::encode(false)),
- first_use_(nullptr) {}
-
-
-void Node::EnsureAppendableInputs(Zone* zone) {
- if (!has_appendable_inputs()) {
- void* deque_buffer = zone->New(sizeof(InputDeque));
- InputDeque* deque = new (deque_buffer) InputDeque(zone);
- for (int i = 0; i < input_count(); ++i) {
- deque->push_back(inputs_.static_[i]);
- }
- inputs_.appendable_ = deque;
- set_has_appendable_inputs(true);
- }
+ bit_field_(IdField::encode(id) | InlineCountField::encode(inline_count) |
+ InlineCapacityField::encode(inline_capacity)),
+ first_use_(nullptr) {
+ // Inputs must either be out of line or within the inline capacity.
+ DCHECK(inline_capacity <= kMaxInlineCapacity);
+ DCHECK(inline_count == kOutlineMarker || inline_count <= inline_capacity);
}
-void Node::AppendUse(Use* const use) {
+void Node::AppendUse(Use* use) {
DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
+ DCHECK_EQ(this, *use->input_ptr());
use->next = first_use_;
use->prev = nullptr;
if (first_use_) first_use_->prev = use;
@@ -204,7 +293,7 @@ void Node::AppendUse(Use* const use) {
}
-void Node::RemoveUse(Use* const use) {
+void Node::RemoveUse(Use* use) {
DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
if (use->prev) {
DCHECK_NE(first_use_, use);
@@ -219,6 +308,44 @@ void Node::RemoveUse(Use* const use) {
}
+#if DEBUG
+void Node::Verify() {
+ // Check basic sanity of input data structures.
+ fflush(stdout);
+ int count = this->InputCount();
+ // Avoid quadratic explosion for mega nodes; only verify if the input
+  // count is at most 200 or is a multiple of 100.
+ if (count > 200 && count % 100) return;
+
+ for (int i = 0; i < count; i++) {
+ CHECK_EQ(i, this->GetUsePtr(i)->input_index());
+ CHECK_EQ(this->GetInputPtr(i), this->GetUsePtr(i)->input_ptr());
+ CHECK_EQ(count, this->InputCount());
+ }
+ { // Direct input iteration.
+ int index = 0;
+ for (Node* input : this->inputs()) {
+ CHECK_EQ(this->InputAt(index), input);
+ index++;
+ }
+ CHECK_EQ(count, index);
+ CHECK_EQ(this->InputCount(), index);
+ }
+ { // Input edge iteration.
+ int index = 0;
+ for (Edge edge : this->input_edges()) {
+ CHECK_EQ(edge.from(), this);
+ CHECK_EQ(index, edge.index());
+ CHECK_EQ(this->InputAt(index), edge.to());
+ index++;
+ }
+ CHECK_EQ(count, index);
+ CHECK_EQ(this->InputCount(), index);
+ }
+}
+#endif
+
+
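
The 200/100 cap in Verify() above is worth a quick cost estimate: verification is linear in the input count and runs after every mutation, so incrementally building an n-input node would otherwise do quadratic work. A back-of-the-envelope sketch with the thresholds from the code:

    // Appending n inputs one at a time, verifying after each append:
    //   full:   1 + 2 + ... + n  ~  n^2 / 2 checks
    //   capped: everything up to 200 inputs, then every 100th append only
    long long FullCost(long long n) { return n * (n + 1) / 2; }
    long long CappedCost(long long n) {
      if (n <= 200) return FullCost(n);
      return FullCost(200) + (n / 100 - 2) * n;  // rough upper bound
    }
    // e.g. n = 10000: ~5e7 checks uncapped vs. ~1e6 with the cap.
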
std::ostream& operator<<(std::ostream& os, const Node& n) {
os << n.id() << ": " << *n.op();
if (n.InputCount() > 0) {
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 46dd041f1f..6557635a2e 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -27,7 +27,7 @@ typedef uint32_t Mark;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
-typedef int32_t NodeId;
+typedef uint32_t NodeId;
// A Node is the basic primitive of graphs. Nodes are chained together by
@@ -42,7 +42,8 @@ typedef int32_t NodeId;
class Node final {
public:
static Node* New(Zone* zone, NodeId id, const Operator* op, int input_count,
- Node** inputs, bool has_extensible_inputs);
+ Node* const* inputs, bool has_extensible_inputs);
+ static Node* Clone(Zone* zone, NodeId id, const Node* node);
bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
void Kill();
@@ -55,11 +56,49 @@ class Node final {
return static_cast<IrOpcode::Value>(op_->opcode());
}
- NodeId id() const { return id_; }
+ NodeId id() const { return IdField::decode(bit_field_); }
+
+ int InputCount() const {
+ return has_inline_inputs() ? InlineCountField::decode(bit_field_)
+ : inputs_.outline_->count_;
+ }
+
+#if DEBUG
+ void Verify();
+#define BOUNDS_CHECK(index) \
+ do { \
+ if (index < 0 || index >= InputCount()) { \
+ V8_Fatal(__FILE__, __LINE__, "Node #%d:%s->InputAt(%d) out of bounds", \
+ id(), op()->mnemonic(), index); \
+ } \
+ } while (false)
+#else
+ // No bounds checks or verification in release mode.
+ inline void Verify() {}
+#define BOUNDS_CHECK(index) \
+ do { \
+ } while (false)
+#endif
+
+ Node* InputAt(int index) const {
+ BOUNDS_CHECK(index);
+ return *GetInputPtrConst(index);
+ }
+
+ void ReplaceInput(int index, Node* new_to) {
+ BOUNDS_CHECK(index);
+ Node** input_ptr = GetInputPtr(index);
+ Node* old_to = *input_ptr;
+ if (old_to != new_to) {
+ Use* use = GetUsePtr(index);
+ if (old_to) old_to->RemoveUse(use);
+ *input_ptr = new_to;
+ if (new_to) new_to->AppendUse(use);
+ }
+ }
+
+#undef BOUNDS_CHECK
- int InputCount() const { return input_count(); }
- Node* InputAt(int index) const { return GetInputRecordPtr(index)->to; }
- inline void ReplaceInput(int index, Node* new_to);
void AppendInput(Zone* zone, Node* new_to);
void InsertInput(Zone* zone, int index, Node* new_to);
void RemoveInput(int index);
@@ -143,98 +182,138 @@ class Node final {
// Returns true if {owner} is the user of {this} node.
bool OwnedBy(Node* owner) const {
- return first_use_ && first_use_->from == owner && !first_use_->next;
+ return first_use_ && first_use_->from() == owner && !first_use_->next;
}
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
private:
- struct Use final : public ZoneObject {
- Node* from;
+ struct Use;
+  // Out-of-line storage for inputs, used when the number of inputs overflows
+  // the capacity of the inline-allocated space.
+ struct OutOfLineInputs {
+ Node* node_;
+ int count_;
+ int capacity_;
+ Node* inputs_[1];
+
+ static OutOfLineInputs* New(Zone* zone, int capacity);
+ void ExtractFrom(Use* use_ptr, Node** input_ptr, int count);
+ };
+
+ // A link in the use chain for a node. Every input {i} to a node {n} has an
+ // associated {Use} which is linked into the use chain of the {i} node.
+ struct Use {
Use* next;
Use* prev;
- int input_index;
- };
+ uint32_t bit_field_;
+
+ int input_index() const { return InputIndexField::decode(bit_field_); }
+ int output_index() const { return OutputIndexField::decode(bit_field_); }
+ bool is_inline_use() const { return InlineField::decode(bit_field_); }
+ Node** input_ptr() {
+ int index = input_index();
+ Use* start = this + 1 + index;
+ Node** inputs = is_inline_use()
+ ? reinterpret_cast<Node*>(start)->inputs_.inline_
+ : reinterpret_cast<OutOfLineInputs*>(start)->inputs_;
+ return &inputs[index];
+ }
- class Input final {
- public:
- Node* to;
- Use* use;
+ Node* from() {
+ Use* start = this + 1 + input_index();
+ return is_inline_use() ? reinterpret_cast<Node*>(start)
+ : reinterpret_cast<OutOfLineInputs*>(start)->node_;
+ }
- void Update(Node* new_to);
+ typedef BitField<bool, 0, 1> InlineField;
+ typedef BitField<unsigned, 1, 17> InputIndexField;
+ typedef BitField<unsigned, 17, 14> OutputIndexField;
};
- inline Node(NodeId id, const Operator* op, int input_count,
- int reserve_input_count);
-
- inline void EnsureAppendableInputs(Zone* zone);
-
- Input* GetInputRecordPtr(int index) {
- return has_appendable_inputs() ? &((*inputs_.appendable_)[index])
- : &inputs_.static_[index];
+ //============================================================================
+ //== Memory layout ===========================================================
+ //============================================================================
+ // Saving space for big graphs is important. We use a memory layout trick to
+ // be able to map {Node} objects to {Use} objects and vice-versa in a
+ // space-efficient manner.
+ //
+ // {Use} links are laid out in memory directly before a {Node}, followed by
+ // direct pointers to input {Nodes}.
+ //
+ // inline case:
+ // |Use #N |Use #N-1|...|Use #1 |Use #0 |Node xxxx |I#0|I#1|...|I#N-1|I#N|
+ // ^ ^ ^
+ // + Use + Node + Input
+ //
+ // Since every {Use} instance records its {input_index}, pointer arithmetic
+ // can compute the {Node}.
+ //
+ // out-of-line case:
+ // |Node xxxx |
+ // ^ + outline ------------------+
+ // +----------------------------------------+
+ // | |
+ // v | node
+ // |Use #N |Use #N-1|...|Use #1 |Use #0 |OOL xxxxx |I#0|I#1|...|I#N-1|I#N|
+ // ^ ^
+ // + Use + Input
+ //
+ // Out-of-line storage of input lists is needed if appending an input to
+ // a node exceeds the maximum inline capacity.
+
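
A standalone miniature may make the pointer arithmetic concrete: uses grow downward from the node, inputs sit just after it, and either side can be recovered from the other (alignment and the out-of-line case are glossed over; all names invented):

    #include <cstdlib>

    struct MiniUse  { int input_index; };
    struct MiniNode { int input_count; };

    // One buffer: [Use #n-1 ... Use #0][MiniNode][input #0 ... input #n-1].
    MiniNode* NewMiniNode(int n) {
      char* raw = static_cast<char*>(std::malloc(
          n * (sizeof(MiniUse) + sizeof(MiniNode*)) + sizeof(MiniNode)));
      MiniNode* node = reinterpret_cast<MiniNode*>(raw + n * sizeof(MiniUse));
      node->input_count = n;
      return node;
    }

    // Use #i sits immediately below the node...
    MiniUse* GetUsePtr(MiniNode* node, int i) {
      return reinterpret_cast<MiniUse*>(node) - 1 - i;
    }
    // ...so from a use, stepping over (input_index + 1) Use slots finds it.
    MiniNode* NodeFromUse(MiniUse* use) {
      return reinterpret_cast<MiniNode*>(use + 1 + use->input_index);
    }
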
+ Node(NodeId id, const Operator* op, int inline_count, int inline_capacity);
+
+ Node* const* GetInputPtrConst(int input_index) const {
+ return has_inline_inputs() ? &(inputs_.inline_[input_index])
+ : &inputs_.outline_->inputs_[input_index];
+ }
+ Node** GetInputPtr(int input_index) {
+ return has_inline_inputs() ? &(inputs_.inline_[input_index])
+ : &inputs_.outline_->inputs_[input_index];
}
- const Input* GetInputRecordPtr(int index) const {
- return has_appendable_inputs() ? &((*inputs_.appendable_)[index])
- : &inputs_.static_[index];
+ Use* GetUsePtr(int input_index) {
+ Use* ptr = has_inline_inputs() ? reinterpret_cast<Use*>(this)
+ : reinterpret_cast<Use*>(inputs_.outline_);
+ return &ptr[-1 - input_index];
}
- inline void AppendUse(Use* const use);
- inline void RemoveUse(Use* const use);
+ void AppendUse(Use* use);
+ void RemoveUse(Use* use);
void* operator new(size_t, void* location) { return location; }
- typedef ZoneDeque<Input> InputDeque;
-
// Only NodeProperties should manipulate the bounds.
- Bounds bounds() { return bounds_; }
+ Bounds bounds() const { return bounds_; }
void set_bounds(Bounds b) { bounds_ = b; }
// Only NodeMarkers should manipulate the marks on nodes.
Mark mark() { return mark_; }
void set_mark(Mark mark) { mark_ = mark; }
- int input_count() const { return InputCountField::decode(bit_field_); }
- void set_input_count(int input_count) {
- DCHECK_LE(0, input_count);
- bit_field_ = InputCountField::update(bit_field_, input_count);
+ inline bool has_inline_inputs() const {
+ return InlineCountField::decode(bit_field_) != kOutlineMarker;
}
- int reserved_input_count() const {
- return ReservedInputCountField::decode(bit_field_);
- }
- void set_reserved_input_count(int reserved_input_count) {
- DCHECK_LE(0, reserved_input_count);
- bit_field_ =
- ReservedInputCountField::update(bit_field_, reserved_input_count);
- }
-
- bool has_appendable_inputs() const {
- return HasAppendableInputsField::decode(bit_field_);
- }
- void set_has_appendable_inputs(bool has_appendable_inputs) {
- bit_field_ =
- HasAppendableInputsField::update(bit_field_, has_appendable_inputs);
- }
+ void ClearInputs(int start, int count);
- typedef BitField<unsigned, 0, 29> InputCountField;
- typedef BitField<unsigned, 29, 2> ReservedInputCountField;
- typedef BitField<unsigned, 31, 1> HasAppendableInputsField;
- static const int kDefaultReservedInputs = ReservedInputCountField::kMax;
+ typedef BitField<NodeId, 0, 24> IdField;
+ typedef BitField<unsigned, 24, 4> InlineCountField;
+ typedef BitField<unsigned, 28, 4> InlineCapacityField;
+ static const int kOutlineMarker = InlineCountField::kMax;
+ static const int kMaxInlineCount = InlineCountField::kMax - 1;
+ static const int kMaxInlineCapacity = InlineCapacityField::kMax - 1;
const Operator* op_;
Bounds bounds_;
Mark mark_;
- NodeId const id_;
- unsigned bit_field_;
+ uint32_t bit_field_;
Use* first_use_;
union {
- // When a node is initially allocated, it uses a static buffer to hold its
- // inputs under the assumption that the number of outputs will not increase.
- // When the first input is appended, the static buffer is converted into a
- // deque to allow for space-efficient growing.
- Input static_[1];
- InputDeque* appendable_;
+ // Inline storage for inputs or out-of-line storage.
+ Node* inline_[1];
+ OutOfLineInputs* outline_;
} inputs_;
friend class Edge;
@@ -267,26 +346,38 @@ static inline const T& OpParameter(const Node* node) {
// the node having the input.
class Edge final {
public:
- Node* from() const { return input_->use->from; }
- Node* to() const { return input_->to; }
+ Node* from() const { return use_->from(); }
+ Node* to() const { return *input_ptr_; }
int index() const {
- int const index = input_->use->input_index;
- DCHECK_LT(index, input_->use->from->input_count());
+ int const index = use_->input_index();
+ DCHECK_LT(index, use_->from()->InputCount());
return index;
}
- bool operator==(const Edge& other) { return input_ == other.input_; }
+ bool operator==(const Edge& other) { return input_ptr_ == other.input_ptr_; }
bool operator!=(const Edge& other) { return !(*this == other); }
- void UpdateTo(Node* new_to) { input_->Update(new_to); }
+ void UpdateTo(Node* new_to) {
+ Node* old_to = *input_ptr_;
+ if (old_to != new_to) {
+ if (old_to) old_to->RemoveUse(use_);
+ *input_ptr_ = new_to;
+ if (new_to) new_to->AppendUse(use_);
+ }
+ }
private:
friend class Node::UseEdges::iterator;
friend class Node::InputEdges::iterator;
- explicit Edge(Node::Input* input) : input_(input) { DCHECK_NOT_NULL(input); }
+ Edge(Node::Use* use, Node** input_ptr) : use_(use), input_ptr_(input_ptr) {
+ DCHECK_NOT_NULL(use);
+ DCHECK_NOT_NULL(input_ptr);
+ DCHECK_EQ(input_ptr, use->input_ptr());
+ }
- Node::Input* input_;
+ Node::Use* use_;
+ Node** input_ptr_;
};
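
UpdateTo above is the one place an edge is re-pointed: unlink the use from the old target's chain, overwrite the input slot, relink on the new target, each O(1) because the use chain is an intrusive doubly-linked list. A detached sketch of the unlink/relink pair, mirroring RemoveUse/AppendUse from node.cc (types invented):

    struct Use    { Use* prev; Use* next; };
    struct Target { Use* first_use; };

    void RemoveUse(Target* t, Use* use) {  // O(1) unlink
      if (use->prev) use->prev->next = use->next;
      else t->first_use = use->next;
      if (use->next) use->next->prev = use->prev;
    }

    void AppendUse(Target* t, Use* use) {  // O(1) push-front
      use->prev = nullptr;
      use->next = t->first_use;
      if (t->first_use) t->first_use->prev = use;
      t->first_use = use;
    }
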
@@ -299,16 +390,18 @@ class Node::InputEdges::iterator final {
typedef Edge* pointer;
typedef Edge& reference;
- iterator() : input_(nullptr) {}
- iterator(const iterator& other) : input_(other.input_) {}
+ iterator() : use_(nullptr), input_ptr_(nullptr) {}
+ iterator(const iterator& other)
+ : use_(other.use_), input_ptr_(other.input_ptr_) {}
- Edge operator*() const { return Edge(input_); }
+ Edge operator*() const { return Edge(use_, input_ptr_); }
bool operator==(const iterator& other) const {
- return input_ == other.input_;
+ return input_ptr_ == other.input_ptr_;
}
bool operator!=(const iterator& other) const { return !(*this == other); }
iterator& operator++() {
- SetInput(Edge(input_).from(), input_->use->input_index + 1);
+ input_ptr_++;
+ use_--;
return *this;
}
iterator operator++(int);
@@ -316,20 +409,11 @@ class Node::InputEdges::iterator final {
private:
friend class Node;
- explicit iterator(Node* from, int index = 0) : input_(nullptr) {
- SetInput(from, index);
- }
+ explicit iterator(Node* from, int index = 0)
+ : use_(from->GetUsePtr(index)), input_ptr_(from->GetInputPtr(index)) {}
- void SetInput(Node* from, int index) {
- DCHECK(index >= 0 && index <= from->InputCount());
- if (index < from->InputCount()) {
- input_ = from->GetInputRecordPtr(index);
- } else {
- input_ = nullptr;
- }
- }
-
- Input* input_;
+ Use* use_;
+ Node** input_ptr_;
};
@@ -386,18 +470,13 @@ Node::Inputs::const_iterator Node::Inputs::end() const {
}
-// A forward iterator to visit the uses edges of a node. The edges are returned
-// in
-// the order in which they were added as inputs.
+// A forward iterator to visit the uses edges of a node.
class Node::UseEdges::iterator final {
public:
iterator(const iterator& other)
: current_(other.current_), next_(other.next_) {}
- Edge operator*() const {
- return Edge(current_->from->GetInputRecordPtr(current_->input_index));
- }
-
+ Edge operator*() const { return Edge(current_, current_->input_ptr()); }
bool operator==(const iterator& other) const {
return current_ == other.current_;
}
@@ -433,8 +512,7 @@ Node::UseEdges::iterator Node::UseEdges::end() const {
}
-// A forward iterator to visit the uses of a node. The uses are returned in
-// the order in which they were added as inputs.
+// A forward iterator to visit the uses of a node.
class Node::Uses::const_iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
@@ -445,7 +523,7 @@ class Node::Uses::const_iterator final {
const_iterator(const const_iterator& other) : current_(other.current_) {}
- Node* operator*() const { return current_->from; }
+ Node* operator*() const { return current_->from(); }
bool operator==(const const_iterator& other) const {
return other.current_ == current_;
}
@@ -476,11 +554,6 @@ Node::Uses::const_iterator Node::Uses::begin() const {
Node::Uses::const_iterator Node::Uses::end() const { return const_iterator(); }
-
-void Node::ReplaceInput(int index, Node* new_to) {
- GetInputRecordPtr(index)->Update(new_to);
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/opcodes.cc b/deps/v8/src/compiler/opcodes.cc
index 1c94c19c78..2a8e01a26d 100644
--- a/deps/v8/src/compiler/opcodes.cc
+++ b/deps/v8/src/compiler/opcodes.cc
@@ -5,6 +5,7 @@
#include "src/compiler/opcodes.h"
#include <algorithm>
+#include <ostream>
#include "src/base/macros.h"
@@ -29,6 +30,11 @@ char const* IrOpcode::Mnemonic(Value value) {
return kMnemonics[n];
}
+
+std::ostream& operator<<(std::ostream& os, IrOpcode::Value opcode) {
+ return os << IrOpcode::Mnemonic(opcode);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 270b73f294..dcf71eb43d 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -5,10 +5,11 @@
#ifndef V8_COMPILER_OPCODES_H_
#define V8_COMPILER_OPCODES_H_
+#include <iosfwd>
+
// Opcodes for control operators.
#define CONTROL_OP_LIST(V) \
V(Start) \
- V(Dead) \
V(Loop) \
V(Branch) \
V(Switch) \
@@ -55,7 +56,8 @@
#define COMMON_OP_LIST(V) \
CONSTANT_OP_LIST(V) \
- INNER_OP_LIST(V)
+ INNER_OP_LIST(V) \
+ V(Dead)
// Opcodes for JavaScript operators.
#define JS_COMPARE_BINOP_LIST(V) \
@@ -112,8 +114,10 @@
V(JSCreateLiteralObject) \
V(JSLoadProperty) \
V(JSLoadNamed) \
+ V(JSLoadGlobal) \
V(JSStoreProperty) \
V(JSStoreNamed) \
+ V(JSStoreGlobal) \
V(JSDeleteProperty) \
V(JSHasProperty) \
V(JSInstanceOf)
@@ -121,6 +125,8 @@
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
V(JSStoreContext) \
+ V(JSLoadDynamicGlobal) \
+ V(JSLoadDynamicContext) \
V(JSCreateFunctionContext) \
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
@@ -132,6 +138,10 @@
V(JSCallConstruct) \
V(JSCallFunction) \
V(JSCallRuntime) \
+ V(JSForInDone) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSForInStep) \
V(JSYield) \
V(JSStackCheck)
@@ -161,10 +171,12 @@
V(NumberMultiply) \
V(NumberDivide) \
V(NumberModulus) \
+ V(NumberShiftLeft) \
+ V(NumberShiftRight) \
+ V(NumberShiftRightLogical) \
V(NumberToInt32) \
V(NumberToUint32) \
V(PlainPrimitiveToNumber) \
- V(StringAdd) \
V(ChangeTaggedToInt32) \
V(ChangeTaggedToUint32) \
V(ChangeTaggedToFloat64) \
@@ -194,6 +206,7 @@
V(Int64LessThan) \
V(Int64LessThanOrEqual) \
V(Uint64LessThan) \
+ V(Uint64LessThanOrEqual) \
V(Float32Equal) \
V(Float32LessThan) \
V(Float32LessThanOrEqual) \
@@ -273,6 +286,7 @@
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(LoadStackPointer) \
+ V(LoadFramePointer) \
V(CheckedLoad) \
V(CheckedStore)
@@ -310,7 +324,7 @@ class IrOpcode {
// Returns true if opcode for common operator.
static bool IsCommonOpcode(Value value) {
- return kStart <= value && value <= kProjection;
+ return kStart <= value && value <= kDead;
}
// Returns true if opcode for control operator.
@@ -348,6 +362,8 @@ class IrOpcode {
}
};
+std::ostream& operator<<(std::ostream&, IrOpcode::Value);
+
} // namespace compiler
} // namespace internal
} // namespace v8
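Editor's note: the opcode lists are X-macros — the enum is generated by applying V, and predicates like IsCommonOpcode rely purely on declaration order. That is why moving V(Dead) to the tail of COMMON_OP_LIST forces the upper bound to change from kProjection to kDead. A toy model of the technique, with made-up opcodes:

#include <cassert>

#define COMMON_OP_LIST(V) \
  V(Start)                \
  V(Loop)                 \
  V(Dead)

#define MACHINE_OP_LIST(V) \
  V(Load)                  \
  V(Store)

#define ALL_OP_LIST(V) \
  COMMON_OP_LIST(V)    \
  MACHINE_OP_LIST(V)

enum Opcode {
#define DECLARE(Name) k##Name,
  ALL_OP_LIST(DECLARE)
#undef DECLARE
};

// Order-based range check: only valid while COMMON_OP_LIST stays contiguous
// and kDead remains its last entry.
inline bool IsCommonOpcode(Opcode op) { return kStart <= op && op <= kDead; }

int main() {
  assert(IsCommonOpcode(kDead));
  assert(!IsCommonOpcode(kStore));
}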
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index a36caf5513..6de6d2487c 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -26,7 +26,7 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
return 1;
case IrOpcode::kJSCallRuntime: {
const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
- return Linkage::NeedsFrameState(p.id());
+ return Linkage::FrameStateInputCount(p.id());
}
// Strict equality cannot lazily deoptimize.
@@ -40,19 +40,16 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
// Compare operations
case IrOpcode::kJSEqual:
- case IrOpcode::kJSGreaterThan:
- case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSNotEqual:
case IrOpcode::kJSHasProperty:
case IrOpcode::kJSInstanceOf:
- case IrOpcode::kJSLessThan:
- case IrOpcode::kJSLessThanOrEqual:
- case IrOpcode::kJSNotEqual:
// Object operations
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
// Context operations
+ case IrOpcode::kJSLoadDynamicContext:
case IrOpcode::kJSCreateScriptContext:
case IrOpcode::kJSCreateWithContext:
@@ -62,19 +59,21 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSToName:
// Misc operations
+ case IrOpcode::kJSForInNext:
+ case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSDeleteProperty:
+ return 1;
- // Properties
+ // We record the frame state immediately before and immediately after
+ // every property or global variable access.
case IrOpcode::kJSLoadNamed:
case IrOpcode::kJSStoreNamed:
case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSDeleteProperty:
- return 1;
-
- // StoreProperty provides a second frame state just before
- // the operation. This is used to lazy-deoptimize a to-number
- // conversion for typed arrays.
case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSLoadDynamicGlobal:
return 2;
  // Binary operators that can deopt in the middle of the operation (e.g.,
@@ -93,6 +92,15 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSSubtract:
return 2;
+ // Compare operators that can deopt in the middle of the operation (e.g.,
+ // as a result of lazy deopt in ToNumber conversion) need a second frame
+ // state so that we can resume before the operation.
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ return 2;
+
default:
return 0;
}
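Editor's note: the reshuffled switch groups opcodes by how many frame states they consume — one for a plain lazy-deopt point, two for operations that may deopt part-way through (the extra state lets execution resume *before* the operation). A toy model of the dispatch, with hypothetical opcodes rather than IrOpcode:

#include <cassert>

enum class Op { kStackCheck, kLoadNamed, kAdd, kBitwiseAnd };

// 0 = cannot lazily deopt; 1 = a single deopt point; 2 = may also need to
// resume before the operation (e.g. an embedded ToNumber conversion deopts).
int FrameStateInputCount(Op op) {
  switch (op) {
    case Op::kStackCheck:
      return 1;
    case Op::kLoadNamed:  // state immediately before and after the access
    case Op::kAdd:        // binary op that can deopt in the middle
      return 2;
    default:
      return 0;  // e.g. kBitwiseAnd: inputs are already converted
  }
}

int main() {
  assert(FrameStateInputCount(Op::kBitwiseAnd) == 0);
  assert(FrameStateInputCount(Op::kAdd) == 2);
}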
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index d8ccc79c9f..6c4c49de79 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -5,9 +5,12 @@
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/control-reducer.h"
+#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/frame.h"
#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/loop-analysis.h"
@@ -44,7 +47,7 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
Zone* tmp_zone, Node* dead,
LoopTree* loop_tree, LoopTree::Loop* osr_loop,
Node* osr_normal_entry, Node* osr_loop_entry) {
- const int original_count = graph->NodeCount();
+ const size_t original_count = graph->NodeCount();
AllNodes all(tmp_zone, graph);
NodeVector tmp_inputs(tmp_zone);
Node* sentinel = graph->NewNode(dead->op());
@@ -228,20 +231,15 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
// Merge the ends of the graph copies.
- Node* end = graph->end();
- tmp_inputs.clear();
- for (int i = -1; i < static_cast<int>(copies.size()); i++) {
- Node* input = end->InputAt(0);
- if (i >= 0) input = copies[i]->at(input->id());
- if (input->opcode() == IrOpcode::kMerge) {
- for (Node* node : input->inputs()) tmp_inputs.push_back(node);
- } else {
- tmp_inputs.push_back(input);
+ Node* const end = graph->end();
+ int const input_count = end->InputCount();
+ for (int i = 0; i < input_count; ++i) {
+ NodeId const id = end->InputAt(i)->id();
+ for (NodeVector* const copy : copies) {
+ end->AppendInput(graph->zone(), copy->at(id));
+ end->set_op(common->End(end->InputCount()));
}
}
- int count = static_cast<int>(tmp_inputs.size());
- Node* merge = graph->NewNode(common->Merge(count), count, &tmp_inputs[0]);
- end->ReplaceInput(0, merge);
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
@@ -251,40 +249,6 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
-static void TransferOsrValueTypesFromLoopPhis(Zone* zone, Node* osr_loop_entry,
- Node* osr_loop) {
- // Find the index of the osr loop entry into the loop.
- int index = 0;
- for (index = 0; index < osr_loop->InputCount(); index++) {
- if (osr_loop->InputAt(index) == osr_loop_entry) break;
- }
- if (index == osr_loop->InputCount()) return;
-
- for (Node* osr_value : osr_loop_entry->uses()) {
- if (osr_value->opcode() != IrOpcode::kOsrValue) continue;
- bool unknown = true;
- for (Node* phi : osr_value->uses()) {
- if (phi->opcode() != IrOpcode::kPhi) continue;
- if (NodeProperties::GetControlInput(phi) != osr_loop) continue;
- if (phi->InputAt(index) != osr_value) continue;
- if (NodeProperties::IsTyped(phi)) {
- // Transfer the type from the phi to the OSR value itself.
- Bounds phi_bounds = NodeProperties::GetBounds(phi);
- if (unknown) {
- NodeProperties::SetBounds(osr_value, phi_bounds);
- } else {
- Bounds osr_bounds = NodeProperties::GetBounds(osr_value);
- NodeProperties::SetBounds(osr_value,
- Bounds::Both(phi_bounds, osr_bounds, zone));
- }
- unknown = false;
- }
- }
- if (unknown) NodeProperties::SetBounds(osr_value, Bounds::Unbounded(zone));
- }
-}
-
-
void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
Zone* tmp_zone) {
Graph* graph = jsgraph->graph();
@@ -315,13 +279,10 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
CHECK(osr_loop); // Should have found the OSR loop.
- // Transfer the types from loop phis to the OSR values which flow into them.
- TransferOsrValueTypesFromLoopPhis(graph->zone(), osr_loop_entry, osr_loop);
-
// Analyze the graph to determine how deeply nested the OSR loop is.
LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
- Node* dead = jsgraph->DeadControl();
+ Node* dead = jsgraph->Dead();
LoopTree::Loop* loop = loop_tree->ContainingLoop(osr_loop);
if (loop->depth() > 0) {
PeelOuterLoopsForOsr(graph, common, tmp_zone, dead, loop_tree, loop,
@@ -335,16 +296,31 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
osr_loop_entry->ReplaceUses(graph->start());
osr_loop_entry->Kill();
- // Normally the control reducer removes loops whose first input is dead,
- // but we need to avoid that because the osr_loop is reachable through
- // the second input, so reduce it and its phis manually.
- osr_loop->ReplaceInput(0, dead);
- Node* node = ControlReducer::ReduceMerge(jsgraph, osr_loop);
- if (node != osr_loop) osr_loop->ReplaceUses(node);
-
- // Run the normal control reduction, which naturally trims away the dead
- // parts of the graph.
- ControlReducer::ReduceGraph(tmp_zone, jsgraph);
+ // Remove the first input to the {osr_loop}.
+ int const live_input_count = osr_loop->InputCount() - 1;
+ CHECK_NE(0, live_input_count);
+ for (Node* const use : osr_loop->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ use->set_op(common->ResizeMergeOrPhi(use->op(), live_input_count));
+ use->RemoveInput(0);
+ }
+ }
+ osr_loop->set_op(common->ResizeMergeOrPhi(osr_loop->op(), live_input_count));
+ osr_loop->RemoveInput(0);
+
+ // Run control reduction and graph trimming.
+ // TODO(bmeurer): The OSR deconstruction could be a regular reducer and play
+ // nice together with the rest, instead of having this custom stuff here.
+ GraphReducer graph_reducer(tmp_zone, graph);
+ DeadCodeElimination dce(&graph_reducer, graph, common);
+ CommonOperatorReducer cor(&graph_reducer, graph, common, jsgraph->machine());
+ graph_reducer.AddReducer(&dce);
+ graph_reducer.AddReducer(&cor);
+ graph_reducer.ReduceGraph();
+ GraphTrimmer trimmer(tmp_zone, graph);
+ NodeVector roots(tmp_zone);
+ jsgraph->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
}
@@ -356,7 +332,6 @@ void OsrHelper::SetupFrame(Frame* frame) {
frame->SetOsrStackSlotCount(static_cast<int>(UnoptimizedFrameSlots()));
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
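Editor's note: Deconstruct now resizes the loop and its phis in place and leaves the cleanup to generic reducers plus a graph trimmer. A trimmer is just reachability marking from the roots through input edges, after which dead->live edges can be disconnected. A minimal sketch over an adjacency-list graph — illustrative, not V8's GraphTrimmer:

#include <vector>

struct TrimGraph {
  // inputs[n] lists the nodes that node n consumes.
  std::vector<std::vector<int>> inputs;

  // Mark everything reachable from |roots| (e.g. end plus cached nodes);
  // any node left unmarked is dead and its use edges can be removed.
  std::vector<bool> MarkLive(const std::vector<int>& roots) const {
    std::vector<bool> live(inputs.size(), false);
    std::vector<int> stack(roots);
    while (!stack.empty()) {
      int n = stack.back();
      stack.pop_back();
      if (live[n]) continue;
      live[n] = true;
      for (int input : inputs[n]) stack.push_back(input);
    }
    return live;
  }
};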
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 197f10058f..55455690dd 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,14 +16,17 @@
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
-#include "src/compiler/control-reducer.h"
+#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
+#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/greedy-allocator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-inlining.h"
#include "src/compiler/js-intrinsic-lowering.h"
@@ -78,7 +81,6 @@ class PipelineData {
javascript_(nullptr),
jsgraph_(nullptr),
js_type_feedback_(nullptr),
- typer_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
@@ -97,7 +99,6 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, machine_);
- typer_.Reset(new Typer(isolate_, graph_, info_->context()));
}
// For machine graph testing entry point.
@@ -120,7 +121,6 @@ class PipelineData {
javascript_(nullptr),
jsgraph_(nullptr),
js_type_feedback_(nullptr),
- typer_(nullptr),
schedule_(schedule),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
@@ -149,7 +149,6 @@ class PipelineData {
javascript_(nullptr),
jsgraph_(nullptr),
js_type_feedback_(nullptr),
- typer_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(sequence->zone()),
@@ -193,7 +192,6 @@ class PipelineData {
void set_js_type_feedback(JSTypeFeedbackTable* js_type_feedback) {
js_type_feedback_ = js_type_feedback;
}
- Typer* typer() const { return typer_.get(); }
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
@@ -219,7 +217,6 @@ class PipelineData {
void DeleteGraphZone() {
// Destroy objects with destructors first.
source_positions_.Reset(nullptr);
- typer_.Reset(nullptr);
if (graph_zone_ == nullptr) return;
// Destroy zone and clear pointers.
graph_zone_scope_.Destroy();
@@ -290,8 +287,6 @@ class PipelineData {
JSOperatorBuilder* javascript_;
JSGraph* jsgraph_;
JSTypeFeedbackTable* js_type_feedback_;
- // TODO(dcarney): make this into a ZoneObject.
- SmartPointer<Typer> typer_;
Schedule* schedule_;
// All objects in the following group of fields are allocated in
@@ -371,9 +366,9 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
source_positions_(source_positions),
start_position_(info->shared_info()->start_position()) {}
- bool CreateGraph(bool constant_context, bool stack_check) {
+ bool CreateGraph(bool stack_check) {
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
- return AstGraphBuilder::CreateGraph(constant_context, stack_check);
+ return AstGraphBuilder::CreateGraph(stack_check);
}
#define DEF_VISIT(type) \
@@ -411,6 +406,14 @@ class SourcePositionWrapper final : public Reducer {
};
+class JSGraphReducer final : public GraphReducer {
+ public:
+ JSGraphReducer(JSGraph* jsgraph, Zone* zone)
+ : GraphReducer(zone, jsgraph->graph(), jsgraph->Dead()) {}
+ ~JSGraphReducer() final {}
+};
+
+
void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
Reducer* reducer) {
if (data->info()->is_source_positions_enabled()) {
@@ -472,43 +475,43 @@ struct LoopAssignmentAnalysisPhase {
struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
- void Run(PipelineData* data, Zone* temp_zone, bool constant_context) {
+ void Run(PipelineData* data, Zone* temp_zone) {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
data->js_type_feedback(), data->source_positions());
bool stack_check = !data->info()->IsStub();
- if (!graph_builder.CreateGraph(constant_context, stack_check)) {
+ if (!graph_builder.CreateGraph(stack_check)) {
data->set_compilation_failed();
}
}
};
-struct ContextSpecializerPhase {
- static const char* phase_name() { return "context specializing"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- JSContextSpecializer spec(data->jsgraph());
- GraphReducer graph_reducer(data->graph(), temp_zone);
- AddReducer(data, &graph_reducer, &spec);
- graph_reducer.ReduceGraph();
- }
-};
-
-
struct InliningPhase {
static const char* phase_name() { return "inlining"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- JSInliner inliner(data->info()->is_inlining_enabled()
- ? JSInliner::kGeneralInlining
- : JSInliner::kBuiltinsInlining,
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ JSContextSpecialization context_specialization(
+ &graph_reducer, data->jsgraph(), data->info()->context());
+ JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
+ data->jsgraph());
+ JSInliner inliner(&graph_reducer, data->info()->is_inlining_enabled()
+ ? JSInliner::kGeneralInlining
+ : JSInliner::kRestrictedInlining,
temp_zone, data->info(), data->jsgraph());
- GraphReducer graph_reducer(data->graph(), temp_zone);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ if (data->info()->is_frame_specializing()) {
+ AddReducer(data, &graph_reducer, &frame_specialization);
+ }
+ if (data->info()->is_context_specializing()) {
+ AddReducer(data, &graph_reducer, &context_specialization);
+ }
AddReducer(data, &graph_reducer, &inliner);
graph_reducer.ReduceGraph();
}
@@ -518,7 +521,11 @@ struct InliningPhase {
struct TyperPhase {
static const char* phase_name() { return "typer"; }
- void Run(PipelineData* data, Zone* temp_zone) { data->typer()->Run(); }
+ void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ typer->Run(roots);
+ }
};
@@ -526,8 +533,6 @@ struct OsrDeconstructionPhase {
static const char* phase_name() { return "OSR deconstruction"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
OsrHelper osr_helper(data->info());
osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
}
@@ -538,13 +543,11 @@ struct JSTypeFeedbackPhase {
static const char* phase_name() { return "type feedback specializing"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
Handle<Context> native_context(data->info()->context()->native_context());
TypeFeedbackOracle oracle(data->isolate(), temp_zone,
data->info()->unoptimized_code(),
data->info()->feedback_vector(), native_context);
- GraphReducer graph_reducer(data->graph(), temp_zone);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
Handle<GlobalObject> global_object = Handle<GlobalObject>::null();
if (data->info()->has_global_object()) {
global_object =
@@ -553,7 +556,10 @@ struct JSTypeFeedbackPhase {
// TODO(titzer): introduce a specialization mode/flags enum to control
// specializing to the global object here.
JSTypeFeedbackSpecializer specializer(
- data->jsgraph(), data->js_type_feedback(), &oracle, global_object,
+ &graph_reducer, data->jsgraph(), data->js_type_feedback(), &oracle,
+ global_object, data->info()->is_deoptimization_enabled()
+ ? JSTypeFeedbackSpecializer::kDeoptimizationEnabled
+ : JSTypeFeedbackSpecializer::kDeoptimizationDisabled,
data->info()->dependencies());
AddReducer(data, &graph_reducer, &specializer);
graph_reducer.ReduceGraph();
@@ -565,20 +571,24 @@ struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- LoadElimination load_elimination;
- JSBuiltinReducer builtin_reducer(data->jsgraph());
- JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
- JSIntrinsicLowering intrinsic_lowering(data->jsgraph());
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer(data->jsgraph());
- GraphReducer graph_reducer(data->graph(), temp_zone);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ LoadElimination load_elimination(&graph_reducer);
+ JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+ JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(), temp_zone);
+ JSIntrinsicLowering intrinsic_lowering(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSIntrinsicLowering::kDeoptimizationEnabled
+ : JSIntrinsicLowering::kDeoptimizationDisabled);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
- AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
@@ -589,18 +599,20 @@ struct SimplifiedLoweringPhase {
static const char* phase_name() { return "simplified lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
data->source_positions());
lowering.LowerAllNodes();
- ValueNumberingReducer vn_reducer(temp_zone);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+ ValueNumberingReducer value_numbering(temp_zone);
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer(data->jsgraph());
- GraphReducer graph_reducer(data->graph(), temp_zone);
- AddReducer(data, &graph_reducer, &vn_reducer);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -612,9 +624,8 @@ struct ControlFlowOptimizationPhase {
static const char* phase_name() { return "control flow optimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- ControlFlowOptimizer optimizer(data->jsgraph(), temp_zone);
+ ControlFlowOptimizer optimizer(data->graph(), data->common(),
+ data->machine(), temp_zone);
optimizer.Optimize();
}
};
@@ -624,16 +635,18 @@ struct ChangeLoweringPhase {
static const char* phase_name() { return "change lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- ValueNumberingReducer vn_reducer(temp_zone);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+ ValueNumberingReducer value_numbering(temp_zone);
ChangeLowering lowering(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer(data->jsgraph());
- GraphReducer graph_reducer(data->graph(), temp_zone);
- AddReducer(data, &graph_reducer, &vn_reducer);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &lowering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -642,22 +655,24 @@ struct ChangeLoweringPhase {
};
-struct EarlyControlReductionPhase {
- static const char* phase_name() { return "early control reduction"; }
+struct EarlyGraphTrimmingPhase {
+ static const char* phase_name() { return "early graph trimming"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- ControlReducer::ReduceGraph(temp_zone, data->jsgraph(), 0);
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
}
};
-struct LateControlReductionPhase {
- static const char* phase_name() { return "late control reduction"; }
+struct LateGraphTrimmingPhase {
+ static const char* phase_name() { return "late graph trimming"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- ControlReducer::ReduceGraph(temp_zone, data->jsgraph(), 0);
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
}
};
@@ -666,8 +681,6 @@ struct StressLoopPeelingPhase {
static const char* phase_name() { return "stress loop peeling"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
// Peel the first outer loop for testing.
// TODO(titzer): peel all loops? the N'th loop? Innermost loops?
LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
@@ -683,17 +696,21 @@ struct GenericLoweringPhase {
static const char* phase_name() { return "generic lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- JSGenericLowering generic(data->info()->is_typing_enabled(),
- data->jsgraph());
- SelectLowering select(data->jsgraph()->graph(), data->jsgraph()->common());
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ JSGenericLowering generic_lowering(data->info()->is_typing_enabled(),
+ data->jsgraph());
+ SelectLowering select_lowering(data->jsgraph()->graph(),
+ data->jsgraph()->common());
TailCallOptimization tco(data->common(), data->graph());
- GraphReducer graph_reducer(data->graph(), temp_zone);
- AddReducer(data, &graph_reducer, &generic);
- AddReducer(data, &graph_reducer, &select);
- // TODO(turbofan): TCO is currently limited to stubs.
- if (data->info()->IsStub()) AddReducer(data, &graph_reducer, &tco);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &generic_lowering);
+ AddReducer(data, &graph_reducer, &select_lowering);
+ AddReducer(data, &graph_reducer, &tco);
graph_reducer.ReduceGraph();
}
};
@@ -949,19 +966,12 @@ Handle<Code> Pipeline::GenerateCode() {
  // TODO(mstarzinger): This is just a temporary hack to make TurboFan work;
// the correct solution is to restore the context register after invoking
// builtins from full-codegen.
- Handle<SharedFunctionInfo> shared = info()->shared_info();
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(id);
if (*info()->closure() == builtin) return Handle<Code>::null();
}
- // TODO(dslomov): support turbo optimization of subclass constructors.
- if (IsSubclassConstructor(shared->kind())) {
- shared->DisableOptimization(kSuperReference);
- return Handle<Code>::null();
- }
-
ZonePool zone_pool;
SmartPointer<PipelineStatistics> pipeline_statistics;
@@ -1021,23 +1031,23 @@ Handle<Code> Pipeline::GenerateCode() {
Run<LoopAssignmentAnalysisPhase>();
}
- Run<GraphBuilderPhase>(info()->is_context_specializing());
+ Run<GraphBuilderPhase>();
if (data.compilation_failed()) return Handle<Code>::null();
RunPrintAndVerify("Initial untyped", true);
- Run<EarlyControlReductionPhase>();
- RunPrintAndVerify("Early Control reduced", true);
-
- if (info()->is_context_specializing()) {
- // Specialize the code to the context as aggressively as possible.
- Run<ContextSpecializerPhase>();
- RunPrintAndVerify("Context specialized", true);
+ // Perform OSR deconstruction.
+ if (info()->is_osr()) {
+ Run<OsrDeconstructionPhase>();
+ RunPrintAndVerify("OSR deconstruction", true);
}
- if (info()->is_builtin_inlining_enabled() || info()->is_inlining_enabled()) {
- Run<InliningPhase>();
- RunPrintAndVerify("Inlined", true);
- }
+ // Perform context specialization and inlining (if enabled).
+ Run<InliningPhase>();
+ RunPrintAndVerify("Inlined", true);
+
+ // Remove dead->live edges from the graph.
+ Run<EarlyGraphTrimmingPhase>();
+ RunPrintAndVerify("Early trimmed", true);
if (FLAG_print_turbo_replay) {
// Print a replay of the initial graph.
@@ -1047,9 +1057,11 @@ Handle<Code> Pipeline::GenerateCode() {
// Bailout here in case target architecture is not supported.
if (!SupportedTarget()) return Handle<Code>::null();
+ SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
- Run<TyperPhase>();
+ typer.Reset(new Typer(isolate(), data.graph(), info()->function_type()));
+ Run<TyperPhase>(typer.get());
RunPrintAndVerify("Typed");
}
@@ -1062,17 +1074,10 @@ Handle<Code> Pipeline::GenerateCode() {
if (FLAG_turbo_stress_loop_peeling) {
Run<StressLoopPeelingPhase>();
- RunPrintAndVerify("Loop peeled", true);
+ RunPrintAndVerify("Loop peeled");
}
- if (info()->is_osr()) {
- Run<OsrDeconstructionPhase>();
- RunPrintAndVerify("OSR deconstruction");
- }
-
- // TODO(turbofan): Type feedback currently requires deoptimization.
- if (info()->is_deoptimization_enabled() &&
- info()->is_type_feedback_enabled()) {
+ if (info()->is_type_feedback_enabled()) {
Run<JSTypeFeedbackPhase>();
RunPrintAndVerify("JSType feedback");
}
@@ -1091,15 +1096,6 @@ Handle<Code> Pipeline::GenerateCode() {
Run<ChangeLoweringPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Lowered changes", true);
-
- Run<LateControlReductionPhase>();
- RunPrintAndVerify("Late Control reduced");
- } else {
- if (info()->is_osr()) {
- Run<OsrDeconstructionPhase>();
- if (info()->bailout_reason() != kNoReason) return Handle<Code>::null();
- RunPrintAndVerify("OSR deconstruction");
- }
}
// Lower any remaining generic JSOperators.
@@ -1107,10 +1103,17 @@ Handle<Code> Pipeline::GenerateCode() {
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Lowered generic", true);
+ Run<LateGraphTrimmingPhase>();
+ // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+ RunPrintAndVerify("Late trimmed", true);
+
BeginPhaseKind("block building");
data.source_positions()->RemoveDecorator();
+ // Kill the Typer and thereby uninstall the decorator (if any).
+ typer.Reset(nullptr);
+
return ScheduleAndGenerateCode(
Linkage::ComputeIncoming(data.instruction_zone(), info()));
}
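Editor's note: every lowering phase above now funnels through one GraphReducer that applies a set of reducers until none makes further progress. A stripped-down fixpoint loop over an abstract Reducer interface — a sketch of the pattern only (V8 revisits via a work stack rather than whole-graph passes):

#include <vector>

struct Node;

struct Reduction {
  Node* replacement = nullptr;  // nullptr means "no change"
  bool Changed() const { return replacement != nullptr; }
};

class Reducer {
 public:
  virtual ~Reducer() = default;
  virtual Reduction Reduce(Node* node) = 0;
};

// Applies all reducers to every node until a full pass changes nothing.
void ReduceToFixpoint(std::vector<Node*>& nodes,
                      std::vector<Reducer*>& reducers) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (Node*& node : nodes) {
      for (Reducer* r : reducers) {
        Reduction reduction = r->Reduce(node);
        if (reduction.Changed()) {
          node = reduction.replacement;  // revisit under the new operator
          changed = true;
        }
      }
    }
  }
}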
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index bdebd304c0..cdc1424cc5 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -585,10 +585,7 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ __ LeaveFrame(StackFrame::MANUAL);
}
}
@@ -620,6 +617,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
} else {
+ // We cannot use the constant pool to load the target since
+ // we've already restored the caller's frame.
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
}
@@ -657,6 +657,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -687,6 +703,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mr(i.OutputRegister(), sp);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchFramePointer:
+ __ mr(i.OutputRegister(), fp);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -958,6 +978,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Push(i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kPPC_PushFrame: {
+ int num_slots = i.InputInt32(1);
+ __ StorePU(i.InputRegister(0), MemOperand(sp, -num_slots * kPointerSize));
+ break;
+ }
+ case kPPC_StoreToStackSlot: {
+ int slot = i.InputInt32(1);
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ break;
+ }
case kPPC_ExtendSignWord8:
__ extsb(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1269,8 +1299,16 @@ void CodeGenerator::AssemblePrologue() {
int register_save_area_size = 0;
RegList frame_saves = fp.bit();
__ mflr(r0);
- __ Push(r0, fp);
- __ mr(fp, sp);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ register_save_area_size += kPointerSize;
+ frame_saves |= kConstantPoolRegister.bit();
+ } else {
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+ }
// Save callee-saved registers.
const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
@@ -1284,7 +1322,7 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -1323,6 +1361,9 @@ void CodeGenerator::AssembleReturn() {
}
// Restore registers.
RegList frame_saves = fp.bit();
+ if (FLAG_enable_embedded_constant_pool) {
+ frame_saves |= kConstantPoolRegister.bit();
+ }
const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
if (saves != 0) {
__ MultiPop(saves);
@@ -1330,12 +1371,20 @@ void CodeGenerator::AssembleReturn() {
}
__ LeaveFrame(StackFrame::MANUAL);
__ Ret();
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
- __ Ret();
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ __ Ret();
+ }
} else {
__ Ret();
}
@@ -1540,7 +1589,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
}
}
}
- MarkLazyDeoptSite();
}
#undef __
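Editor's note: the tail-call path above wraps the jump in a ConstantPoolUnavailableScope because LeaveFrame has already clobbered the pool register. Such scopes are plain RAII guards; a generic sketch of the idea, with a stand-in Assembler type rather than V8's MacroAssembler:

// RAII guard that flags a resource unusable for the dynamic extent of a
// scope and restores the previous state on exit.
struct Assembler {
  bool constant_pool_available = true;
};

class ConstantPoolUnavailableGuard {
 public:
  explicit ConstantPoolUnavailableGuard(Assembler* masm)
      : masm_(masm), saved_(masm->constant_pool_available) {
    masm_->constant_pool_available = false;
  }
  ~ConstantPoolUnavailableGuard() { masm_->constant_pool_available = saved_; }

 private:
  Assembler* const masm_;
  bool const saved_;
};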
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 0450495f5d..c817ef2a92 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -68,6 +68,8 @@ namespace compiler {
V(PPC_Tst32) \
V(PPC_Tst64) \
V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
V(PPC_ExtendSignWord8) \
V(PPC_ExtendSignWord16) \
V(PPC_ExtendSignWord32) \
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index afe30d4c85..0fe2acb369 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -874,26 +874,31 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
- PPCOperandGenerator g(this);
- Emit(kPPC_Uint32ToUint64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kPPC_Uint32ToUint64, node);
}
#endif
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- PPCOperandGenerator g(this);
- Emit(kPPC_DoubleToFloat32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kPPC_DoubleToFloat32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kPPC_DoubleToInt32, node);
+ }
+ UNREACHABLE();
}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
- PPCOperandGenerator g(this);
// TODO(mbrandy): inspect input to see if nop is appropriate.
- Emit(kPPC_Int64ToInt32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kPPC_Int64ToInt32, node);
}
#endif
@@ -976,24 +981,16 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRRR(this, kPPC_MaxDouble, node);
-}
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRRR(this, kPPC_MaxDouble, node);
-}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRRR(this, kPPC_MinDouble, node);
-}
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRRR(this, kPPC_MinDouble, node);
-}
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -1193,6 +1190,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -1395,6 +1395,12 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
VisitWord64Compare(this, node, &cont);
}
+
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
#endif
@@ -1450,17 +1456,45 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// TODO(turbofan): on PPC it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- // TODO(mbrandy): reverse order and use push only for first
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(node));
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kStackFrameExtraParamSlot;
+ for (Node* node : buffer.pushed_nodes) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot));
+ ++slot;
+ }
+ } else {
+ // Push any stack arguments.
+ int num_slots = static_cast<int>(buffer.pushed_nodes.size());
+ int slot = 0;
+ for (Node* node : buffer.pushed_nodes) {
+ if (slot == 0) {
+ Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(num_slots));
+ } else {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot));
+ }
+ ++slot;
+ }
}
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -1468,18 +1502,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
@@ -1497,9 +1534,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
@@ -1508,8 +1543,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
// heuristics in the register allocator for where to emit constants.
InitializeCallBuffer(node, &buffer, true, false);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -1623,11 +1656,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway;
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
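Editor's note: the new push sequence allocates all argument slots with a single store-with-update (kPPC_PushFrame) for the first argument, then plain stores for the rest; C calls instead poke into an area reserved by PrepareCallCFunction. A host-side sketch of the slot assignment, with toy instruction records rather than the real selector:

#include <string>
#include <vector>

struct Emitted {
  std::string op;
  int value;  // stand-in for the pushed node
  int slot;   // immediate operand: frame size or slot index
};

// First argument grows the stack by |args.size()| slots and stores itself
// at the new top; remaining arguments store straight into their slot.
std::vector<Emitted> EmitPushedArguments(const std::vector<int>& args) {
  std::vector<Emitted> out;
  int const num_slots = static_cast<int>(args.size());
  int slot = 0;
  for (int arg : args) {
    if (slot == 0) {
      out.push_back({"PushFrame", arg, num_slots});
    } else {
      out.push_back({"StoreToStackSlot", arg, slot});
    }
    ++slot;
  }
  return out;
}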
diff --git a/deps/v8/src/compiler/ppc/linkage-ppc.cc b/deps/v8/src/compiler/ppc/linkage-ppc.cc
index 39ebb63efa..677e9d0e6c 100644
--- a/deps/v8/src/compiler/ppc/linkage-ppc.cc
+++ b/deps/v8/src/compiler/ppc/linkage-ppc.cc
@@ -25,11 +25,13 @@ struct PPCLinkageHelperTraits {
r24.bit() | r25.bit() | r26.bit() | r27.bit() | r28.bit() |
r29.bit() | r30.bit() | fp.bit();
}
+ static RegList CCalleeSaveFPRegisters() { return 0; }
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {r3, r4, r5, r6, r7, r8, r9, r10};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
+ static int CStackBackingStoreLength() { return 0; }
};
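Editor's note: register sets such as CCalleeSavedRegisters are plain bitmasks, which is why the PPC prologue/epilogue above can add the constant-pool register with a single |=. A tiny model of the RegList arithmetic (register codes here are arbitrary examples):

#include <cstdint>

using RegList = uint32_t;

constexpr RegList Bit(int reg_code) { return RegList{1} << reg_code; }

int main() {
  RegList frame_saves = Bit(31);       // e.g. fp
  bool embedded_constant_pool = true;  // stand-in for the V8 flag
  if (embedded_constant_pool) {
    frame_saves |= Bit(28);            // e.g. kConstantPoolRegister
  }
  // Filter out callee-saved registers the frame code already handles.
  RegList callee_saved = Bit(14) | Bit(28) | Bit(31);
  RegList saves = callee_saved & ~frame_saves;
  return saves == Bit(14) ? 0 : 1;
}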
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 6a339e7c91..3e87ef5d97 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -23,7 +23,6 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
call_descriptor_(
Linkage::GetSimplifiedCDescriptor(graph->zone(), machine_sig)),
parameters_(NULL),
- exit_label_(schedule()->end()),
current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
Node* s = graph->NewNode(common_.Start(param_count));
@@ -54,12 +53,6 @@ Node* RawMachineAssembler::Parameter(size_t index) {
}
-RawMachineAssembler::Label* RawMachineAssembler::Exit() {
- exit_label_.used_ = true;
- return &exit_label_;
-}
-
-
void RawMachineAssembler::Goto(Label* label) {
DCHECK(current_block_ != schedule()->end());
schedule()->AddGoto(CurrentBlock(), Use(label));
@@ -151,6 +144,78 @@ Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
}
+Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
+ Node* function) {
+ MachineSignature::Builder builder(zone(), 1, 0);
+ builder.AddReturn(return_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), function);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
+Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
+ MachineType arg0_type, Node* function,
+ Node* arg0) {
+ MachineSignature::Builder builder(zone(), 1, 1);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), function, arg0);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
+Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type, Node* function,
+ Node* arg0, Node* arg1) {
+ MachineSignature::Builder builder(zone(), 1, 2);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call =
+ graph()->NewNode(common()->Call(descriptor), function, arg0, arg1);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
+Node* RawMachineAssembler::CallCFunction8(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, Node* arg7) {
+ MachineSignature::Builder builder(zone(), 1, 8);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ builder.AddParam(arg6_type);
+ builder.AddParam(arg7_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), function, arg0,
+ arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
void RawMachineAssembler::Bind(Label* label) {
DCHECK(current_block_ == NULL);
DCHECK(!label->bound_);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index b338368024..bc28e6c817 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -285,11 +285,14 @@ class RawMachineAssembler : public GraphBuilder {
Node* Int64LessThan(Node* a, Node* b) {
return NewNode(machine()->Int64LessThan(), a, b);
}
+ Node* Int64LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ }
Node* Uint64LessThan(Node* a, Node* b) {
return NewNode(machine()->Uint64LessThan(), a, b);
}
- Node* Int64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ Node* Uint64LessThanOrEqual(Node* a, Node* b) {
+ return NewNode(machine()->Uint64LessThanOrEqual(), a, b);
}
Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
@@ -302,15 +305,6 @@ class RawMachineAssembler : public GraphBuilder {
return NewNode(machine()->Uint64Mod(), a, b);
}
- // TODO(turbofan): What is this used for?
- Node* ConvertIntPtrToInt32(Node* a) {
- return kPointerSize == 8 ? NewNode(machine()->TruncateInt64ToInt32(), a)
- : a;
- }
- Node* ConvertInt32ToIntPtr(Node* a) {
- return kPointerSize == 8 ? NewNode(machine()->ChangeInt32ToInt64(), a) : a;
- }
-
#define INTPTR_BINOP(prefix, name) \
Node* IntPtr##name(Node* a, Node* b) { \
return kPointerSize == 8 ? prefix##64##name(a, b) \
@@ -418,20 +412,20 @@ class RawMachineAssembler : public GraphBuilder {
Node* TruncateFloat64ToFloat32(Node* a) {
return NewNode(machine()->TruncateFloat64ToFloat32(), a);
}
- Node* TruncateFloat64ToInt32(Node* a) {
- return NewNode(machine()->TruncateFloat64ToInt32(), a);
+ Node* TruncateFloat64ToInt32(TruncationMode mode, Node* a) {
+ return NewNode(machine()->TruncateFloat64ToInt32(mode), a);
}
Node* TruncateInt64ToInt32(Node* a) {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
Node* Float64RoundDown(Node* a) {
- return NewNode(machine()->Float64RoundDown(), a);
+ return NewNode(machine()->Float64RoundDown().op(), a);
}
Node* Float64RoundTruncate(Node* a) {
- return NewNode(machine()->Float64RoundTruncate(), a);
+ return NewNode(machine()->Float64RoundTruncate().op(), a);
}
Node* Float64RoundTiesAway(Node* a) {
- return NewNode(machine()->Float64RoundTiesAway(), a);
+ return NewNode(machine()->Float64RoundTiesAway().op(), a);
}
// Float64 bit operations.
@@ -450,12 +444,23 @@ class RawMachineAssembler : public GraphBuilder {
// Stack operations.
Node* LoadStackPointer() { return NewNode(machine()->LoadStackPointer()); }
+ Node* LoadFramePointer() { return NewNode(machine()->LoadFramePointer()); }
// Parameters.
Node* Parameter(size_t index);
+ // Pointer utilities.
+ Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
+ return Load(rep, PointerConstant(address), Int32Constant(offset));
+ }
+ void StoreToPointer(void* address, MachineType rep, Node* node) {
+ Store(rep, PointerConstant(address), node);
+ }
+ Node* StringConstant(const char* string) {
+ return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
+ }
+
// Control flow.
- Label* Exit();
void Goto(Label* label);
void Branch(Node* condition, Label* true_val, Label* false_val);
void Switch(Node* index, Label* default_label, int32_t* case_values,
@@ -469,6 +474,23 @@ class RawMachineAssembler : public GraphBuilder {
// Call to a runtime function with zero parameters.
Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
Node* frame_state);
+ // Call to a C function with zero parameters.
+ Node* CallCFunction0(MachineType return_type, Node* function);
+ // Call to a C function with one parameter.
+ Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
+ Node* function, Node* arg0);
+ // Call to a C function with two parameters.
+ Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, Node* function, Node* arg0,
+ Node* arg1);
+ // Call to a C function with eight parameters.
+ Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, Node* arg7);
void Return(Node* value);
void Bind(Label* label);
void Deoptimize(Node* state);
@@ -509,7 +531,6 @@ class RawMachineAssembler : public GraphBuilder {
const MachineSignature* machine_sig_;
CallDescriptor* call_descriptor_;
Node** parameters_;
- Label exit_label_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
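Editor's note: the new pointer utilities above are thin wrappers that turn a raw C++ address into a constant base plus offset for a load or store. A host-side analogue of the semantics the generated code implements — hypothetical helpers, not the assembler API (the offset parameter on the store is an addition for symmetry):

#include <cstddef>
#include <cstdint>
#include <cstring>

// "Load" = read |size| bytes from base + offset; the assembler version
// emits a PointerConstant plus an offset operand instead.
inline void LoadFromPointer(const void* address, void* out, std::size_t size,
                            int32_t offset = 0) {
  std::memcpy(out, static_cast<const char*>(address) + offset, size);
}

inline void StoreToPointer(void* address, const void* value, std::size_t size,
                           int32_t offset = 0) {
  std::memcpy(static_cast<char*>(address) + offset, value, size);
}

int main() {
  int32_t counter = 0;
  int32_t one = 1;
  StoreToPointer(&counter, &one, sizeof(counter));
  int32_t read = 0;
  LoadFromPointer(&counter, &read, sizeof(read));
  return read == 1 ? 0 : 1;
}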
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 08b71f2e40..5bf858a86c 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -32,13 +32,6 @@ int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
}
-const ZoneVector<LiveRange*>& GetFixedRegisters(
- const RegisterAllocationData* data, RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? data->fixed_double_live_ranges()
- : data->fixed_live_ranges();
-}
-
-
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
auto index = block->loop_header();
@@ -53,13 +46,6 @@ const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
}
-bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) {
- return pos.IsFullStart() &&
- code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
- pos.ToInstructionIndex();
-}
-
-
Instruction* GetLastInstruction(InstructionSequence* code,
const InstructionBlock* block) {
return code->InstructionAt(block->last_instruction_index());
@@ -251,6 +237,10 @@ struct LiveRange::SpillAtDefinitionList : ZoneObject {
};
+const float LiveRange::kInvalidWeight = -1;
+const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
+
+
LiveRange::LiveRange(int id, MachineType machine_type)
: id_(id),
spill_start_index_(kMaxInt),
@@ -264,7 +254,9 @@ LiveRange::LiveRange(int id, MachineType machine_type)
spills_at_definition_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
- current_hint_position_(nullptr) {
+ current_hint_position_(nullptr),
+ size_(kInvalidSize),
+ weight_(kInvalidWeight) {
DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
AssignedRegisterField::encode(kUnassignedRegister) |
@@ -573,6 +565,10 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
result->next_ = next_;
next_ = result;
+ // Invalidate size and weight of this range. The child range has them
+ // invalid at construction.
+ size_ = kInvalidSize;
+ weight_ = kInvalidWeight;
#ifdef DEBUG
Verify();
result->Verify();
@@ -763,6 +759,19 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
}
+unsigned LiveRange::GetSize() {
+ if (size_ == kInvalidSize) {
+ size_ = 0;
+ for (auto interval = first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ size_ += (interval->end().value() - interval->start().value());
+ }
+ }
+
+ return static_cast<unsigned>(size_);
+}
+
+
static bool AreUseIntervalsIntersecting(UseInterval* interval1,
UseInterval* interval2) {
while (interval1 != nullptr && interval2 != nullptr) {
@@ -1071,6 +1080,13 @@ void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
}
+bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
+ return pos.IsFullStart() &&
+ code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
+ pos.ToInstructionIndex();
+}
+
+
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
@@ -1853,6 +1869,21 @@ void RegisterAllocator::Spill(LiveRange* range) {
}
+const ZoneVector<LiveRange*>& RegisterAllocator::GetFixedRegisters() const {
+ return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
+ : data()->fixed_live_ranges();
+}
+
+
+const char* RegisterAllocator::RegisterName(int allocation_index) const {
+ if (mode() == GENERAL_REGISTERS) {
+ return data()->config()->general_register_name(allocation_index);
+ } else {
+ return data()->config()->double_register_name(allocation_index);
+ }
+}
+
+
LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
RegisterKind kind, Zone* local_zone)
: RegisterAllocator(data, kind),
@@ -1884,7 +1915,7 @@ void LinearScanAllocator::AllocateRegisters() {
SortUnhandled();
DCHECK(UnhandledIsSorted());
- auto& fixed_ranges = GetFixedRegisters(data(), mode());
+ auto& fixed_ranges = GetFixedRegisters();
for (auto current : fixed_ranges) {
if (current != nullptr) {
DCHECK_EQ(mode(), current->kind());
@@ -1959,15 +1990,6 @@ void LinearScanAllocator::AllocateRegisters() {
}
-const char* LinearScanAllocator::RegisterName(int allocation_index) const {
- if (mode() == GENERAL_REGISTERS) {
- return data()->config()->general_register_name(allocation_index);
- } else {
- return data()->config()->double_register_name(allocation_index);
- }
-}
-
-
void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
int reg) {
data()->MarkAllocated(range->kind(), reg);
@@ -2380,7 +2402,7 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
auto third_part_end = end.PrevStart().End();
- if (IsBlockBoundary(code(), end.Start())) {
+ if (data()->IsBlockBoundary(end.Start())) {
third_part_end = end.Start();
}
auto third_part = SplitBetween(
@@ -2398,446 +2420,6 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
}
-class CoalescedLiveRanges : public ZoneObject {
- public:
- explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
-
- LiveRange* Find(UseInterval* query) {
- ZoneSplayTree<Config>::Locator locator;
-
- if (storage_.Find(GetKey(query), &locator)) {
- return locator.value();
- }
- return nullptr;
- }
-
- // TODO(mtrofin): Change to void returning if we do not care if the interval
- // was previously added.
- bool Insert(LiveRange* range) {
- auto* interval = range->first_interval();
- while (interval != nullptr) {
- if (!Insert(interval, range)) return false;
- interval = interval->next();
- }
- return true;
- }
-
- bool Remove(LiveRange* range) {
- bool ret = false;
- auto* segment = range->first_interval();
- while (segment != nullptr) {
- ret |= Remove(segment);
- segment = segment->next();
- }
- return ret;
- }
-
- bool IsEmpty() { return storage_.is_empty(); }
-
- private:
- struct Config {
- typedef std::pair<int, int> Key;
- typedef LiveRange* Value;
- static const Key kNoKey;
- static Value NoValue() { return nullptr; }
- static int Compare(const Key& a, const Key& b) {
- if (a.second <= b.first) return -1;
- if (a.first >= b.second) return 1;
- return 0;
- }
- };
-
- Config::Key GetKey(UseInterval* interval) {
- if (interval == nullptr) return std::make_pair(0, 0);
- return std::make_pair(interval->start().value(), interval->end().value());
- }
-
- // TODO(mtrofin): Change to void returning if we do not care if the interval
- // was previously added.
- bool Insert(UseInterval* interval, LiveRange* range) {
- ZoneSplayTree<Config>::Locator locator;
- bool ret = storage_.Insert(GetKey(interval), &locator);
- if (ret) locator.set_value(range);
- return ret;
- }
-
- bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
-
- ZoneSplayTree<Config> storage_;
- DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
-
-
-const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0};
-
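The Config::Compare above is the heart of the structure being removed here: two keys compare equal exactly when their [start, end) intervals overlap, so a splay-tree lookup doubles as an interference query. The comparator in isolation:

    #include <utility>

    // Overlapping intervals compare "equal", so Find(interval) returns the
    // live range (if any) that conflicts with the queried interval.
    int CompareIntervals(const std::pair<int, int>& a,
                         const std::pair<int, int>& b) {
      if (a.second <= b.first) return -1;  // a ends before b starts
      if (a.first >= b.second) return 1;   // a starts after b ends
      return 0;                            // the intervals intersect
    }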
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
- RegisterKind kind, Zone* local_zone)
- : RegisterAllocator(data, kind),
- local_zone_(local_zone),
- allocations_(local_zone),
- queue_(local_zone) {}
-
-
-unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
- auto interval = range->first_interval();
- if (interval == nullptr) return 0;
-
- unsigned size = 0;
- while (interval != nullptr) {
- size += (interval->end().value() - interval->start().value());
- interval = interval->next();
- }
-
- return size;
-}
-
-
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
- allocations_[reg_id]->Insert(range);
- if (range->HasRegisterAssigned()) {
- DCHECK_EQ(reg_id, range->assigned_register());
- return;
- }
- range->set_assigned_register(reg_id);
- range->SetUseHints(reg_id);
- if (range->is_phi()) {
- data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
- }
-}
-
-
-float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
- InstructionOperand* first_hint = nullptr;
- if (range->FirstHintPosition() != nullptr) {
- first_hint = range->FirstHintPosition()->operand();
- }
-
- if (range->IsFixed()) return std::numeric_limits<float>::max();
- bool spill;
- if (!FindProgressingSplitPosition(range, &spill).IsValid())
- return std::numeric_limits<float>::max();
-
- float multiplier = 1.0;
- if (first_hint != nullptr && first_hint->IsRegister()) {
- multiplier = 3.0;
- }
-
- unsigned use_count = 0;
- auto* pos = range->first_pos();
- while (pos != nullptr) {
- use_count++;
- pos = pos->next();
- }
-
- unsigned range_size = GetLiveRangeSize(range);
- DCHECK_NE(0U, range_size);
-
- return multiplier * static_cast<float>(use_count) /
- static_cast<float>(range_size);
-}
-
-
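CalculateSpillWeight above reduces to use density with a hint bonus: fixed or unsplittable ranges report an infinite weight so they are never evicted. The heuristic on its own, with the inputs passed in directly:

    #include <limits>

    // Weight = uses per covered LifetimePosition, tripled when the first
    // hint is a register; immovable ranges get the maximum possible weight.
    float SpillWeightSketch(unsigned use_count, unsigned range_size,
                            bool first_hint_is_register, bool is_fixed) {
      if (is_fixed) return std::numeric_limits<float>::max();
      float multiplier = first_hint_is_register ? 3.0f : 1.0f;
      return multiplier * static_cast<float>(use_count) /
             static_cast<float>(range_size);
    }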
-float GreedyAllocator::CalculateMaxSpillWeight(
- const ZoneSet<LiveRange*>& ranges) {
- float max = 0.0;
- for (auto* r : ranges) {
- max = std::max(max, CalculateSpillWeight(r));
- }
- return max;
-}
-
-
-void GreedyAllocator::Evict(LiveRange* range) {
- bool removed = allocations_[range->assigned_register()]->Remove(range);
- CHECK(removed);
- range->UnsetUseHints();
- range->UnsetAssignedRegister();
- if (range->is_phi()) {
- data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
- }
-}
-
-
-bool GreedyAllocator::TryAllocatePhysicalRegister(
- unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
- auto* segment = range->first_interval();
-
- auto* alloc_info = allocations_[reg_id];
- while (segment != nullptr) {
- if (auto* existing = alloc_info->Find(segment)) {
- DCHECK(existing->HasRegisterAssigned());
- conflicting->insert(existing);
- }
- segment = segment->next();
- }
- if (!conflicting->empty()) return false;
- // No conflicts means we can safely allocate this register to this range.
- AssignRangeToRegister(reg_id, range);
- return true;
-}
-
-
-int GreedyAllocator::GetHintedRegister(LiveRange* range) {
- int reg;
- if (range->FirstHintPosition(&reg) != nullptr) {
- return reg;
- }
- return -1;
-}
-
-
-bool GreedyAllocator::TryAllocate(LiveRange* current,
- ZoneSet<LiveRange*>* conflicting) {
- if (current->IsFixed()) {
- return TryAllocatePhysicalRegister(current->assigned_register(), current,
- conflicting);
- }
-
- int hinted_reg_id = GetHintedRegister(current);
- if (hinted_reg_id >= 0) {
- if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
- return true;
- }
- }
-
- ZoneSet<LiveRange*> local_conflicts(local_zone());
- for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
- candidate_reg++) {
- if (hinted_reg_id >= 0 &&
- candidate_reg == static_cast<size_t>(hinted_reg_id))
- continue;
- local_conflicts.clear();
- if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
- conflicting->clear();
- return true;
- } else {
- conflicting->insert(local_conflicts.begin(), local_conflicts.end());
- }
- }
- return false;
-}
-
-
-LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
- LifetimePosition start,
- LifetimePosition until,
- LifetimePosition end) {
- CHECK(start < end);
- auto second_part = SplitRangeAt(range, start);
-
- if (second_part->Start() < end) {
- // The split result intersects with [start, end[.
- // Split it at position between ]start+1, end[, spill the middle part
- // and put the rest to unhandled.
- auto third_part_end = end.PrevStart().End();
- if (IsBlockBoundary(code(), end.Start())) {
- third_part_end = end.Start();
- }
- auto third_part = SplitBetween(
- second_part, Max(second_part->Start().End(), until), third_part_end);
-
- DCHECK(third_part != second_part);
-
- Spill(second_part);
- return third_part;
- } else {
- // The split result does not intersect with [start, end[.
- // Nothing to spill. Just return it for re-processing.
- return second_part;
- }
-}
-
-
-void GreedyAllocator::Enqueue(LiveRange* range) {
- if (range == nullptr || range->IsEmpty()) return;
- unsigned size = GetLiveRangeSize(range);
- TRACE("Enqueuing range %d\n", range->id());
- queue_.push(std::make_pair(size, range));
-}
-
-
-bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
- auto position = range->Start();
- TRACE("Processing interval %d start=%d\n", range->id(), position.value());
-
- if (!range->HasNoSpillType()) {
- TRACE("Live range %d already has a spill operand\n", range->id());
- auto next_pos = position;
- if (next_pos.IsGapPosition()) {
- next_pos = next_pos.NextStart();
- }
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == nullptr) {
- Spill(range);
- return true;
- } else if (pos->pos() > range->Start().NextStart()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- auto* reminder = SpillBetweenUntil(range, position, position, pos->pos());
- Enqueue(reminder);
- return true;
- }
- }
- return TryReuseSpillForPhi(range);
-}
-
-
-void GreedyAllocator::AllocateRegisters() {
- for (auto range : data()->live_ranges()) {
- if (range == nullptr) continue;
- if (range->kind() == mode()) {
- DCHECK(!range->HasRegisterAssigned() && !range->spilled());
- TRACE("Enqueueing live range %d to priority queue \n", range->id());
- Enqueue(range);
- }
- }
-
- allocations_.resize(num_registers());
- for (int i = 0; i < num_registers(); i++) {
- allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
- }
-
- for (auto* current : GetFixedRegisters(data(), mode())) {
- if (current != nullptr) {
- DCHECK_EQ(mode(), current->kind());
- int reg_nr = current->assigned_register();
- bool inserted = allocations_[reg_nr]->Insert(current);
- CHECK(inserted);
- }
- }
-
- while (!queue_.empty()) {
- auto current_pair = queue_.top();
- queue_.pop();
- auto current = current_pair.second;
- if (HandleSpillOperands(current)) {
- continue;
- }
- bool spill = false;
- ZoneSet<LiveRange*> conflicting(local_zone());
- if (!TryAllocate(current, &conflicting)) {
- DCHECK(!conflicting.empty());
- float this_weight = std::numeric_limits<float>::max();
- LifetimePosition split_pos =
- FindProgressingSplitPosition(current, &spill);
- if (split_pos.IsValid()) {
- this_weight = CalculateSpillWeight(current);
- }
-
- bool evicted = false;
- for (auto* conflict : conflicting) {
- if (CalculateSpillWeight(conflict) < this_weight) {
- Evict(conflict);
- Enqueue(conflict);
- evicted = true;
- }
- }
- if (evicted) {
- conflicting.clear();
- TryAllocate(current, &conflicting);
- }
- if (!conflicting.empty()) {
- DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
- DCHECK(split_pos.IsValid());
- AllocateBlockedRange(current, split_pos, spill);
- }
- }
- }
-
- for (size_t i = 0; i < allocations_.size(); ++i) {
- if (!allocations_[i]->IsEmpty()) {
- data()->MarkAllocated(mode(), static_cast<int>(i));
- }
- }
-}
-
-
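The removed allocation loop resolves conflicts by weight: every conflicting range strictly lighter than the incoming one is evicted and re-queued, and allocation is retried. The eviction step in outline, over stand-in types:

    #include <vector>

    struct RangeSketch { float weight; bool assigned; };

    // Evict each conflict that is cheaper to spill than the incoming range
    // and hand it back to the queue; the caller retries allocation if any
    // eviction happened.
    bool EvictLighterConflicts(const std::vector<RangeSketch*>& conflicting,
                               std::vector<RangeSketch*>* queue,
                               float this_weight) {
      bool evicted = false;
      for (RangeSketch* conflict : conflicting) {
        if (conflict->weight < this_weight) {
          conflict->assigned = false;   // Evict()
          queue->push_back(conflict);   // Enqueue()
          evicted = true;
        }
      }
      return evicted;
    }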
-LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) {
- auto ret = pos.PrevStart().End();
- if (IsBlockBoundary(code(), pos.Start())) {
- ret = pos.Start();
- }
- DCHECK(ret <= pos);
- return ret;
-}
-
-LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
- LiveRange* range, bool* is_spill_pos) {
- auto start = range->Start();
- auto end = range->End();
-
- UsePosition* next_reg_use = range->first_pos();
- while (next_reg_use != nullptr &&
- next_reg_use->type() != UsePositionType::kRequiresRegister) {
- next_reg_use = next_reg_use->next();
- }
-
- if (next_reg_use == nullptr) {
- *is_spill_pos = true;
- auto ret = FindOptimalSpillingPos(range, start);
- DCHECK(ret.IsValid());
- return ret;
- }
-
- *is_spill_pos = false;
- auto reg_pos = next_reg_use->pos();
- auto correct_pos = GetSplittablePos(reg_pos);
- if (start < correct_pos && correct_pos < end) {
- return correct_pos;
- }
-
- if (correct_pos >= end) {
- return LifetimePosition::Invalid();
- }
-
- // Correct_pos must be at or before start. Find the next use position.
- next_reg_use = next_reg_use->next();
- auto reference = reg_pos;
- while (next_reg_use != nullptr) {
- auto pos = next_reg_use->pos();
- // Skip over tight successive uses.
- if (reference.NextStart() < pos) {
- break;
- }
- reference = pos;
- next_reg_use = next_reg_use->next();
- }
-
- if (next_reg_use == nullptr) {
- // While there may not be another use, we may still have space in the range
- // to clip off.
- correct_pos = reference.NextStart();
- if (start < correct_pos && correct_pos < end) {
- return correct_pos;
- }
- return LifetimePosition::Invalid();
- }
-
- correct_pos = GetSplittablePos(next_reg_use->pos());
- if (start < correct_pos && correct_pos < end) {
- DCHECK(reference < correct_pos);
- return correct_pos;
- }
- return LifetimePosition::Invalid();
-}
-
-
-void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
- LifetimePosition pos, bool spill) {
- auto tail = SplitRangeAt(current, pos);
- if (spill) {
- Spill(tail);
- } else {
- Enqueue(tail);
- }
- if (tail != current) {
- Enqueue(current);
- }
-}
-
-
SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
: data_(data) {}
@@ -2857,91 +2439,6 @@ void SpillSlotLocator::LocateSpillSlots() {
}
-bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) {
- if (range->IsChild() || !range->is_phi()) return false;
- DCHECK(!range->HasSpillOperand());
-
- auto phi_map_value = data()->GetPhiMapValueFor(range->id());
- auto phi = phi_map_value->phi();
- auto block = phi_map_value->block();
- // Count the number of spilled operands.
- size_t spilled_count = 0;
- LiveRange* first_op = nullptr;
- for (size_t i = 0; i < phi->operands().size(); i++) {
- int op = phi->operands()[i];
- LiveRange* op_range = LiveRangeFor(op);
- if (!op_range->HasSpillRange()) continue;
- auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- while (op_range != nullptr && !op_range->CanCover(pred_end)) {
- op_range = op_range->next();
- }
- if (op_range != nullptr && op_range->spilled()) {
- spilled_count++;
- if (first_op == nullptr) {
- first_op = op_range->TopLevel();
- }
- }
- }
-
- // Only continue if more than half of the operands are spilled.
- if (spilled_count * 2 <= phi->operands().size()) {
- return false;
- }
-
- // Try to merge the spilled operands and count the number of merged spilled
- // operands.
- DCHECK(first_op != nullptr);
- auto first_op_spill = first_op->GetSpillRange();
- size_t num_merged = 1;
- for (size_t i = 1; i < phi->operands().size(); i++) {
- int op = phi->operands()[i];
- auto op_range = LiveRangeFor(op);
- if (!op_range->HasSpillRange()) continue;
- auto op_spill = op_range->GetSpillRange();
- if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
- num_merged++;
- }
- }
-
- // Only continue if enough operands could be merged to the
- // same spill slot.
- if (num_merged * 2 <= phi->operands().size() ||
- AreUseIntervalsIntersecting(first_op_spill->interval(),
- range->first_interval())) {
- return false;
- }
-
- // If the range does not need register soon, spill it to the merged
- // spill range.
- auto next_pos = range->Start();
- if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
- if (pos == nullptr) {
- auto spill_range =
- range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range->TopLevel());
- bool merged = first_op_spill->TryMerge(spill_range);
- CHECK(merged);
- Spill(range);
- return true;
- } else if (pos->pos() > range->Start().NextStart()) {
- auto spill_range =
- range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : data()->AssignSpillRangeToLiveRange(range->TopLevel());
- bool merged = first_op_spill->TryMerge(spill_range);
- CHECK(merged);
- Enqueue(
- SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos()));
- return true;
- }
- return false;
-}
-
-
OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
@@ -3329,7 +2826,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
// boundary.
if (second_range->spilled()) continue;
if (first_range->End() != pos) continue;
- if (IsBlockBoundary(code(), pos) &&
+ if (data()->IsBlockBoundary(pos) &&
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
continue;
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index baffedd919..83f95cbac6 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -428,6 +428,15 @@ class LiveRange final : public ZoneObject {
return spills_at_definition_;
}
+ // Used solely by the Greedy Allocator:
+ unsigned GetSize();
+ float weight() const { return weight_; }
+ void set_weight(float weight) { weight_ = weight; }
+
+ static const int kInvalidSize = -1;
+ static const float kInvalidWeight;
+ static const float kMaxWeight;
+
private:
void set_spill_type(SpillType value) {
bits_ = SpillTypeField::update(bits_, value);
@@ -468,6 +477,14 @@ class LiveRange final : public ZoneObject {
// This is used as a cache, it's invalid outside of BuildLiveRanges.
mutable UsePosition* current_hint_position_;
+ // greedy: the number of LifetimePositions covered by this range. Used to
+ // prioritize selecting live ranges for register assignment, as well as
+ // in weight calculations.
+ int size_;
+
+ // greedy: a metric for resolving conflicts between ranges with an assigned
+ // register and ranges that intersect them and need a register.
+ float weight_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
@@ -607,6 +624,7 @@ class RegisterAllocationData final : public ZoneObject {
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi);
PhiMapValue* GetPhiMapValueFor(int virtual_register);
+ bool IsBlockBoundary(LifetimePosition pos) const;
private:
Zone* const allocation_zone_;
@@ -768,6 +786,9 @@ class RegisterAllocator : public ZoneObject {
LifetimePosition FindOptimalSpillingPos(LiveRange* range,
LifetimePosition pos);
+ const ZoneVector<LiveRange*>& GetFixedRegisters() const;
+ const char* RegisterName(int allocation_index) const;
+
private:
RegisterAllocationData* const data_;
const RegisterKind mode_;
@@ -786,8 +807,6 @@ class LinearScanAllocator final : public RegisterAllocator {
void AllocateRegisters();
private:
- const char* RegisterName(int allocation_index) const;
-
ZoneVector<LiveRange*>& unhandled_live_ranges() {
return unhandled_live_ranges_;
}
@@ -840,55 +859,6 @@ class LinearScanAllocator final : public RegisterAllocator {
DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
};
-class CoalescedLiveRanges;
-
-
-// A variant of the LLVM Greedy Register Allocator. See
-// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
-class GreedyAllocator final : public RegisterAllocator {
- public:
- explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
- Zone* local_zone);
-
- void AllocateRegisters();
-
- private:
- LifetimePosition GetSplittablePos(LifetimePosition pos);
- const RegisterConfiguration* config() const { return data()->config(); }
- Zone* local_zone() const { return local_zone_; }
- bool TryReuseSpillForPhi(LiveRange* range);
- int GetHintedRegister(LiveRange* range);
-
- typedef ZonePriorityQueue<std::pair<unsigned, LiveRange*>> PQueue;
-
- unsigned GetLiveRangeSize(LiveRange* range);
- void Enqueue(LiveRange* range);
-
- void Evict(LiveRange* range);
- float CalculateSpillWeight(LiveRange* range);
- float CalculateMaxSpillWeight(const ZoneSet<LiveRange*>& ranges);
-
-
- bool TryAllocate(LiveRange* current, ZoneSet<LiveRange*>* conflicting);
- bool TryAllocatePhysicalRegister(unsigned reg_id, LiveRange* range,
- ZoneSet<LiveRange*>* conflicting);
- bool HandleSpillOperands(LiveRange* range);
- void AllocateBlockedRange(LiveRange* current, LifetimePosition pos,
- bool spill);
-
- LiveRange* SpillBetweenUntil(LiveRange* range, LifetimePosition start,
- LifetimePosition until, LifetimePosition end);
- void AssignRangeToRegister(int reg_id, LiveRange* range);
-
- LifetimePosition FindProgressingSplitPosition(LiveRange* range,
- bool* is_spill_pos);
-
- Zone* local_zone_;
- ZoneVector<CoalescedLiveRanges*> allocations_;
- PQueue queue_;
- DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
-};
-
class SpillSlotLocator final : public ZoneObject {
public:
diff --git a/deps/v8/src/compiler/register-configuration.cc b/deps/v8/src/compiler/register-configuration.cc
index 30946fc373..a3d3be1790 100644
--- a/deps/v8/src/compiler/register-configuration.cc
+++ b/deps/v8/src/compiler/register-configuration.cc
@@ -21,8 +21,13 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public:
ArchDefaultRegisterConfiguration()
: RegisterConfiguration(Register::kMaxNumAllocatableRegisters,
+#if V8_TARGET_ARCH_X87
+ 1,
+ 1,
+#else
DoubleRegister::kMaxNumAllocatableRegisters,
DoubleRegister::NumAllocatableAliasedRegisters(),
+#endif
general_register_name_table_,
double_register_name_table_) {
DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
@@ -46,7 +51,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
static base::LazyInstance<ArchDefaultRegisterConfiguration>::type
kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-} // namepace
+} // namespace
const RegisterConfiguration* RegisterConfiguration::ArchDefault() {
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 6ec4b86bb4..9538684af2 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -232,13 +232,13 @@ class RepresentationChanger {
// Select the correct X -> Word32 truncation operator.
const Operator* op = NULL;
if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToInt32();
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
} else if (output_type & kRepFloat32) {
node = InsertChangeFloat32ToFloat64(node);
- op = machine()->TruncateFloat64ToInt32();
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
} else if (output_type & kRepTagged) {
node = InsertChangeTaggedToFloat64(node);
- op = machine()->TruncateFloat64ToInt32();
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
} else {
return TypeError(node, output_type, kRepWord32);
}
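TruncationMode::kJavaScript selects ECMAScript ToInt32 semantics, under which out-of-range doubles wrap modulo 2^32 rather than invoking the undefined behavior of a plain C++ cast. A self-contained model of that conversion:

    #include <cmath>
    #include <cstdint>

    // ECMAScript ToInt32: NaN and +/-Infinity map to 0; everything else is
    // truncated toward zero and wrapped modulo 2^32, so ToInt32(4294967296.5)
    // is 0 and ToInt32(-1.9) is -1.
    int32_t JavaScriptTruncateToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);
      double modulo = std::fmod(truncated, 4294967296.0);  // (-2^32, 2^32)
      if (modulo < 0) modulo += 4294967296.0;              // [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(modulo));
    }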
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index adb80a7b08..69ece96d4e 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -145,8 +145,7 @@ BasicBlock* Schedule::block(Node* node) const {
bool Schedule::IsScheduled(Node* node) {
- int length = static_cast<int>(nodeid_to_block_.size());
- if (node->id() >= length) return false;
+ if (node->id() >= nodeid_to_block_.size()) return false;
return nodeid_to_block_[node->id()] != NULL;
}
@@ -324,8 +323,7 @@ void Schedule::SetControlInput(BasicBlock* block, Node* node) {
void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
- int length = static_cast<int>(nodeid_to_block_.size());
- if (node->id() >= length) {
+ if (node->id() >= nodeid_to_block_.size()) {
nodeid_to_block_.resize(node->id() + 1);
}
nodeid_to_block_[node->id()] = block;
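Both Schedule hunks drop the same narrowing cast: comparing the node id directly against size() keeps the bounds check exact (and presumably matches an unsigned NodeId type), whereas truncating the container size to int is unreliable in principle. The hazard in miniature:

    #include <cstddef>

    // The old form narrowed size() to int before comparing, which can
    // truncate for sizes above INT_MAX; the new form compares unsigned
    // values directly.
    bool InBoundsOld(std::size_t id, std::size_t size) {
      int length = static_cast<int>(size);   // narrowing conversion
      return static_cast<int>(id) < length;  // unreliable for large values
    }

    bool InBoundsNew(std::size_t id, std::size_t size) {
      return id < size;  // exact, no conversions
    }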
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 862969e752..aa9a7cfdb2 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -1580,13 +1580,11 @@ class ScheduleLateNodeVisitor {
Node* CloneNode(Node* node) {
int const input_count = node->InputCount();
- Node** const inputs = scheduler_->zone_->NewArray<Node*>(input_count);
for (int index = 0; index < input_count; ++index) {
Node* const input = node->InputAt(index);
scheduler_->IncrementUnscheduledUseCount(input, index, node);
- inputs[index] = input;
}
- Node* copy = scheduler_->graph_->NewNode(node->op(), input_count, inputs);
+ Node* const copy = scheduler_->graph_->CloneNode(node);
TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
copy->id());
scheduler_->node_data_.resize(copy->id() + 1,
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 39246df246..006b6ab28f 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -344,6 +344,8 @@ class RepresentationSelector {
} else if (upper->Is(Type::Number())) {
// multiple uses => pick kRepFloat64.
return kRepFloat64;
+ } else if (upper->Is(Type::Internal())) {
+ return kMachPtr;
}
return kRepTagged;
}
@@ -444,7 +446,9 @@ class RepresentationSelector {
}
bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
+ return BothInputsAre(node, Type::Signed32()) &&
+ (!CanObserveNonInt32(use) ||
+ NodeProperties::GetBounds(node).upper->Is(Type::Signed32()));
}
bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
@@ -453,7 +457,9 @@ class RepresentationSelector {
}
bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Unsigned32()) && !CanObserveNonUint32(use);
+ return BothInputsAre(node, Type::Unsigned32()) &&
+ (!CanObserveNonUint32(use) ||
+ NodeProperties::GetBounds(node).upper->Is(Type::Unsigned32()));
}
bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
@@ -683,6 +689,21 @@ class RepresentationSelector {
if (lower()) node->set_op(Float64Op(node));
break;
}
+ case IrOpcode::kNumberShiftLeft: {
+ VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
+ if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shl());
+ break;
+ }
+ case IrOpcode::kNumberShiftRight: {
+ VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
+ if (lower()) lowering->DoShift(node, lowering->machine()->Word32Sar());
+ break;
+ }
+ case IrOpcode::kNumberShiftRightLogical: {
+ VisitBinop(node, kMachUint32, kMachUint32, kMachUint32);
+ if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shr());
+ break;
+ }
case IrOpcode::kNumberToInt32: {
MachineTypeUnion use_rep = use & kRepMask;
Node* input = node->InputAt(0);
@@ -706,8 +727,10 @@ class RepresentationSelector {
// Require the input in float64 format and perform truncation.
// TODO(turbofan): avoid a truncation with a smi check.
VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
- if (lower())
- node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+ if (lower()) {
+ node->set_op(lowering->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript));
+ }
}
break;
}
@@ -734,8 +757,10 @@ class RepresentationSelector {
// Require the input in float64 format and perform truncation.
// TODO(turbofan): avoid a truncation with a smi check.
VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
- if (lower())
- node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+ if (lower()) {
+ node->set_op(lowering->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript));
+ }
}
break;
}
@@ -776,11 +801,6 @@ class RepresentationSelector {
if (lower()) lowering->DoStringLessThanOrEqual(node);
break;
}
- case IrOpcode::kStringAdd: {
- VisitBinop(node, kMachAnyTagged, kMachAnyTagged);
- if (lower()) lowering->DoStringAdd(node);
- break;
- }
case IrOpcode::kAllocate: {
ProcessInput(node, 0, kMachAnyTagged);
ProcessRemainingInputs(node, 1);
@@ -992,6 +1012,9 @@ class RepresentationSelector {
case IrOpcode::kTruncateFloat64ToFloat32:
return VisitUnop(node, kTypeNumber | kRepFloat64,
kTypeNumber | kRepFloat32);
+ case IrOpcode::kTruncateFloat64ToInt32:
+ return VisitUnop(node, kTypeNumber | kRepFloat64,
+ kTypeInt32 | kRepWord32);
case IrOpcode::kTruncateInt64ToInt32:
// TODO(titzer): Is kTypeInt32 correct here?
return VisitUnop(node, kTypeInt32 | kRepWord64,
@@ -1037,6 +1060,7 @@ class RepresentationSelector {
case IrOpcode::kFloat64InsertHighWord32:
return VisitBinop(node, kMachFloat64, kMachInt32, kMachFloat64);
case IrOpcode::kLoadStackPointer:
+ case IrOpcode::kLoadFramePointer:
return VisitLeaf(node, kMachPtr);
case IrOpcode::kStateValues:
VisitStateValues(node);
@@ -1083,7 +1107,7 @@ class RepresentationSelector {
private:
JSGraph* jsgraph_;
- int count_; // number of nodes in the graph
+ size_t const count_; // number of nodes in the graph
NodeInfo* info_; // node id -> usage information
NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
@@ -1116,6 +1140,14 @@ Node* SimplifiedLowering::IsTagged(Node* node) {
}
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
+ SourcePositionTable* source_positions)
+ : jsgraph_(jsgraph),
+ zone_(zone),
+ zero_thirtyone_range_(Type::Range(0, 31, zone)),
+ source_positions_(source_positions) {}
+
+
void SimplifiedLowering::LowerAllNodes() {
SimplifiedOperatorBuilder simplified(graph()->zone());
RepresentationChanger changer(jsgraph(), &simplified, jsgraph()->isolate());
@@ -1268,7 +1300,7 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
// Replace effect uses of {node} with the {ephi}.
- NodeProperties::ReplaceWithValue(node, node, ephi);
+ NodeProperties::ReplaceUses(node, node, ephi);
// Turn the {node} into a Phi.
node->set_op(common()->Phi(output_type, 2));
@@ -1307,23 +1339,6 @@ void SimplifiedLowering::DoStoreElement(Node* node) {
}
-void SimplifiedLowering::DoStringAdd(Node* node) {
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::StringAdd(
- jsgraph()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph()->isolate(), zone(), callable.descriptor(), 0, flags,
- properties);
- node->set_op(common()->Call(desc));
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->AppendInput(graph()->zone(), jsgraph()->UndefinedConstant());
- node->AppendInput(graph()->zone(), graph()->start());
- node->AppendInput(graph()->zone(), graph()->start());
-}
-
-
Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
Runtime::FunctionId f =
requires_ordering ? Runtime::kStringCompareRT : Runtime::kStringEquals;
@@ -1591,6 +1606,17 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
}
+void SimplifiedLowering::DoShift(Node* node, Operator const* op) {
+ node->set_op(op);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ Type* const rhs_type = NodeProperties::GetBounds(rhs).upper;
+ if (!rhs_type->Is(zero_thirtyone_range_)) {
+ node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
+ jsgraph()->Int32Constant(0x1f)));
+ }
+}
+
+
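DoShift encodes the JavaScript rule that a shift count is taken modulo 32: when the typer cannot prove the right-hand side already lies in [0, 31] (the zero_thirtyone_range_ introduced above), it masks the count with 0x1f. A one-line model of the semantics:

    #include <cstdint>

    // JavaScript `x << n` uses only the low five bits of n, so 1 << 33 === 2.
    // Shifting in unsigned arithmetic sidesteps signed-overflow UB in C++.
    int32_t JsShiftLeft(int32_t lhs, uint32_t rhs) {
      return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (rhs & 0x1f));
    }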
void SimplifiedLowering::DoStringEqual(Node* node) {
node->set_op(machine()->WordEqual());
node->ReplaceInput(0, StringComparison(node, false));
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 124090efb5..302908d5d8 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -21,8 +21,7 @@ class SourcePositionTable;
class SimplifiedLowering final {
public:
SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
- SourcePositionTable* source_positions)
- : jsgraph_(jsgraph), zone_(zone), source_positions_(source_positions) {}
+ SourcePositionTable* source_positions);
~SimplifiedLowering() {}
void LowerAllNodes();
@@ -38,7 +37,7 @@ class SimplifiedLowering final {
void DoStoreBuffer(Node* node);
void DoLoadElement(Node* node);
void DoStoreElement(Node* node);
- void DoStringAdd(Node* node);
+ void DoShift(Node* node, Operator const* op);
void DoStringEqual(Node* node);
void DoStringLessThan(Node* node);
void DoStringLessThanOrEqual(Node* node);
@@ -46,6 +45,7 @@ class SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
Zone* const zone_;
+ Type* const zero_thirtyone_range_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index d64a95c62e..2e87f362e7 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -23,32 +23,25 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
- HeapObjectMatcher<HeapObject> m(node->InputAt(0));
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
- return Replace(jsgraph()->TrueConstant());
+ HeapObjectMatcher m(node->InputAt(0));
+ if (m.HasValue()) {
+ return Replace(
+ jsgraph()->BooleanConstant(!m.Value().handle()->BooleanValue()));
}
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
- return Replace(jsgraph()->FalseConstant());
- }
- if (m.IsBooleanNot()) return Replace(m.node()->InputAt(0));
+ if (m.IsBooleanNot()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeBitToBool: {
Int32Matcher m(node->InputAt(0));
if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
- if (m.IsChangeBoolToBit()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeBoolToBit()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeBoolToBit: {
- HeapObjectMatcher<HeapObject> m(node->InputAt(0));
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
- return ReplaceInt32(0);
- }
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
- return ReplaceInt32(1);
- }
- if (m.IsChangeBitToBool()) return Replace(m.node()->InputAt(0));
+ HeapObjectMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(m.Value().handle()->BooleanValue());
+ if (m.IsChangeBitToBool()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToTagged: {
@@ -66,12 +59,10 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceFloat64(m.Value());
if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
if (m.IsChangeInt32ToTagged()) {
- return Change(node, machine()->ChangeInt32ToFloat64(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
}
if (m.IsChangeUint32ToTagged()) {
- return Change(node, machine()->ChangeUint32ToFloat64(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeUint32ToFloat64(), m.InputAt(0));
}
break;
}
@@ -79,20 +70,18 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
if (m.IsChangeFloat64ToTagged()) {
- return Change(node, machine()->ChangeFloat64ToInt32(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
- if (m.IsChangeInt32ToTagged()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeInt32ToTagged()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeTaggedToUint32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
if (m.IsChangeFloat64ToTagged()) {
- return Change(node, machine()->ChangeFloat64ToUint32(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
}
- if (m.IsChangeUint32ToTagged()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeUint32ToTagged()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeUint32ToTagged: {
@@ -100,21 +89,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
- case IrOpcode::kStoreField: {
- // TODO(turbofan): Poor man's store elimination, remove this once we have
- // a fully featured store elimination in place.
- Node* const effect = node->InputAt(2);
- if (effect->op()->Equals(node->op()) && effect->OwnedBy(node) &&
- effect->InputAt(0) == node->InputAt(0)) {
- // The {effect} is a store to the same field in the same object, and
- // {node} is the only effect observer, so we can kill {effect} and
- // instead make {node} depend on the incoming effect to {effect}.
- node->ReplaceInput(2, effect->InputAt(2));
- effect->Kill();
- return Changed(node);
- }
- break;
- }
default:
break;
}
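The reducer now folds through HasValue() plus BooleanValue() instead of matching the true and false heap singletons individually, which also covers any other constant with a known boolean coercion. The shape of the fold, on a toy operand:

    #include <optional>

    // If the operand's boolean value is statically known, BooleanNot and
    // ChangeBoolToBit fold to constants; otherwise the node is left alone.
    std::optional<bool> FoldBooleanNot(std::optional<bool> operand_value) {
      if (operand_value.has_value()) return !*operand_value;  // constant-fold
      return std::nullopt;                                    // no reduction
    }

    std::optional<int> FoldChangeBoolToBit(std::optional<bool> operand_value) {
      if (operand_value.has_value()) return *operand_value ? 1 : 0;
      return std::nullopt;
    }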
@@ -155,11 +129,6 @@ Reduction SimplifiedOperatorReducer::ReplaceNumber(int32_t value) {
Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
-Factory* SimplifiedOperatorReducer::factory() const {
- return jsgraph()->isolate()->factory();
-}
-
-
MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
return jsgraph()->machine();
}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 53e9821f2f..c302250d26 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -10,17 +10,13 @@
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class Factory;
-class Heap;
-
namespace compiler {
// Forward declarations.
class JSGraph;
class MachineOperatorBuilder;
+
class SimplifiedOperatorReducer final : public Reducer {
public:
explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
@@ -39,12 +35,11 @@ class SimplifiedOperatorReducer final : public Reducer {
Reduction ReplaceNumber(int32_t value);
Graph* graph() const;
- Factory* factory() const;
JSGraph* jsgraph() const { return jsgraph_; }
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- JSGraph* jsgraph_;
+ JSGraph* const jsgraph_;
SimplifiedOperatorBuilder simplified_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9b34668d5c..d401fb7862 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -157,33 +157,35 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1) \
- V(BooleanToNumber, Operator::kNoProperties, 1) \
- V(NumberEqual, Operator::kCommutative, 2) \
- V(NumberLessThan, Operator::kNoProperties, 2) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
- V(NumberAdd, Operator::kCommutative, 2) \
- V(NumberSubtract, Operator::kNoProperties, 2) \
- V(NumberMultiply, Operator::kCommutative, 2) \
- V(NumberDivide, Operator::kNoProperties, 2) \
- V(NumberModulus, Operator::kNoProperties, 2) \
- V(NumberToInt32, Operator::kNoProperties, 1) \
- V(NumberToUint32, Operator::kNoProperties, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
- V(StringEqual, Operator::kCommutative, 2) \
- V(StringLessThan, Operator::kNoProperties, 2) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
- V(StringAdd, Operator::kNoProperties, 2) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
- V(ChangeBoolToBit, Operator::kNoProperties, 1) \
- V(ChangeBitToBool, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1) \
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1) \
+ V(BooleanToNumber, Operator::kNoProperties, 1) \
+ V(NumberEqual, Operator::kCommutative, 2) \
+ V(NumberLessThan, Operator::kNoProperties, 2) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(NumberAdd, Operator::kCommutative, 2) \
+ V(NumberSubtract, Operator::kNoProperties, 2) \
+ V(NumberMultiply, Operator::kCommutative, 2) \
+ V(NumberDivide, Operator::kNoProperties, 2) \
+ V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2) \
+ V(NumberShiftRight, Operator::kNoProperties, 2) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
+ V(NumberToInt32, Operator::kNoProperties, 1) \
+ V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
+ V(StringEqual, Operator::kCommutative, 2) \
+ V(StringLessThan, Operator::kNoProperties, 2) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeBoolToBit, Operator::kNoProperties, 1) \
+ V(ChangeBitToBool, Operator::kNoProperties, 1) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1) \
V(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 484b39b18c..1460cb04f3 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -139,6 +139,9 @@ class SimplifiedOperatorBuilder final {
const Operator* NumberMultiply();
const Operator* NumberDivide();
const Operator* NumberModulus();
+ const Operator* NumberShiftLeft();
+ const Operator* NumberShiftRight();
+ const Operator* NumberShiftRightLogical();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
@@ -149,7 +152,6 @@ class SimplifiedOperatorBuilder final {
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
- const Operator* StringAdd();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/source-position.cc
index 97577c7966..48361ecac7 100644
--- a/deps/v8/src/compiler/source-position.cc
+++ b/deps/v8/src/compiler/source-position.cc
@@ -15,8 +15,7 @@ class SourcePositionTable::Decorator final : public GraphDecorator {
explicit Decorator(SourcePositionTable* source_positions)
: source_positions_(source_positions) {}
- void Decorate(Node* node, bool incomplete) final {
- DCHECK(!source_positions_->current_position_.IsInvalid());
+ void Decorate(Node* node) final {
source_positions_->table_.Set(node, source_positions_->current_position_);
}
@@ -28,7 +27,7 @@ class SourcePositionTable::Decorator final : public GraphDecorator {
SourcePositionTable::SourcePositionTable(Graph* graph)
: graph_(graph),
decorator_(nullptr),
- current_position_(SourcePosition::Invalid()),
+ current_position_(SourcePosition::Unknown()),
table_(graph->zone()) {}
@@ -56,7 +55,7 @@ void SourcePositionTable::Print(std::ostream& os) const {
bool needs_comma = false;
for (auto i : table_) {
SourcePosition pos = i.second;
- if (!pos.IsUnknown()) {
+ if (pos.IsKnown()) {
if (needs_comma) {
os << ",";
}
diff --git a/deps/v8/src/compiler/source-position.h b/deps/v8/src/compiler/source-position.h
index 3033f74d17..81db1d2e3e 100644
--- a/deps/v8/src/compiler/source-position.h
+++ b/deps/v8/src/compiler/source-position.h
@@ -20,16 +20,12 @@ class SourcePosition final {
static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
bool IsUnknown() const { return raw() == kUnknownPosition; }
-
- static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); }
- bool IsInvalid() const { return raw() == kInvalidPosition; }
+ bool IsKnown() const { return raw() != kUnknownPosition; }
int raw() const { return raw_; }
private:
- static const int kInvalidPosition = -2;
static const int kUnknownPosition = RelocInfo::kNoPosition;
- STATIC_ASSERT(kInvalidPosition != kUnknownPosition);
int raw_;
};
@@ -61,9 +57,7 @@ class SourcePositionTable final {
private:
void Init(SourcePosition position) {
- if (!position.IsUnknown() || prev_position_.IsInvalid()) {
- source_positions_->current_position_ = position;
- }
+ if (position.IsKnown()) source_positions_->current_position_ = position;
}
SourcePositionTable* const source_positions_;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 3e9d3d98ec..3f4bd27ec6 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -2,203 +2,147 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/typer.h"
+
#include "src/base/flags.h"
+#include "src/base/lazy-instance.h"
#include "src/bootstrapper.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/typer.h"
namespace v8 {
namespace internal {
namespace compiler {
-#define NATIVE_TYPES(V) \
- V(Int8) \
- V(Uint8) \
- V(Int16) \
- V(Uint16) \
- V(Int32) \
- V(Uint32) \
- V(Float32) \
- V(Float64)
-
-enum LazyCachedType {
- kNumberFunc0,
- kNumberFunc1,
- kNumberFunc2,
- kImulFunc,
- kClz32Func,
- kArrayBufferFunc,
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- k##Type, k##Type##Array, k##Type##ArrayFunc,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- kNumLazyCachedTypes
-};
-
+class TyperCache final {
+ private:
+  // This must be declared first: non-static members are initialized in
+  // declaration order, and the initializers below allocate from this zone.
+ Zone zone_;
-// Constructs and caches types lazily.
-// TODO(turbofan): these types could be globally cached or cached per isolate.
-class LazyTypeCache final : public ZoneObject {
public:
- explicit LazyTypeCache(Isolate* isolate, Zone* zone)
- : isolate_(isolate), zone_(zone) {
- memset(cache_, 0, sizeof(cache_));
- }
-
- inline Type* Get(LazyCachedType type) {
- int index = static_cast<int>(type);
- DCHECK(index < kNumLazyCachedTypes);
- if (cache_[index] == NULL) cache_[index] = Create(type);
- return cache_[index];
- }
+ TyperCache() = default;
+
+ Type* const kInt8 =
+ CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
+ Type* const kUint8 =
+ CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
+ Type* const kUint8Clamped = kUint8;
+ Type* const kInt16 =
+ CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
+ Type* const kUint16 =
+ CreateNative(CreateRange<uint16_t>(), Type::UntaggedUnsigned16());
+ Type* const kInt32 = CreateNative(Type::Signed32(), Type::UntaggedSigned32());
+ Type* const kUint32 =
+ CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
+ Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
+ Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
+
+ Type* const kSingletonZero = CreateRange(0.0, 0.0);
+ Type* const kSingletonOne = CreateRange(1.0, 1.0);
+ Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroish =
+ Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
+ Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
+ Type* const kWeakint = Type::Union(kInteger, Type::MinusZeroOrNaN(), zone());
+ Type* const kWeakintFunc1 = Type::Function(kWeakint, Type::Number(), zone());
+
+ Type* const kRandomFunc0 = Type::Function(Type::OrderedNumber(), zone());
+ Type* const kAnyFunc0 = Type::Function(Type::Any(), zone());
+ Type* const kAnyFunc1 = Type::Function(Type::Any(), Type::Any(), zone());
+ Type* const kAnyFunc2 =
+ Type::Function(Type::Any(), Type::Any(), Type::Any(), zone());
+ Type* const kAnyFunc3 = Type::Function(Type::Any(), Type::Any(), Type::Any(),
+ Type::Any(), zone());
+ Type* const kNumberFunc0 = Type::Function(Type::Number(), zone());
+ Type* const kNumberFunc1 =
+ Type::Function(Type::Number(), Type::Number(), zone());
+ Type* const kNumberFunc2 =
+ Type::Function(Type::Number(), Type::Number(), Type::Number(), zone());
+ Type* const kImulFunc = Type::Function(Type::Signed32(), Type::Integral32(),
+ Type::Integral32(), zone());
+ Type* const kClz32Func =
+ Type::Function(CreateRange(0, 32), Type::Number(), zone());
+
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ Type* const k##TypeName##Array = CreateArray(k##TypeName);
+ TYPED_ARRAYS(TYPED_ARRAY)
+#undef TYPED_ARRAY
private:
- Type* Create(LazyCachedType type) {
- switch (type) {
- case kInt8:
- return CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
- case kUint8:
- return CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
- case kInt16:
- return CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
- case kUint16:
- return CreateNative(CreateRange<uint16_t>(),
- Type::UntaggedUnsigned16());
- case kInt32:
- return CreateNative(Type::Signed32(), Type::UntaggedSigned32());
- case kUint32:
- return CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
- case kFloat32:
- return CreateNative(Type::Number(), Type::UntaggedFloat32());
- case kFloat64:
- return CreateNative(Type::Number(), Type::UntaggedFloat64());
- case kUint8Clamped:
- return Get(kUint8);
- case kNumberFunc0:
- return Type::Function(Type::Number(), zone());
- case kNumberFunc1:
- return Type::Function(Type::Number(), Type::Number(), zone());
- case kNumberFunc2:
- return Type::Function(Type::Number(), Type::Number(), Type::Number(),
- zone());
- case kImulFunc:
- return Type::Function(Type::Signed32(), Type::Integral32(),
- Type::Integral32(), zone());
- case kClz32Func:
- return Type::Function(CreateRange(0, 32), Type::Number(), zone());
- case kArrayBufferFunc:
- return Type::Function(Type::Object(zone()), Type::Unsigned32(), zone());
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case k##Type##Array: \
- return CreateArray(Get(k##Type)); \
- case k##Type##ArrayFunc: \
- return CreateArrayFunction(Get(k##Type##Array));
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- case kNumLazyCachedTypes:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Type* CreateArray(Type* element) const {
- return Type::Array(element, zone());
- }
+ Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
- Type* CreateArrayFunction(Type* array) const {
+ Type* CreateArrayFunction(Type* array) {
Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
Type* arg3 = arg2;
return Type::Function(array, arg1, arg2, arg3, zone());
}
- Type* CreateNative(Type* semantic, Type* representation) const {
+ Type* CreateNative(Type* semantic, Type* representation) {
return Type::Intersect(semantic, representation, zone());
}
template <typename T>
- Type* CreateRange() const {
+ Type* CreateRange() {
return CreateRange(std::numeric_limits<T>::min(),
std::numeric_limits<T>::max());
}
- Type* CreateRange(double min, double max) const {
+ Type* CreateRange(double min, double max) {
return Type::Range(min, max, zone());
}
- Factory* factory() const { return isolate()->factory(); }
- Isolate* isolate() const { return isolate_; }
- Zone* zone() const { return zone_; }
-
- Type* cache_[kNumLazyCachedTypes];
- Isolate* isolate_;
- Zone* zone_;
+ Zone* zone() { return &zone_; }
};
+namespace {
+
+base::LazyInstance<TyperCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+
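The initialization constraint on TyperCache leans on a C++ guarantee: non-static data members are initialized in declaration order, so with zone_ declared first, every Type* const member whose in-class initializer calls zone() sees a fully constructed zone. The rule in miniature, with hypothetical names:

    struct InitOrderSketch {
      int zone_ = 42;            // declared first, so constructed first
      int derived_ = zone_ * 2;  // safe: zone_ is already initialized
      // Declaring derived_ above zone_ would read an uninitialized member.
    };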
class Typer::Decorator final : public GraphDecorator {
public:
explicit Decorator(Typer* typer) : typer_(typer) {}
- void Decorate(Node* node, bool incomplete) final;
+ void Decorate(Node* node) final;
private:
- Typer* typer_;
+ Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, Graph* graph, MaybeHandle<Context> context)
+Typer::Typer(Isolate* isolate, Graph* graph, Type::FunctionType* function_type)
: isolate_(isolate),
graph_(graph),
- context_(context),
- decorator_(NULL),
- cache_(new (graph->zone()) LazyTypeCache(isolate, graph->zone())) {
+ function_type_(function_type),
+ decorator_(nullptr),
+ cache_(kCache.Get()) {
Zone* zone = this->zone();
- Factory* f = isolate->factory();
+ Factory* const factory = isolate->factory();
- Handle<Object> infinity = f->NewNumber(+V8_INFINITY);
- Handle<Object> minusinfinity = f->NewNumber(-V8_INFINITY);
-
- Type* number = Type::Number();
- Type* signed32 = Type::Signed32();
- Type* unsigned32 = Type::Unsigned32();
- Type* nan_or_minuszero = Type::Union(Type::NaN(), Type::MinusZero(), zone);
+ Type* infinity = Type::Constant(factory->infinity_value(), zone);
+ Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
Type* truncating_to_zero =
- Type::Union(Type::Union(Type::Constant(infinity, zone),
- Type::Constant(minusinfinity, zone), zone),
- nan_or_minuszero, zone);
-
- boolean_or_number = Type::Union(Type::Boolean(), Type::Number(), zone);
- undefined_or_null = Type::Union(Type::Undefined(), Type::Null(), zone);
- undefined_or_number = Type::Union(Type::Undefined(), Type::Number(), zone);
- singleton_false = Type::Constant(f->false_value(), zone);
- singleton_true = Type::Constant(f->true_value(), zone);
- singleton_zero = Type::Range(0.0, 0.0, zone);
- singleton_one = Type::Range(1.0, 1.0, zone);
- zero_or_one = Type::Union(singleton_zero, singleton_one, zone);
- zeroish = Type::Union(singleton_zero, nan_or_minuszero, zone);
- signed32ish = Type::Union(signed32, truncating_to_zero, zone);
- unsigned32ish = Type::Union(unsigned32, truncating_to_zero, zone);
- falsish = Type::Union(Type::Undetectable(),
- Type::Union(Type::Union(singleton_false, zeroish, zone),
- undefined_or_null, zone),
- zone);
- truish = Type::Union(
- singleton_true,
+ Type::Union(Type::Union(infinity, minus_infinity, zone),
+ Type::MinusZeroOrNaN(), zone);
+
+ singleton_false_ = Type::Constant(factory->false_value(), zone);
+ singleton_true_ = Type::Constant(factory->true_value(), zone);
+ signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
+ unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
+ falsish_ = Type::Union(
+ Type::Undetectable(),
+ Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+ Type::NullOrUndefined(), zone),
+ zone);
+ truish_ = Type::Union(
+ singleton_true_,
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone), zone);
- integer = Type::Range(-V8_INFINITY, V8_INFINITY, zone);
- weakint = Type::Union(integer, nan_or_minuszero, zone);
-
- number_fun0_ = Type::Function(number, zone);
- number_fun1_ = Type::Function(number, number, zone);
- number_fun2_ = Type::Function(number, number, number, zone);
-
- weakint_fun1_ = Type::Function(weakint, number, zone);
- random_fun_ = Type::Function(Type::OrderedNumber(), zone);
decorator_ = new (zone) Decorator(this);
graph_->AddDecorator(decorator_);
@@ -240,7 +184,6 @@ class Typer::Visitor : public Reducer {
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
- DECLARE_CASE(Dead)
DECLARE_CASE(Loop)
DECLARE_CASE(Branch)
DECLARE_CASE(IfTrue)
@@ -285,7 +228,6 @@ class Typer::Visitor : public Reducer {
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
- DECLARE_CASE(Dead)
DECLARE_CASE(Loop)
DECLARE_CASE(Branch)
DECLARE_CASE(IfTrue)
@@ -314,7 +256,6 @@ class Typer::Visitor : public Reducer {
private:
Typer* typer_;
- MaybeHandle<Context> context_;
ZoneSet<NodeId> weakened_nodes_;
#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
@@ -339,7 +280,6 @@ class Typer::Visitor : public Reducer {
Zone* zone() { return typer_->zone(); }
Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
- MaybeHandle<Context> context() { return typer_->context(); }
void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
bool IsWeakened(NodeId node_id) {
@@ -384,6 +324,7 @@ class Typer::Visitor : public Reducer {
#undef DECLARE_METHOD
static Type* JSUnaryNotTyper(Type*, Typer*);
+ static Type* JSTypeOfTyper(Type*, Typer*);
static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
@@ -416,37 +357,19 @@ class Typer::Visitor : public Reducer {
};
-void Typer::Run() {
- {
- // TODO(titzer): this is a hack. Reset types for interior nodes first.
- NodeDeque deque(zone());
- NodeMarker<bool> marked(graph(), 2);
- deque.push_front(graph()->end());
- marked.Set(graph()->end(), true);
- while (!deque.empty()) {
- Node* node = deque.front();
- deque.pop_front();
- // TODO(titzer): there shouldn't be a need to retype constants.
- if (node->op()->ValueOutputCount() > 0)
- NodeProperties::RemoveBounds(node);
- for (Node* input : node->inputs()) {
- if (!marked.Get(input)) {
- marked.Set(input, true);
- deque.push_back(input);
- }
- }
- }
- }
+void Typer::Run() { Run(NodeVector(zone())); }
+
+void Typer::Run(const NodeVector& roots) {
Visitor visitor(this);
- GraphReducer graph_reducer(graph(), zone());
+ GraphReducer graph_reducer(zone(), graph());
graph_reducer.AddReducer(&visitor);
+ for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
}
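Typer::Run now delegates to an overload that seeds the reducer with explicit roots before running the usual fixpoint, replacing the hand-rolled traversal that reset interior bounds. The seeding pattern over a generic worklist, as a sketch:

    #include <deque>
    #include <functional>
    #include <vector>

    // Stand-in for GraphReducer: enqueue the requested roots, then reduce to
    // fixpoint; a reduction step may enqueue further nodes.
    void RunFromRoots(const std::vector<int>& roots,
                      const std::function<void(int, std::deque<int>&)>& reduce) {
      std::deque<int> worklist(roots.begin(), roots.end());
      while (!worklist.empty()) {
        int node = worklist.front();
        worklist.pop_front();
        reduce(node, worklist);
      }
    }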
-void Typer::Decorator::Decorate(Node* node, bool incomplete) {
- if (incomplete) return;
+void Typer::Decorator::Decorate(Node* node) {
if (node->op()->ValueOutputCount() > 0) {
// Only eagerly type-decorate nodes with known input types.
// Other cases will generally require a proper fixpoint iteration with Run.
@@ -507,8 +430,8 @@ Bounds Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Type* Typer::Visitor::Invert(Type* type, Typer* t) {
DCHECK(type->Is(Type::Boolean()));
DCHECK(type->IsInhabited());
- if (type->Is(t->singleton_false)) return t->singleton_true;
- if (type->Is(t->singleton_true)) return t->singleton_false;
+ if (type->Is(t->singleton_false_)) return t->singleton_true_;
+ if (type->Is(t->singleton_true_)) return t->singleton_false_;
return type;
}
@@ -527,17 +450,17 @@ Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
if ((outcome & kComparisonFalse) != 0 ||
(outcome & kComparisonUndefined) != 0) {
return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
- : t->singleton_false;
+ : t->singleton_false_;
}
// Type should be non empty, so we know it should be true.
DCHECK((outcome & kComparisonTrue) != 0);
- return t->singleton_true;
+ return t->singleton_true_;
}
Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
if (type->IsRange()) return type; // Shortcut.
- if (!type->Is(t->integer) && !type->Is(Type::Integral32())) {
+ if (!type->Is(t->cache_.kInteger)) {
return type; // Give up on non-integer types.
}
double min = type->Min();
@@ -565,10 +488,10 @@ Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
if (type->Is(Type::Boolean())) return type;
- if (type->Is(t->falsish)) return t->singleton_false;
- if (type->Is(t->truish)) return t->singleton_true;
+ if (type->Is(t->falsish_)) return t->singleton_false_;
+ if (type->Is(t->truish_)) return t->singleton_true_;
if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
- return t->singleton_true; // Ruled out nan, -0 and +0.
+ return t->singleton_true_; // Ruled out nan, -0 and +0.
}
return Type::Boolean();
}
@@ -576,21 +499,21 @@ Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
if (type->Is(Type::Number())) return type;
- if (type->Is(Type::Null())) return t->singleton_zero;
- if (type->Is(Type::Undefined())) return Type::NaN();
- if (type->Is(t->undefined_or_null)) {
- return Type::Union(Type::NaN(), t->singleton_zero, t->zone());
+ if (type->Is(Type::NullOrUndefined())) {
+ if (type->Is(Type::Null())) return t->cache_.kSingletonZero;
+ if (type->Is(Type::Undefined())) return Type::NaN();
+ return Type::Union(Type::NaN(), t->cache_.kSingletonZero, t->zone());
}
- if (type->Is(t->undefined_or_number)) {
+ if (type->Is(Type::NumberOrUndefined())) {
return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
Type::NaN(), t->zone());
}
- if (type->Is(t->singleton_false)) return t->singleton_zero;
- if (type->Is(t->singleton_true)) return t->singleton_one;
- if (type->Is(Type::Boolean())) return t->zero_or_one;
- if (type->Is(t->boolean_or_number)) {
+ if (type->Is(t->singleton_false_)) return t->cache_.kSingletonZero;
+ if (type->Is(t->singleton_true_)) return t->cache_.kSingletonOne;
+ if (type->Is(Type::Boolean())) return t->cache_.kZeroOrOne;
+ if (type->Is(Type::BooleanOrNumber())) {
return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
- t->zero_or_one, t->zone());
+ t->cache_.kZeroOrOne, t->zone());
}
return Type::Number();
}
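
A standalone sketch of the ToNumber typing rules above, using a toy string-based lattice rather than V8's type system: null and false map to the singleton 0, undefined to NaN, true to 1, and a general boolean to the two-element set {0, 1} (kZeroOrOne in the cache).

    // Toy model (not V8's type lattice) of the ToNumber typing rules.
    #include <iostream>
    #include <set>
    #include <string>

    std::set<std::string> ToNumberType(const std::string& type) {
      if (type == "Null" || type == "False") return {"0"};
      if (type == "Undefined") return {"NaN"};
      if (type == "True") return {"1"};
      if (type == "Boolean") return {"0", "1"};           // kZeroOrOne
      if (type == "NullOrUndefined") return {"0", "NaN"};
      return {"Number"};  // Anything else falls back to the full Number type.
    }

    int main() {
      for (const char* t : {"Null", "Undefined", "True", "Boolean"}) {
        std::cout << t << " ->";
        for (const auto& v : ToNumberType(t)) std::cout << " " << v;
        std::cout << "\n";
      }
      return 0;
    }
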
@@ -605,10 +528,11 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
// TODO(neis): DCHECK(type->Is(Type::Number()));
if (type->Is(Type::Signed32())) return type;
- if (type->Is(t->zeroish)) return t->singleton_zero;
- if (type->Is(t->signed32ish)) {
- return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
- Type::Signed32(), t->zone());
+ if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
+ if (type->Is(t->signed32ish_)) {
+ return Type::Intersect(
+ Type::Union(type, t->cache_.kSingletonZero, t->zone()),
+ Type::Signed32(), t->zone());
}
return Type::Signed32();
}
@@ -617,10 +541,11 @@ Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
// TODO(neis): DCHECK(type->Is(Type::Number()));
if (type->Is(Type::Unsigned32())) return type;
- if (type->Is(t->zeroish)) return t->singleton_zero;
- if (type->Is(t->unsigned32ish)) {
- return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
- Type::Unsigned32(), t->zone());
+ if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
+ if (type->Is(t->unsigned32ish_)) {
+ return Type::Intersect(
+ Type::Union(type, t->cache_.kSingletonZero, t->zone()),
+ Type::Unsigned32(), t->zone());
}
return Type::Unsigned32();
}
@@ -646,21 +571,17 @@ Bounds Typer::Visitor::TypeIfException(Node* node) {
Bounds Typer::Visitor::TypeParameter(Node* node) {
+ int param = OpParameter<int>(node);
+ Type::FunctionType* function_type = typer_->function_type();
+ if (function_type != nullptr && param >= 0 &&
+ param < static_cast<int>(function_type->Arity())) {
+ return Bounds(Type::None(), function_type->Parameter(param));
+ }
return Bounds::Unbounded(zone());
}
Bounds Typer::Visitor::TypeOsrValue(Node* node) {
- if (node->InputAt(0)->opcode() == IrOpcode::kOsrLoopEntry) {
- // Before deconstruction, OSR values have type {None} to avoid polluting
- // the types of phis and other nodes in the graph.
- return Bounds(Type::None(), Type::None());
- }
- if (NodeProperties::IsTyped(node)) {
- // After deconstruction, OSR values may have had a type explicitly set.
- return NodeProperties::GetBounds(node);
- }
- // Otherwise, be conservative.
return Bounds::Unbounded(zone());
}
@@ -774,23 +695,28 @@ Bounds Typer::Visitor::TypeProjection(Node* node) {
}
+Bounds Typer::Visitor::TypeDead(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
// JS comparison operators.
Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false;
- if (lhs->Is(t->undefined_or_null) && rhs->Is(t->undefined_or_null)) {
- return t->singleton_true;
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false_;
+ if (lhs->Is(Type::NullOrUndefined()) && rhs->Is(Type::NullOrUndefined())) {
+ return t->singleton_true_;
}
if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
- return t->singleton_false;
+ return t->singleton_false_;
}
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
// TODO(neis): Extend this to Range(x,x), MinusZero, ...?
- return t->singleton_true;
+ return t->singleton_true_;
}
return Type::Boolean();
}
@@ -814,16 +740,16 @@ static Type* JSType(Type* type) {
Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- if (!JSType(lhs)->Maybe(JSType(rhs))) return t->singleton_false;
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false;
+ if (!JSType(lhs)->Maybe(JSType(rhs))) return t->singleton_false_;
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false_;
if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
- return t->singleton_false;
+ return t->singleton_false_;
}
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
- return t->singleton_true;
+ return t->singleton_true_;
}
return Type::Boolean();
}
@@ -1164,13 +1090,13 @@ Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
// the discontinuity makes it too complicated. Note that even if none of the
// "results" above is nan, the actual result may still be, so we have to do a
// different check:
- bool maybe_nan = (lhs->Maybe(t->singleton_zero) &&
+ bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
(rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
- (rhs->Maybe(t->singleton_zero) &&
+ (rhs->Maybe(t->cache_.kSingletonZero) &&
(lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->weakint; // Giving up.
- bool maybe_minuszero = (lhs->Maybe(t->singleton_zero) && rmin < 0) ||
- (rhs->Maybe(t->singleton_zero) && lmin < 0);
+ if (maybe_nan) return t->cache_.kWeakint; // Giving up.
+ bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
+ (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
Type* range =
Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
@@ -1196,7 +1122,7 @@ Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
// Division is tricky, so all we do is try ruling out nan.
// TODO(neis): try ruling out -0 as well?
bool maybe_nan =
- lhs->Maybe(Type::NaN()) || rhs->Maybe(t->zeroish) ||
+ lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
(rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
return maybe_nan ? Type::Number() : Type::OrderedNumber();
@@ -1241,7 +1167,7 @@ Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
rhs = ToNumber(rhs, t);
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->zeroish) ||
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
    // Result may be NaN.
return Type::Number();
@@ -1269,8 +1195,25 @@ Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
}
+Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
+ Factory* const f = t->isolate()->factory();
+ if (type->Is(Type::Boolean())) {
+ return Type::Constant(f->boolean_string(), t->zone());
+ } else if (type->Is(Type::Number())) {
+ return Type::Constant(f->number_string(), t->zone());
+ } else if (type->Is(Type::Symbol())) {
+ return Type::Constant(f->symbol_string(), t->zone());
+ } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable()))) {
+ return Type::Constant(f->undefined_string(), t->zone());
+ } else if (type->Is(Type::Null())) {
+ return Type::Constant(f->object_string(), t->zone());
+ }
+ return Type::InternalizedString();
+}
+
+
Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
- return Bounds(Type::None(zone()), Type::InternalizedString(zone()));
+ return TypeUnaryOp(node, JSTypeOfTyper);
}
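
A standalone sketch (not V8 code) of the JSTypeOfTyper rule introduced above: when the input type pins down the result of typeof, the node gets a constant-string type; otherwise it stays the generic InternalizedString.

    // Toy model of JSTypeOfTyper: map input types to typeof result strings.
    #include <iostream>
    #include <string>

    std::string TypeOfResult(const std::string& type) {
      if (type == "Boolean") return "\"boolean\"";
      if (type == "Number") return "\"number\"";
      if (type == "Symbol") return "\"symbol\"";
      if (type == "Undefined" || type == "Undetectable") return "\"undefined\"";
      if (type == "Null") return "\"object\"";
      return "InternalizedString";  // Not enough information for a constant.
    }

    int main() {
      std::cout << TypeOfResult("Number") << "\n";  // "number"
      std::cout << TypeOfResult("Null") << "\n";    // "object"
      std::cout << TypeOfResult("Any") << "\n";     // InternalizedString
      return 0;
    }
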
@@ -1345,6 +1288,11 @@ Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
}
+Bounds Typer::Visitor::TypeJSLoadGlobal(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
// Returns a somewhat larger range if we previously assigned
// a (smaller) range to this node. This is used to speed up
// the fixpoint calculation in case there appears to be a loop
@@ -1369,15 +1317,14 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
// If the types have nothing to do with integers, return the types.
- if (!previous_type->Maybe(typer_->integer)) {
+ Type* const integer = typer_->cache_.kInteger;
+ if (!previous_type->Maybe(integer)) {
return current_type;
}
- DCHECK(current_type->Maybe(typer_->integer));
+ DCHECK(current_type->Maybe(integer));
- Type* current_integer =
- Type::Intersect(current_type, typer_->integer, zone());
- Type* previous_integer =
- Type::Intersect(previous_type, typer_->integer, zone());
+ Type* current_integer = Type::Intersect(current_type, integer, zone());
+ Type* previous_integer = Type::Intersect(previous_type, integer, zone());
// Once we start weakening a node, we should always weaken.
if (!IsWeakened(node->id())) {
@@ -1398,7 +1345,7 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
// Find the closest lower entry in the list of allowed
// minima (or negative infinity if there is no such entry).
if (current_min != previous_integer->Min()) {
- new_min = typer_->integer->AsRange()->Min();
+ new_min = -V8_INFINITY;
for (double const min : kWeakenMinLimits) {
if (min <= current_min) {
new_min = min;
@@ -1412,7 +1359,7 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
// Find the closest greater entry in the list of allowed
// maxima (or infinity if there is no such entry).
if (current_max != previous_integer->Max()) {
- new_max = typer_->integer->AsRange()->Max();
+ new_max = V8_INFINITY;
for (double const max : kWeakenMaxLimits) {
if (max >= current_max) {
new_max = max;
@@ -1439,6 +1386,12 @@ Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
}
+Bounds Typer::Visitor::TypeJSStoreGlobal(Node* node) {
+ UNREACHABLE();
+ return Bounds();
+}
+
+
Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
return Bounds(Type::None(zone()), Type::Boolean(zone()));
}
@@ -1461,12 +1414,9 @@ Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
ContextAccess access = OpParameter<ContextAccess>(node);
Bounds outer = Operand(node, 0);
Type* context_type = outer.upper;
- Type* upper = (access.index() == Context::GLOBAL_OBJECT_INDEX)
- ? Type::GlobalObject()
- : Type::Any();
if (context_type->Is(Type::None())) {
// Upper bound of context is not yet known.
- return Bounds(Type::None(), upper);
+ return Bounds(Type::None(), Type::Any());
}
DCHECK(context_type->Maybe(Type::Internal()));
@@ -1496,7 +1446,7 @@ Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
handle(context.ToHandleChecked()->get(static_cast<int>(access.index())),
isolate()));
}
- return Bounds(lower, upper);
+ return Bounds(lower, Type::Any());
}
@@ -1506,6 +1456,16 @@ Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
}
+Bounds Typer::Visitor::TypeJSLoadDynamicGlobal(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadDynamicContext(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
Bounds Typer::Visitor::WrapContextBoundsForInput(Node* node) {
Bounds outer = BoundsOrNone(NodeProperties::GetContextInput(node));
if (outer.upper->Is(Type::None())) {
@@ -1576,6 +1536,9 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsSmi:
case Runtime::kInlineIsNonNegativeSmi:
case Runtime::kInlineIsArray:
+ case Runtime::kInlineIsDate:
+ case Runtime::kInlineIsTypedArray:
+ case Runtime::kInlineIsMinusZero:
case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
return Bounds(Type::None(zone()), Type::Boolean(zone()));
@@ -1583,6 +1546,7 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineDoubleHi:
return Bounds(Type::None(zone()), Type::Signed32());
case Runtime::kInlineConstructDouble:
+ case Runtime::kInlineDateField:
case Runtime::kInlineMathFloor:
case Runtime::kInlineMathSqrt:
case Runtime::kInlineMathAcos:
@@ -1601,6 +1565,30 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
}
+Bounds Typer::Visitor::TypeJSForInNext(Node* node) {
+ return Bounds(Type::None(zone()),
+ Type::Union(Type::Name(), Type::Undefined(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSForInPrepare(Node* node) {
+ // TODO(bmeurer): Return a tuple type here.
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSForInDone(Node* node) {
+ return Bounds(Type::None(zone()), Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSForInStep(Node* node) {
+ STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
+ return Bounds(Type::None(zone()),
+ Type::Range(1, FixedArray::kMaxLength + 1, zone()));
+}
+
+
Bounds Typer::Visitor::TypeJSStackCheck(Node* node) {
return Bounds::Unbounded(zone());
}
@@ -1659,6 +1647,21 @@ Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
}
+Bounds Typer::Visitor::TypeNumberShiftLeft(Node* node) {
+ return Bounds(Type::None(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberShiftRight(Node* node) {
+ return Bounds(Type::None(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
+ return Bounds(Type::None(zone()), Type::Unsigned32(zone()));
+}
+
+
Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
return TypeUnaryOp(node, NumberToInt32);
}
@@ -1694,11 +1697,6 @@ Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
}
-Bounds Typer::Visitor::TypeStringAdd(Node* node) {
- return Bounds(Type::None(zone()), Type::String(zone()));
-}
-
-
namespace {
Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
@@ -1799,7 +1797,7 @@ Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
switch (BufferAccessOf(node->op()).external_array_type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return Bounds(typer_->cache_->Get(k##Type));
+ return Bounds(typer_->cache_.k##Type);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
@@ -2059,6 +2057,11 @@ Bounds Typer::Visitor::TypeUint64LessThan(Node* node) {
}
+Bounds Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
+ return Bounds(Type::Boolean());
+}
+
+
Bounds Typer::Visitor::TypeUint64Mod(Node* node) {
return Bounds(Type::Internal());
}
@@ -2282,6 +2285,11 @@ Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
}
+Bounds Typer::Visitor::TypeLoadFramePointer(Node* node) {
+ return Bounds(Type::Internal());
+}
+
+
Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
return Bounds::Unbounded(zone());
}
@@ -2301,13 +2309,11 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
if (JSFunction::cast(*value)->shared()->HasBuiltinFunctionId()) {
switch (JSFunction::cast(*value)->shared()->builtin_function_id()) {
case kMathRandom:
- return typer_->random_fun_;
+ return typer_->cache_.kRandomFunc0;
case kMathFloor:
- return typer_->weakint_fun1_;
case kMathRound:
- return typer_->weakint_fun1_;
case kMathCeil:
- return typer_->weakint_fun1_;
+ return typer_->cache_.kWeakintFunc1;
// Unary math functions.
case kMathAbs: // TODO(rossberg): can't express overloading
case kMathLog:
@@ -2320,48 +2326,47 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
case kMathAsin:
case kMathAtan:
case kMathFround:
- return typer_->cache_->Get(kNumberFunc1);
+ return typer_->cache_.kNumberFunc1;
// Binary math functions.
case kMathAtan2:
case kMathPow:
case kMathMax:
case kMathMin:
- return typer_->cache_->Get(kNumberFunc2);
+ return typer_->cache_.kNumberFunc2;
case kMathImul:
- return typer_->cache_->Get(kImulFunc);
+ return typer_->cache_.kImulFunc;
case kMathClz32:
- return typer_->cache_->Get(kClz32Func);
+ return typer_->cache_.kClz32Func;
default:
break;
}
- } else if (JSFunction::cast(*value)->IsBuiltin() && !context().is_null()) {
- Handle<Context> native =
- handle(context().ToHandleChecked()->native_context(), isolate());
- if (*value == native->array_buffer_fun()) {
- return typer_->cache_->Get(kArrayBufferFunc);
- } else if (*value == native->int8_array_fun()) {
- return typer_->cache_->Get(kInt8ArrayFunc);
- } else if (*value == native->int16_array_fun()) {
- return typer_->cache_->Get(kInt16ArrayFunc);
- } else if (*value == native->int32_array_fun()) {
- return typer_->cache_->Get(kInt32ArrayFunc);
- } else if (*value == native->uint8_array_fun()) {
- return typer_->cache_->Get(kUint8ArrayFunc);
- } else if (*value == native->uint16_array_fun()) {
- return typer_->cache_->Get(kUint16ArrayFunc);
- } else if (*value == native->uint32_array_fun()) {
- return typer_->cache_->Get(kUint32ArrayFunc);
- } else if (*value == native->float32_array_fun()) {
- return typer_->cache_->Get(kFloat32ArrayFunc);
- } else if (*value == native->float64_array_fun()) {
- return typer_->cache_->Get(kFloat64ArrayFunc);
+ }
+ int const arity =
+ JSFunction::cast(*value)->shared()->internal_formal_parameter_count();
+ switch (arity) {
+ case SharedFunctionInfo::kDontAdaptArgumentsSentinel:
+ // Some smart optimization at work... &%$!&@+$!
+ break;
+ case 0:
+ return typer_->cache_.kAnyFunc0;
+ case 1:
+ return typer_->cache_.kAnyFunc1;
+ case 2:
+ return typer_->cache_.kAnyFunc2;
+ case 3:
+ return typer_->cache_.kAnyFunc3;
+ default: {
+ DCHECK_LT(3, arity);
+ Type** const params = zone()->NewArray<Type*>(arity);
+ std::fill(&params[0], &params[arity], Type::Any(zone()));
+ return Type::Function(Type::Any(zone()), arity, params, zone());
}
}
} else if (value->IsJSTypedArray()) {
switch (JSTypedArray::cast(*value)->type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return typer_->cache_->Get(k##Type##Array);
+ return typer_->cache_.k##Type##Array;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
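
The Weaken logic above keeps the fixpoint iteration finite: when a node's integer range keeps growing, its bound is snapped outward to the nearest entry in a fixed table of limits, falling back to +/-infinity (the old typer_->integer->AsRange() bounds). A standalone sketch with illustrative, shortened limit tables; the real kWeakenMinLimits/kWeakenMaxLimits are longer:

    // Standalone sketch (not V8 code) of the range-weakening rule.
    #include <cstdio>
    #include <limits>

    namespace {

    // Illustrative tables; ordered so the first qualifying entry is closest.
    const double kWeakenMinLimits[] = {0.0, -1073741824.0, -2147483648.0};
    const double kWeakenMaxLimits[] = {0.0, 1073741823.0, 2147483647.0};

    double WeakenMin(double current_min) {
      // Closest lower entry in the list of allowed minima, or -infinity.
      double new_min = -std::numeric_limits<double>::infinity();
      for (double min : kWeakenMinLimits) {
        if (min <= current_min) {
          new_min = min;
          break;
        }
      }
      return new_min;
    }

    double WeakenMax(double current_max) {
      // Closest greater entry in the list of allowed maxima, or +infinity.
      double new_max = std::numeric_limits<double>::infinity();
      for (double max : kWeakenMaxLimits) {
        if (max >= current_max) {
          new_max = max;
          break;
        }
      }
      return new_max;
    }

    }  // namespace

    int main() {
      // A range that grew to [-5, 100] snaps outward to the nearest limits,
      // so the iteration converges instead of widening one step at a time.
      std::printf("[%.0f, %.0f]\n", WeakenMin(-5), WeakenMax(100));
      // Prints: [-1073741824, 1073741823]
      return 0;
    }
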
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 4c04ddb973..f5ef4f1553 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -13,54 +13,40 @@ namespace internal {
namespace compiler {
// Forward declarations.
-class LazyTypeCache;
+class TyperCache;
class Typer {
public:
- Typer(Isolate* isolate, Graph* graph, MaybeHandle<Context> context);
+ Typer(Isolate* isolate, Graph* graph,
+ Type::FunctionType* function_type = nullptr);
~Typer();
void Run();
-
- Graph* graph() { return graph_; }
- MaybeHandle<Context> context() { return context_; }
- Zone* zone() { return graph_->zone(); }
- Isolate* isolate() { return isolate_; }
+ // TODO(bmeurer,jarin): Remove this once we have a notion of "roots" on Graph.
+ void Run(const ZoneVector<Node*>& roots);
private:
class Visitor;
class Decorator;
- Isolate* isolate_;
- Graph* graph_;
- MaybeHandle<Context> context_;
- Decorator* decorator_;
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return isolate_; }
+ Type::FunctionType* function_type() const { return function_type_; }
- Zone* zone_;
- Type* boolean_or_number;
- Type* undefined_or_null;
- Type* undefined_or_number;
- Type* negative_signed32;
- Type* non_negative_signed32;
- Type* singleton_false;
- Type* singleton_true;
- Type* singleton_zero;
- Type* singleton_one;
- Type* zero_or_one;
- Type* zeroish;
- Type* signed32ish;
- Type* unsigned32ish;
- Type* falsish;
- Type* truish;
- Type* integer;
- Type* weakint;
- Type* number_fun0_;
- Type* number_fun1_;
- Type* number_fun2_;
- Type* weakint_fun1_;
- Type* random_fun_;
- LazyTypeCache* cache_;
+ Isolate* const isolate_;
+ Graph* const graph_;
+ Type::FunctionType* function_type_;
+ Decorator* decorator_;
+ TyperCache const& cache_;
+
+ Type* singleton_false_;
+ Type* singleton_true_;
+ Type* signed32ish_;
+ Type* unsigned32ish_;
+ Type* falsish_;
+ Type* truish_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
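
The header change replaces the per-compilation context with an optional Type::FunctionType, which TypeParameter (in the typer.cc hunk above) consults to bound parameters. A minimal model of that lookup, using stand-in classes rather than V8's:

    // Minimal model (not V8's actual classes) of the parameter-typing rule:
    // with a known function type of matching arity, parameter i is bounded
    // above by its declared type; otherwise it stays unbounded ("Any").
    #include <iostream>
    #include <string>
    #include <vector>

    struct FunctionType {
      std::vector<std::string> parameter_types;  // e.g. {"Number", "String"}
      int Arity() const { return static_cast<int>(parameter_types.size()); }
      const std::string& Parameter(int i) const { return parameter_types[i]; }
    };

    std::string TypeParameter(const FunctionType* function_type, int param) {
      if (function_type != nullptr && param >= 0 &&
          param < function_type->Arity()) {
        return function_type->Parameter(param);
      }
      return "Any";  // Bounds::Unbounded in the real typer.
    }

    int main() {
      FunctionType ft{{"Number", "String"}};
      std::cout << TypeParameter(&ft, 1) << "\n";      // String
      std::cout << TypeParameter(&ft, 5) << "\n";      // Any (out of range)
      std::cout << TypeParameter(nullptr, 0) << "\n";  // Any (no function type)
      return 0;
    }
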
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index e7580037fb..690fd04577 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -98,6 +98,15 @@ class Verifier::Visitor {
FATAL(str.str().c_str());
}
}
+ void CheckOutput(Node* node, Node* use, int count, const char* kind) {
+ if (count <= 0) {
+ std::ostringstream str;
+ str << "GraphError: node #" << node->id() << ":" << *node->op()
+ << " does not produce " << kind << " output used by node #"
+ << use->id() << ":" << *use->op();
+ FATAL(str.str().c_str());
+ }
+ }
};
@@ -118,9 +127,9 @@ void Verifier::Visitor::Check(Node* node) {
for (int i = 0; i < frame_state_count; i++) {
Node* frame_state = NodeProperties::GetFrameStateInput(node, i);
CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
- // kFrameState uses undefined as a sentinel.
+ // kFrameState uses Start as a sentinel.
(node->opcode() == IrOpcode::kFrameState &&
- frame_state->opcode() == IrOpcode::kHeapConstant));
+ frame_state->opcode() == IrOpcode::kStart));
CHECK(IsDefUseChainLinkPresent(frame_state, node));
CHECK(IsUseDefChainLinkPresent(frame_state, node));
}
@@ -128,7 +137,7 @@ void Verifier::Visitor::Check(Node* node) {
// Verify all value inputs actually produce a value.
for (int i = 0; i < value_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, i);
- CHECK(value->op()->ValueOutputCount() > 0);
+ CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
CHECK(IsDefUseChainLinkPresent(value, node));
CHECK(IsUseDefChainLinkPresent(value, node));
}
@@ -136,7 +145,7 @@ void Verifier::Visitor::Check(Node* node) {
// Verify all context inputs are value nodes.
for (int i = 0; i < context_count; ++i) {
Node* context = NodeProperties::GetContextInput(node);
- CHECK(context->op()->ValueOutputCount() > 0);
+ CheckOutput(context, node, context->op()->ValueOutputCount(), "context");
CHECK(IsDefUseChainLinkPresent(context, node));
CHECK(IsUseDefChainLinkPresent(context, node));
}
@@ -144,7 +153,7 @@ void Verifier::Visitor::Check(Node* node) {
// Verify all effect inputs actually have an effect.
for (int i = 0; i < effect_count; ++i) {
Node* effect = NodeProperties::GetEffectInput(node);
- CHECK(effect->op()->EffectOutputCount() > 0);
+ CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
CHECK(IsDefUseChainLinkPresent(effect, node));
CHECK(IsUseDefChainLinkPresent(effect, node));
}
@@ -152,7 +161,7 @@ void Verifier::Visitor::Check(Node* node) {
// Verify all control inputs are control nodes.
for (int i = 0; i < control_count; ++i) {
Node* control = NodeProperties::GetControlInput(node, i);
- CHECK(control->op()->ControlOutputCount() > 0);
+ CheckOutput(control, node, control->op()->ControlOutputCount(), "control");
CHECK(IsDefUseChainLinkPresent(control, node));
CHECK(IsUseDefChainLinkPresent(control, node));
}
@@ -186,6 +195,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kDead:
// Dead is never connected to the graph.
UNREACHABLE();
+ break;
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
int count_true = 0, count_false = 0;
@@ -270,33 +280,35 @@ void Verifier::Visitor::Check(Node* node) {
CheckNotTyped(node);
break;
case IrOpcode::kDeoptimize:
- // TODO(rossberg): check successor is End
- // Type is empty.
- CheckNotTyped(node);
case IrOpcode::kReturn:
- // TODO(rossberg): check successor is End
- // Type is empty.
- CheckNotTyped(node);
- break;
case IrOpcode::kThrow:
- // TODO(rossberg): what are the constraints on these?
+ // Deoptimize, Return and Throw uses are End.
+ for (auto use : node->uses()) {
+ CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ }
// Type is empty.
CheckNotTyped(node);
break;
case IrOpcode::kTerminate:
+ // Terminates take one loop and effect.
+ CHECK_EQ(1, control_count);
+ CHECK_EQ(1, effect_count);
+ CHECK_EQ(2, input_count);
CHECK_EQ(IrOpcode::kLoop,
NodeProperties::GetControlInput(node)->opcode());
+ // Terminate uses are End.
+ for (auto use : node->uses()) {
+ CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ }
// Type is empty.
CheckNotTyped(node);
- CHECK_EQ(1, control_count);
- CHECK_EQ(1, effect_count);
- CHECK_EQ(2, input_count);
break;
case IrOpcode::kOsrNormalEntry:
case IrOpcode::kOsrLoopEntry:
- // Osr entries have
- CHECK_EQ(1, effect_count);
+ // Osr entries take one control and effect.
CHECK_EQ(1, control_count);
+ CHECK_EQ(1, effect_count);
+ CHECK_EQ(2, input_count);
// Type is empty.
CheckNotTyped(node);
break;
@@ -306,13 +318,13 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kParameter: {
// Parameters have the start node as inputs.
CHECK_EQ(1, input_count);
- CHECK_EQ(IrOpcode::kStart,
- NodeProperties::GetValueInput(node, 0)->opcode());
// Parameter has an input that produces enough values.
- int index = OpParameter<int>(node);
- Node* input = NodeProperties::GetValueInput(node, 0);
+ int const index = ParameterIndexOf(node->op());
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ CHECK_EQ(IrOpcode::kStart, start->opcode());
// Currently, parameter indices start at -1 instead of 0.
- CHECK_GT(input->op()->ValueOutputCount(), index + 1);
+ CHECK_LE(-1, index);
+ CHECK_LT(index + 1, start->op()->ValueOutputCount());
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -421,6 +433,10 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kFrameState:
// TODO(jarin): what are the constraints on these?
+ CHECK_EQ(5, value_count);
+ CHECK_EQ(0, control_count);
+ CHECK_EQ(0, effect_count);
+ CHECK_EQ(6, input_count);
break;
case IrOpcode::kStateValues:
case IrOpcode::kTypedStateValues:
@@ -505,11 +521,13 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSLoadGlobal:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSStoreGlobal:
// Type is empty.
CheckNotTyped(node);
break;
@@ -525,6 +543,8 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSLoadContext:
+ case IrOpcode::kJSLoadDynamicGlobal:
+ case IrOpcode::kJSLoadDynamicContext:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -558,6 +578,29 @@ void Verifier::Visitor::Check(Node* node) {
CheckUpperIs(node, Type::Any());
break;
+ case IrOpcode::kJSForInPrepare: {
+      // TODO(bmeurer): What are the constraints on these?
+ CheckUpperIs(node, Type::Any());
+ break;
+ }
+ case IrOpcode::kJSForInDone: {
+ // TODO(bmeurer): OSR breaks this invariant, although the node is not user
+ // visible, so we know it is safe (fullcodegen has an unsigned smi there).
+ // CheckValueInputIs(node, 0, Type::UnsignedSmall());
+ break;
+ }
+ case IrOpcode::kJSForInNext: {
+ CheckUpperIs(node, Type::Union(Type::Name(), Type::Undefined()));
+ break;
+ }
+ case IrOpcode::kJSForInStep: {
+ // TODO(bmeurer): OSR breaks this invariant, although the node is not user
+ // visible, so we know it is safe (fullcodegen has an unsigned smi there).
+ // CheckValueInputIs(node, 0, Type::UnsignedSmall());
+ CheckUpperIs(node, Type::UnsignedSmall());
+ break;
+ }
+
case IrOpcode::kJSStackCheck:
// Type is empty.
CheckNotTyped(node);
@@ -594,6 +637,19 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(rossberg): activate once we retype after opcode changes.
// CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kNumberShiftLeft:
+ case IrOpcode::kNumberShiftRight:
+ // (Signed32, Unsigned32) -> Signed32
+ CheckValueInputIs(node, 0, Type::Signed32());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckUpperIs(node, Type::Signed32());
+ break;
+ case IrOpcode::kNumberShiftRightLogical:
+ // (Unsigned32, Unsigned32) -> Unsigned32
+ CheckValueInputIs(node, 0, Type::Unsigned32());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckUpperIs(node, Type::Unsigned32());
+ break;
case IrOpcode::kNumberToInt32:
// Number -> Signed32
CheckValueInputIs(node, 0, Type::Number());
@@ -617,12 +673,6 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::String());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kStringAdd:
- // (String, String) -> String
- CheckValueInputIs(node, 0, Type::String());
- CheckValueInputIs(node, 1, Type::String());
- CheckUpperIs(node, Type::String());
- break;
case IrOpcode::kReferenceEqual: {
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
@@ -792,6 +842,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kUint64Div:
case IrOpcode::kUint64Mod:
case IrOpcode::kUint64LessThan:
+ case IrOpcode::kUint64LessThanOrEqual:
case IrOpcode::kFloat32Add:
case IrOpcode::kFloat32Sub:
case IrOpcode::kFloat32Mul:
@@ -833,6 +884,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
case IrOpcode::kLoadStackPointer:
+ case IrOpcode::kLoadFramePointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
@@ -1100,6 +1152,6 @@ void ScheduleVerifier::Run(Schedule* schedule) {
}
}
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
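
The verifier's new CheckOutput helper upgrades a bare CHECK into a diagnostic naming both the producing and the consuming node. A standalone sketch of that error formatting, with simple stand-ins for V8's Node and operator classes:

    // Sketch of the CheckOutput diagnostic: a non-positive output count
    // reports which node fails to produce the required kind of output.
    #include <cstdlib>
    #include <iostream>
    #include <sstream>
    #include <string>

    struct Node {
      int id;
      std::string op;  // printable operator name, e.g. "Int32Add"
    };

    void CheckOutput(const Node& node, const Node& use, int count,
                     const char* kind) {
      if (count <= 0) {
        std::ostringstream str;
        str << "GraphError: node #" << node.id << ":" << node.op
            << " does not produce " << kind << " output used by node #"
            << use.id << ":" << use.op;
        std::cerr << str.str() << "\n";  // FATAL() in the real verifier.
        std::exit(1);
      }
    }

    int main() {
      Node start{0, "Start"};
      Node ret{7, "Return"};
      CheckOutput(start, ret, 1, "value");   // ok: count > 0
      CheckOutput(start, ret, 0, "effect");  // reports and exits
      return 0;
    }
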
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 0f1e959adc..bdce083201 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -38,7 +38,12 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand OutputOperand() { return ToOperand(instr_->Output()); }
Immediate ToImmediate(InstructionOperand* operand) {
- return Immediate(ToConstant(operand).ToInt32());
+ Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kFloat64) {
+ DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
+ return Immediate(0);
+ }
+ return Immediate(constant.ToInt32());
}
Operand ToOperand(InstructionOperand* op, int extra = 0) {
@@ -533,13 +538,6 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ movq(rsp, rbp);
__ popq(rbp);
- int32_t bytes_to_pop =
- descriptor->IsJSFunctionCall()
- ? static_cast<int32_t>(descriptor->JSParameterCount() *
- kPointerSize)
- : 0;
- __ popq(Operand(rsp, bytes_to_pop));
- __ addq(rsp, Immediate(bytes_to_pop));
}
}
@@ -597,6 +595,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
@@ -621,6 +635,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchStackPointer:
__ movq(i.OutputRegister(), rsp);
break;
+ case kArchFramePointer:
+ __ movq(i.OutputRegister(), rbp);
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -1207,6 +1224,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
}
break;
+ case kX64Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
+ }
+ break;
+ }
case kX64StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
@@ -1436,14 +1462,31 @@ void CodeGenerator::AssemblePrologue() {
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ pushq(rbp);
__ movq(rbp, rsp);
+ int register_save_area_size = 0;
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
- int register_save_area_size = 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
__ pushq(Register::from_code(i));
register_save_area_size += kPointerSize;
}
+ }
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) { // Save callee-saved XMM registers.
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const int stack_size = saves_fp_count * 16;
+ // Adjust the stack pointer.
+ __ subp(rsp, Immediate(stack_size));
+ // Store the registers on the stack.
+ int slot_idx = 0;
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ if (!((1 << i) & saves_fp)) continue;
+ __ movdqu(Operand(rsp, 16 * slot_idx), XMMRegister::from_code(i));
+ slot_idx++;
+ }
+ register_save_area_size += stack_size;
+ }
+ if (register_save_area_size > 0) {
frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
@@ -1451,7 +1494,7 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else if (stack_slots > 0) {
+ } else if (needs_frame_) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -1488,8 +1531,22 @@ void CodeGenerator::AssembleReturn() {
if (stack_slots > 0) {
__ addq(rsp, Immediate(stack_slots * kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers.
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const int stack_size = saves_fp_count * 16;
+ // Load the registers from the stack.
+ int slot_idx = 0;
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ if (!((1 << i) & saves_fp)) continue;
+ __ movdqu(XMMRegister::from_code(i), Operand(rsp, 16 * slot_idx));
+ slot_idx++;
+ }
+ // Adjust the stack pointer.
+ __ addp(rsp, Immediate(stack_size));
+ }
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
@@ -1504,15 +1561,27 @@ void CodeGenerator::AssembleReturn() {
__ popq(rbp); // Pop caller's frame pointer.
__ ret(0);
}
- } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
- __ popq(rbp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Ret(pop_count * kPointerSize, rbx);
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
+ __ popq(rbp); // Pop caller's frame pointer.
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count == 0) {
+ __ Ret();
+ } else {
+ __ Ret(pop_count * kPointerSize, rbx);
+ }
+ }
} else {
- __ ret(0);
+ __ Ret();
}
}
@@ -1704,7 +1773,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
__ Nop(padding_size);
}
}
- MarkLazyDeoptSite();
}
#undef __
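
The prologue/epilogue changes above store callee-saved XMM registers in 16-byte slots, in ascending register order, sized by the popcount of the mask. A standalone sketch of that layout math, using the Win64 xmm6..xmm15 mask from the linkage change below:

    // Sketch (not V8 code) of the callee-saved XMM save-area layout.
    #include <cstdint>
    #include <cstdio>

    int CountPopulation32(uint32_t value) {
      int count = 0;
      for (; value != 0; value &= value - 1) ++count;
      return count;
    }

    int main() {
      // Win64 callee-saved XMM registers: xmm6..xmm15.
      uint32_t saves_fp = 0;
      for (int i = 6; i <= 15; ++i) saves_fp |= 1u << i;

      const int stack_size = CountPopulation32(saves_fp) * 16;
      std::printf("save area: %d bytes\n", stack_size);  // 160

      int slot_idx = 0;
      for (int i = 0; i < 16; ++i) {
        if (!((1u << i) & saves_fp)) continue;
        // movdqu [rsp + 16 * slot_idx], xmm<i> in the generated prologue.
        std::printf("xmm%-2d -> [rsp + %d]\n", i, 16 * slot_idx);
        ++slot_idx;
      }
      return 0;
    }
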
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 428b7dc186..2e10729954 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -114,6 +114,7 @@ namespace compiler {
V(X64Dec32) \
V(X64Inc32) \
V(X64Push) \
+ V(X64Poke) \
V(X64StoreWriteBarrier) \
V(X64StackCheck)
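
The new X64Poke (and the kArchPrepareCallCFunction/kArchCallCFunction instructions in the code generator) carry an operand, a stack slot or parameter count, packed into the opcode's MiscField. A minimal model of that packing; the field positions and the opcode number here are illustrative assumptions, not V8's actual layout:

    // BitField-style encode/decode, modeling kX64Poke | MiscField::encode(slot).
    #include <cstdint>
    #include <cstdio>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {  // assumes value fits in the field
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> kShift);
      }
    };

    using ArchOpcodeField = BitField<int, 0, 8>;  // which instruction
    using MiscField = BitField<int, 22, 10>;      // instruction-specific payload

    int main() {
      const int kX64Poke = 42;  // hypothetical opcode number
      uint32_t opcode = ArchOpcodeField::encode(kX64Poke) | MiscField::encode(3);
      // The code generator later recovers the stack slot from the opcode:
      std::printf("arch=%d slot=%d\n", ArchOpcodeField::decode(opcode),
                  MiscField::decode(opcode));  // arch=42 slot=3
      return 0;
    }
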
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 26d8960b91..6d7fca472e 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -27,6 +27,10 @@ class X64OperandGenerator final : public OperandGenerator {
const int64_t value = OpParameter<int64_t>(node);
return value == static_cast<int64_t>(static_cast<int32_t>(value));
}
+ case IrOpcode::kNumberConstant: {
+ const double value = OpParameter<double>(node);
+ return bit_cast<int64_t>(value) == 0;
+ }
default:
return false;
}
@@ -814,37 +818,23 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
}
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+namespace {
+
+void VisitRO(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
-void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
- X64OperandGenerator g(this);
- Node* value = node->InputAt(0);
- if (CanCover(node, value)) {
- switch (value->opcode()) {
- case IrOpcode::kWord64Sar:
- case IrOpcode::kWord64Shr: {
- Int64BinopMatcher m(value);
- if (m.right().Is(32)) {
- Emit(kX64Shr, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.TempImmediate(32));
- return;
- }
- break;
- }
- default:
- break;
- }
- }
- Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
+void VisitRR(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
}
-namespace {
-
void VisitFloatBinop(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
X64OperandGenerator g(selector);
@@ -868,10 +858,48 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
}
-
} // namespace
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToFloat32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, node, kArchTruncateDoubleToI);
+ case TruncationMode::kRoundToZero:
+ return VisitRO(this, node, kSSEFloat64ToInt32);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar:
+ case IrOpcode::kWord64Shr: {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(32)) {
+ Emit(kX64Shr, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(32));
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
@@ -910,14 +938,12 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
void InstructionSelector::VisitFloat32Abs(Node* node) {
- X64OperandGenerator g(this);
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat32Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat32Sqrt);
}
@@ -980,37 +1006,22 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
void InstructionSelector::VisitFloat64Abs(Node* node) {
- X64OperandGenerator g(this);
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-namespace {
-
-void VisitRRFloat64(InstructionSelector* selector, InstructionCode opcode,
- Node* node) {
- X64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64Sqrt);
}
-} // namespace
-
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
- node);
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
@@ -1034,19 +1045,41 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
- // Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
- Emit(kX64Push, g.NoOutput(), value);
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value =
+ g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): handle pushing double parameters.
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kX64Push, g.NoOutput(), value);
+ }
}
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -1054,17 +1087,21 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
@@ -1082,16 +1119,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
- if (descriptor->UsesOnlyRegisters() &&
- descriptor->HasSameReturnLocationsAs(
- linkage()->GetIncomingDescriptor())) {
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
- DCHECK_EQ(0u, buffer.pushed_nodes.size());
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -1304,9 +1337,27 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kUint32LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(this, value, kX64Cmp32, &cont);
- case IrOpcode::kWord64Equal:
+ case IrOpcode::kWord64Equal: {
cont.OverwriteAndNegateIfEqual(kEqual);
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ // Try to combine the branch with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(this, value, &cont);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kX64Test, &cont);
+ default:
+ break;
+ }
+ }
+ return VisitCompareZero(this, value, kX64Cmp, &cont);
+ }
return VisitWord64Compare(this, value, &cont);
+ }
case IrOpcode::kInt64LessThan:
cont.OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(this, value, &cont);
@@ -1316,6 +1367,9 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kUint64LessThan:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(this, value, &cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kFloat32Equal:
cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat32Compare(this, value, &cont);
@@ -1469,25 +1523,12 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
void InstructionSelector::VisitWord64Equal(Node* const node) {
- Node* user = node;
FlagsContinuation cont(kEqual, node);
- Int64BinopMatcher m(user);
+ Int64BinopMatcher m(node);
if (m.right().Is(0)) {
- Node* value = m.left().node();
-
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- }
-
- // Try to combine the branch with a comparison.
+ // Try to combine the equality check with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
@@ -1498,7 +1539,6 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
break;
}
}
- return VisitCompareZero(this, value, kX64Cmp, &cont);
}
VisitWord64Compare(this, node, &cont);
}
@@ -1542,6 +1582,12 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat32Compare(this, node, &cont);
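
The selector change above admits a NumberConstant as an immediate only when its bit pattern is exactly zero, matching the x64 ToImmediate DCHECK earlier in the diff: that accepts +0.0 but rejects -0.0 (sign bit set) and every other double. A standalone sketch of the check:

    // Sketch (not V8 code) of the CanBeImmediate rule for NumberConstant.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    bool CanBeImmediate(double value) {
      int64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit_cast<int64_t>(value)
      return bits == 0;
    }

    int main() {
      std::printf("+0.0 -> %d\n", CanBeImmediate(0.0));   // 1
      std::printf("-0.0 -> %d\n", CanBeImmediate(-0.0));  // 0
      std::printf(" 1.0 -> %d\n", CanBeImmediate(1.0));   // 0
      return 0;
    }
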
diff --git a/deps/v8/src/compiler/x64/linkage-x64.cc b/deps/v8/src/compiler/x64/linkage-x64.cc
index 1b840a995d..b272eb6f76 100644
--- a/deps/v8/src/compiler/x64/linkage-x64.cc
+++ b/deps/v8/src/compiler/x64/linkage-x64.cc
@@ -33,6 +33,16 @@ struct X64LinkageHelperTraits {
return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
}
}
+ static RegList CCalleeSaveFPRegisters() {
+ if (kWin64) {
+ return (1 << xmm6.code()) | (1 << xmm7.code()) | (1 << xmm8.code()) |
+ (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) |
+ (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) |
+ (1 << xmm15.code());
+ } else {
+ return 0;
+ }
+ }
static Register CRegisterParameter(int i) {
if (kWin64) {
static Register register_parameters[] = {rcx, rdx, r8, r9};
@@ -43,6 +53,7 @@ struct X64LinkageHelperTraits {
}
}
static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
+ static int CStackBackingStoreLength() { return kWin64 ? 4 : 0; }
};
typedef LinkageHelper<X64LinkageHelperTraits> LH;
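
The new CStackBackingStoreLength() hook reflects the Win64 C calling convention, where the caller must reserve a 4-slot (32-byte) home area for the register parameters; System V needs none. A standalone sketch of the stack-slot count that implies for a C call, under those ABI assumptions:

    // Sketch (not V8 code) of stack slots needed for a C function call.
    #include <algorithm>
    #include <cstdio>

    int StackSlotsForCCall(int parameter_count, bool win64) {
      const int register_params = win64 ? 4 : 6;   // CRegisterParametersLength
      const int backing_store = win64 ? 4 : 0;     // CStackBackingStoreLength
      const int stack_params = std::max(0, parameter_count - register_params);
      // On Win64 this is equivalent to max(4, parameter_count).
      return stack_params + backing_store;
    }

    int main() {
      std::printf("Win64, 6 params: %d slots\n", StackSlotsForCCall(6, true));   // 6
      std::printf("SysV,  6 params: %d slots\n", StackSlotsForCCall(6, false));  // 0
      return 0;
    }
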
diff --git a/deps/v8/src/compiler/x87/OWNERS b/deps/v8/src/compiler/x87/OWNERS
new file mode 100644
index 0000000000..61245ae8e2
--- /dev/null
+++ b/deps/v8/src/compiler/x87/OWNERS
@@ -0,0 +1,2 @@
+weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
new file mode 100644
index 0000000000..1335d3f568
--- /dev/null
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -0,0 +1,1852 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/scopes.h"
+#include "src/x87/assembler-x87.h"
+#include "src/x87/macro-assembler-x87.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds X87 specific methods for decoding operands.
+class X87OperandConverter : public InstructionOperandConverter {
+ public:
+ X87OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ Operand InputOperand(size_t index, int extra = 0) {
+ return ToOperand(instr_->InputAt(index), extra);
+ }
+
+ Immediate InputImmediate(size_t index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+ Operand ToOperand(InstructionOperand* op, int extra = 0) {
+ if (op->IsRegister()) {
+ DCHECK(extra == 0);
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ DCHECK(extra == 0);
+ UNIMPLEMENTED();
+ }
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index(), frame(), extra);
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
+ Operand HighOperand(InstructionOperand* op) {
+ DCHECK(op->IsDoubleStackSlot());
+ return ToOperand(op, kPointerSize);
+ }
+
+ Immediate ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Immediate(constant.ToInt32());
+ case Constant::kFloat32:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ return Immediate(constant.ToExternalReference());
+ case Constant::kHeapObject:
+ return Immediate(constant.ToHeapObject());
+ case Constant::kInt64:
+ break;
+ case Constant::kRpoNumber:
+ return Immediate::CodeRelativeOffset(ToLabel(operand));
+ }
+ UNREACHABLE();
+ return Immediate(-1);
+ }
+
+ static size_t NextOffset(size_t* offset) {
+ size_t i = *offset;
+ (*offset)++;
+ return i;
+ }
+
+ static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+ STATIC_ASSERT(0 == static_cast<int>(times_1));
+ STATIC_ASSERT(1 == static_cast<int>(times_2));
+ STATIC_ASSERT(2 == static_cast<int>(times_4));
+ STATIC_ASSERT(3 == static_cast<int>(times_8));
+ int scale = static_cast<int>(mode - one);
+ DCHECK(scale >= 0 && scale < 4);
+ return static_cast<ScaleFactor>(scale);
+ }
+
+ Operand MemoryOperand(size_t* offset) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ switch (mode) {
+ case kMode_MR: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = 0;
+ return Operand(base, disp);
+ }
+ case kMode_MRI: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, disp);
+ }
+ case kMode_MR1:
+ case kMode_MR2:
+ case kMode_MR4:
+ case kMode_MR8: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+ int32_t disp = 0;
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_M1:
+ case kMode_M2:
+ case kMode_M4:
+ case kMode_M8: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1, mode);
+ int32_t disp = 0;
+ return Operand(index, scale, disp);
+ }
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(index, scale, disp);
+ }
+ case kMode_MI: {
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(Immediate(disp));
+ }
+ case kMode_None:
+ UNREACHABLE();
+ return Operand(no_reg, 0);
+ }
+ UNREACHABLE();
+ return Operand(no_reg, 0);
+ }
+
+ Operand MemoryOperand(size_t first_input = 0) {
+ return MemoryOperand(&first_input);
+ }
+};
+
+
+namespace {
+
+bool HasImmediateInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsImmediate();
+}
+
+
+class OutOfLineLoadInteger final : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final { __ xor_(result_, result_); }
+
+ private:
+ Register const result_;
+};
+
+
+class OutOfLineLoadFloat final : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ DCHECK(result_.code() == 0);
+ USE(result_);
+ __ fstp(0);
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0x7fffffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ }
+
+ private:
+ X87Register const result_;
+};
+
+
+class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
+ public:
+ OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+ X87Register input)
+ : OutOfLineCode(gen), result_(result), input_(input) {}
+
+ void Generate() final {
+ UNIMPLEMENTED();
+ USE(result_);
+ USE(input_);
+ }
+
+ private:
+ Register const result_;
+ X87Register const input_;
+};
+
+} // namespace
+
+
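+// The ASSEMBLE_CHECKED_* macros emit bounds-checked array accesses: the
+// offset (input 0) is compared with the length (input 1); an out-of-bounds
+// load branches to out-of-line code that materializes a default value (zero
+// for integers, a NaN for floats), and an out-of-bounds store is skipped.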
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ DCHECK(result.code() == 0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ fstp(0); \
+ __ asm_instr(i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ DCHECK(i.InputDoubleRegister(2).code() == 0); \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3)); \
+ __ bind(&done); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ if (instr->InputAt(2)->IsRegister()) { \
+ __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
+ } else { \
+ __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
+ } \
+ __ bind(&done); \
+ } while (false)
+
+
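+// Tears down the current activation frame ahead of a tail call. A frame is
+// only torn down if one was built: for JS function calls or when spill slots
+// were allocated.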
+void CodeGenerator::AssembleDeconstructActivationRecord() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ X87OperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ }
+ RecordCallPosition(instr);
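+      // The x87 state must be reset after the call: if the call produced a
+      // double result in st(0), spill it around the fninit and reload it;
+      // otherwise load a dummy 1.0 so one value stays on the x87 stack, as
+      // this port expects elsewhere.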
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ AssembleDeconstructActivationRecord();
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ jmp(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ }
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ RecordCallPosition(instr);
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ AssembleDeconstructActivationRecord();
+ __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchNop:
+      // Don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), ebp);
+ break;
+ case kArchStackPointer:
+ __ mov(i.OutputRegister(), esp);
+ break;
+ case kArchTruncateDoubleToI: {
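+      // The input is either already in st(0) or in a stack slot: load it if
+      // needed, truncate to int32, and drop the loaded copy afterwards.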
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ TruncateX87TOSToI(i.OutputRegister());
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kX87Add:
+ if (HasImmediateInput(instr, 1)) {
+ __ add(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ add(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87And:
+ if (HasImmediateInput(instr, 1)) {
+ __ and_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ and_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Cmp:
+ if (HasImmediateInput(instr, 1)) {
+ __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ cmp(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Test:
+ if (HasImmediateInput(instr, 1)) {
+ __ test(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ test(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Imul:
+ if (HasImmediateInput(instr, 1)) {
+ __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+ } else {
+ __ imul(i.OutputRegister(), i.InputOperand(1));
+ }
+ break;
+ case kX87ImulHigh:
+ __ imul(i.InputRegister(1));
+ break;
+ case kX87UmulHigh:
+ __ mul(i.InputRegister(1));
+ break;
+ case kX87Idiv:
+ __ cdq();
+ __ idiv(i.InputOperand(1));
+ break;
+ case kX87Udiv:
+ __ Move(edx, Immediate(0));
+ __ div(i.InputOperand(1));
+ break;
+ case kX87Not:
+ __ not_(i.OutputOperand());
+ break;
+ case kX87Neg:
+ __ neg(i.OutputOperand());
+ break;
+ case kX87Or:
+ if (HasImmediateInput(instr, 1)) {
+ __ or_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ or_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Xor:
+ if (HasImmediateInput(instr, 1)) {
+ __ xor_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ xor_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Sub:
+ if (HasImmediateInput(instr, 1)) {
+ __ sub(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ sub(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Shl:
+ if (HasImmediateInput(instr, 1)) {
+ __ shl(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ shl_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Shr:
+ if (HasImmediateInput(instr, 1)) {
+ __ shr(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ shr_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Sar:
+ if (HasImmediateInput(instr, 1)) {
+ __ sar(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ sar_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Ror:
+ if (HasImmediateInput(instr, 1)) {
+ __ ror(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ ror_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Lzcnt:
+ __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kX87LoadFloat64Constant: {
+ InstructionOperand* source = instr->InputAt(0);
+ InstructionOperand* destination = instr->Output();
+ DCHECK(source->IsConstant());
+ X87OperandConverter g(this, NULL);
+ Constant src_constant = g.ToConstant(source);
+
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(MemOperand(esp, 0), Immediate(lower));
+ __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case kX87Float32Cmp: {
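+      // Both operands sit on the machine stack (pushed by earlier
+      // instructions); load them, compare into EFLAGS, then free the slots.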
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ FCmp();
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
+ case kX87Float32Add: {
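+      // 0x027F sets the precision-control bits (CW bits 8-9) to 53-bit
+      // double precision instead of the 64-bit extended default (0x037F), so
+      // intermediate results round like IEEE doubles; the same pattern is
+      // used by the float arithmetic cases below.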
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ faddp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Sub: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fsubp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Mul: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fmulp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Div: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fdivp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Max: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = below;
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
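+      // Both operands are +/-0 here. fadd(1) stores left + right into st(1);
+      // the sum is -0 only when both inputs are -0, which is exactly the
+      // maximum of two signed zeros.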
+ __ fadd(1);
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
+ case kX87Float32Min: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = above;
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+      // At this point, both left and right are either +0 or -0. The result
+      // must be -0 if either operand is -0, so spill both bit patterns to
+      // memory, OR them together, and reload the combined value.
+ __ push(eax);
+ __ fld(1);
+ __ fld(1);
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, kPointerSize));
+ __ pop(eax);
+      __ or_(MemOperand(esp, 0), eax);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+      __ pop(eax);  // Discard the temporary slot.
+      __ pop(eax);  // Restore the saved eax.
+      // The combined value is now in st(0) above the stale left value, so
+      // exit through return_right, whose fxch keeps the combined value.
+      __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
+ case kX87Float32Sqrt: {
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kFloatSize));
+ break;
+ }
+ case kX87Float32Abs: {
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fabs();
+ __ lea(esp, Operand(esp, kFloatSize));
+ break;
+ }
+ case kX87Float64Add: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ faddp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Sub: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fsub_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Mul: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fmul_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Div: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fdiv_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Mod: {
+ FrameScope frame_scope(&masm_, StackFrame::MANUAL);
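+      // eax keeps the original esp so the two pushed double operands can
+      // still be addressed after PrepareCallCFunction realigns the stack.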
+ __ mov(eax, esp);
+ __ PrepareCallCFunction(4, eax);
+ __ fstp(0);
+ __ fld_d(MemOperand(eax, 0));
+ __ fstp_d(Operand(esp, 1 * kDoubleSize));
+ __ fld_d(MemOperand(eax, kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Float64Max: {
+ Label check_zero, return_left, return_right;
+ Condition condition = below;
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &return_right,
+           Label::kNear);  // At least one NaN: return right.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Float64Min: {
+ Label check_zero, return_left, return_right;
+ Condition condition = above;
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &return_right,
+ Label::kNear); // At least one NaN, return right value.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Float64Abs: {
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fabs();
+ __ lea(esp, Operand(esp, kDoubleSize));
+ break;
+ }
+ case kX87Int32ToFloat64: {
+ InstructionOperand* input = instr->InputAt(0);
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ __ fstp(0);
+ if (input->IsRegister()) {
+ Register input_reg = i.InputRegister(0);
+ __ push(input_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(input_reg);
+ } else {
+ __ fild_s(i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Float32ToFloat64: {
+ InstructionOperand* input = instr->InputAt(0);
+ if (input->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_s(i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Uint32ToFloat64: {
+ __ fstp(0);
+ __ LoadUint32NoSSE2(i.InputRegister(0));
+ break;
+ }
+ case kX87Float64ToInt32: {
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kX87Float64ToFloat32: {
+ InstructionOperand* input = instr->InputAt(0);
+ if (input->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_d(i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
+ break;
+ }
+ case kX87Float64ToUint32: {
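+      // Convert to uint32 by biasing the input with -2^31, truncating the
+      // sum to int32, and adding 0x80000000 back to the integer result.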
+ __ push_imm32(-2147483648);
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ fild_s(Operand(esp, 0));
+ __ fadd(1);
+ __ fstp(0);
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ __ add(esp, Immediate(kInt32Size));
+ __ add(i.OutputRegister(), Immediate(0x80000000));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kX87Float64ExtractHighWord32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ }
+ break;
+ }
+ case kX87Float64ExtractLowWord32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ mov(i.OutputRegister(), MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Float64InsertHighWord32: {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ break;
+ }
+ case kX87Float64InsertLowWord32: {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ mov(MemOperand(esp, 0), i.InputRegister(1));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ break;
+ }
+ case kX87Float64Sqrt: {
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kDoubleSize));
+ break;
+ }
+ case kX87Float64Round: {
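+      // Program the rounding-control bits (CW bits 10-11): 0x0400 rounds
+      // toward -infinity, 0x0c00 truncates toward zero, and 0x0000 restores
+      // the round-to-nearest default after the frndint.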
+ RoundingMode mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+      if (mode == kRoundDown) {
+ __ X87SetRC(0x0400);
+ } else {
+ __ X87SetRC(0x0c00);
+ }
+
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_d(i.InputOperand(0));
+ }
+ __ frndint();
+ __ X87SetRC(0x0000);
+ break;
+ }
+ case kX87Float64Cmp: {
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ FCmp();
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Movsxbl:
+ __ movsx_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movzxbl:
+ __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov_b(operand, i.InputInt8(index));
+ } else {
+ __ mov_b(operand, i.InputRegister(index));
+ }
+ break;
+ }
+ case kX87Movsxwl:
+ __ movsx_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movzxwl:
+ __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov_w(operand, i.InputInt16(index));
+ } else {
+ __ mov_w(operand, i.InputRegister(index));
+ }
+ break;
+ }
+ case kX87Movl:
+ if (instr->HasOutput()) {
+ __ mov(i.OutputRegister(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov(operand, i.InputImmediate(index));
+ } else {
+ __ mov(operand, i.InputRegister(index));
+ }
+ }
+ break;
+ case kX87Movsd: {
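+      // Double values live only in st(0) in this port: a load replaces the
+      // current top of stack, while a store copies st(0) out without popping.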
+ if (instr->HasOutput()) {
+ X87Register output = i.OutputDoubleRegister();
+ USE(output);
+ DCHECK(output.code() == 0);
+ __ fstp(0);
+ __ fld_d(i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ fst_d(operand);
+ }
+ break;
+ }
+ case kX87Movss: {
+ if (instr->HasOutput()) {
+ X87Register output = i.OutputDoubleRegister();
+ USE(output);
+ DCHECK(output.code() == 0);
+ __ fstp(0);
+ __ fld_s(i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ fst_s(operand);
+ }
+ break;
+ }
+ case kX87Lea: {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+      // and addressing mode just happen to work out. The "addl"/"subl" forms
+      // in these cases are faster, based on measurements.
+ if (mode == kMode_MI) {
+ __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
+ } else if (i.InputRegister(0).is(i.OutputRegister())) {
+ if (mode == kMode_MRI) {
+ int32_t constant_summand = i.InputInt32(1);
+ if (constant_summand > 0) {
+ __ add(i.OutputRegister(), Immediate(constant_summand));
+ } else if (constant_summand < 0) {
+ __ sub(i.OutputRegister(), Immediate(-constant_summand));
+ }
+ } else if (mode == kMode_MR1) {
+ if (i.InputRegister(1).is(i.OutputRegister())) {
+ __ shl(i.OutputRegister(), 1);
+ } else {
+ __ lea(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else if (mode == kMode_M2) {
+ __ shl(i.OutputRegister(), 1);
+ } else if (mode == kMode_M4) {
+ __ shl(i.OutputRegister(), 2);
+ } else if (mode == kMode_M8) {
+ __ shl(i.OutputRegister(), 3);
+ } else {
+ __ lea(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else {
+ __ lea(i.OutputRegister(), i.MemoryOperand());
+ }
+ break;
+ }
+ case kX87Push:
+ if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ } else {
+ __ push(i.InputOperand(0));
+ }
+ break;
+ case kX87Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
+ }
+ break;
+ }
+ case kX87PushFloat32:
+ __ lea(esp, Operand(esp, -kFloatSize));
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ fld_s(i.InputOperand(0));
+ __ fstp_s(MemOperand(esp, 0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ fst_s(MemOperand(esp, 0));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case kX87PushFloat64:
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ fld_d(i.InputOperand(0));
+ __ fstp_d(MemOperand(esp, 0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ fst_d(MemOperand(esp, 0));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case kX87StoreWriteBarrier: {
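+      // Do the store, then record the written slot so the write barrier of
+      // the incremental / generational GC sees the new reference.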
+ Register object = i.InputRegister(0);
+ Register value = i.InputRegister(2);
+ SaveFPRegsMode mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ if (HasImmediateInput(instr, 1)) {
+ int index = i.InputInt32(1);
+ Register scratch = i.TempRegister(1);
+ __ mov(Operand(object, index), value);
+ __ RecordWriteContextSlot(object, index, value, scratch, mode);
+ } else {
+ Register index = i.InputRegister(1);
+ __ mov(Operand(object, index, times_1, 0), value);
+ __ lea(index, Operand(object, index, times_1, 0));
+ __ RecordWrite(object, index, value, mode);
+ }
+ break;
+ }
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
+ break;
+ case kX87StackCheck: {
+ ExternalReference const stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ break;
+ }
+ }
+} // NOLINT(readability/fn_size)
+
+
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ X87OperandConverter i(this, instr);
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ switch (branch->condition) {
+ case kUnorderedEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kEqual:
+ __ j(equal, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ j(not_equal, tlabel);
+ break;
+ case kSignedLessThan:
+ __ j(less, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ j(greater_equal, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ j(less_equal, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ j(greater, tlabel);
+ break;
+ case kUnsignedLessThan:
+ __ j(below, tlabel);
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ __ j(above_equal, tlabel);
+ break;
+ case kUnsignedLessThanOrEqual:
+ __ j(below_equal, tlabel);
+ break;
+ case kUnsignedGreaterThan:
+ __ j(above, tlabel);
+ break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
+ }
+ // Add a jump if not falling through to the next block.
+ if (!branch->fallthru) __ jmp(flabel);
+}
+
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ X87OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label check;
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = no_condition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kEqual:
+ cc = equal;
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kNotEqual:
+ cc = not_equal;
+ break;
+ case kSignedLessThan:
+ cc = less;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = greater_equal;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = less_equal;
+ break;
+ case kSignedGreaterThan:
+ cc = greater;
+ break;
+ case kUnsignedLessThan:
+ cc = below;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = above_equal;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = below_equal;
+ break;
+ case kUnsignedGreaterThan:
+ cc = above;
+ break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
+ }
+ __ bind(&check);
+ if (reg.is_byte_register()) {
+ // setcc for byte registers (al, bl, cl, dl).
+ __ setcc(cc, reg);
+ __ movzx_b(reg, reg);
+ } else {
+ // Emit a branch to set a register to either 1 or 0.
+ Label set;
+ __ j(cc, &set, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&set);
+ __ mov(reg, Immediate(1));
+ }
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ X87OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmp(input, Immediate(i.InputInt32(index + 0)));
+ __ j(equal, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ X87OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (size_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ cmp(input, Immediate(case_count));
+ __ j(above_equal, GetLabel(i.InputRpo(1)));
+ __ jmp(Operand::JumpTable(input, times_4, table));
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, bailout_type);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// The calling convention for JSFunctions on X87 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively; the
+// steps of a call look as follows:
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ push arguments and setup ESI, EDI }--------------------------------------
+// | args + receiver | caller frame |
+// ^ esp ^ ebp
+// [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+// | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+// | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+// | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ esp,ebp
+
+// --{ pop ebp }-----------------------------------------------------------
+// | | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// --{ ret #A+1 }-----------------------------------------------------------
+// | | caller frame |
+// ^ esp ^ ebp
+
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On X87, arguments are passed on the
+// stack, the number of arguments in EAX, the address of the runtime function
+// in EBX, and the context in ESI.
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
+// | args + receiver | caller frame |
+// ^ esp ^ ebp
+// [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+// | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// Other custom linkages (e.g. for calling directly into and out of C++) may
+// need to save callee-saved registers on the stack, which is done in the
+// function prologue of generated code.
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ set up arguments in registers and on the stack }-------------------------
+// | args | caller frame |
+// ^ esp ^ ebp
+// [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }--------------------------------------------------------------
+// | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ ebp,esp
+
+// --{ save registers }---------------------------------------------------------
+// | regs | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | regs | FP | RET | args | caller frame |
+// ^esp ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }------------------------------------------------------
+// | regs | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov esp, ebp }-----------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+// | RET | args | caller frame |
+// ^ esp ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    // Assemble a prologue similar to the cdecl calling convention.
+ __ push(ebp);
+ __ mov(ebp, esp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ // TODO(turbofan): this prologue is redundant with OSR, but needed for
+ // code aging.
+ CompilationInfo* info = this->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else if (needs_frame_) {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
+ if (stack_slots > 0) {
+ // Allocate the stack slots used by this frame.
+ __ sub(esp, Immediate(stack_slots * kPointerSize));
+ }
+
+  // Initialize FPU state.
+ __ fninit();
+ __ fld1();
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ if (stack_slots > 0) {
+ __ add(esp, Immediate(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
+ }
+ }
+ __ pop(ebp); // Pop caller's frame pointer.
+ __ ret(0);
+ } else {
+ // No saved registers.
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ __ ret(0);
+ }
+ } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : (info()->IsStub()
+ ? info()->code_stub()->GetStackParameterCount()
+ : 0);
+ if (pop_count == 0) {
+ __ ret(0);
+ } else {
+ __ Ret(pop_count * kPointerSize, ebx);
+ }
+ }
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ X87OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, src);
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ Operand dst = g.ToOperand(destination);
+ __ push(src);
+ __ pop(dst);
+ }
+ } else if (source->IsConstant()) {
+ Constant src_constant = g.ToConstant(source);
+ if (src_constant.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src = src_constant.ToHeapObject();
+ int offset;
+ if (IsMaterializableFromFrame(src, &offset)) {
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, Operand(ebp, offset));
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ push(Operand(ebp, offset));
+ __ pop(dst);
+ }
+ } else if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ LoadHeapObject(dst, src);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ AllowDeferredHandleDereference embedding_raw_address;
+ if (isolate()->heap()->InNewSpace(*src)) {
+ __ PushHeapObject(src);
+ __ pop(dst);
+ } else {
+ __ mov(dst, src);
+ }
+ }
+ } else if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(dst, g.ToImmediate(source));
+ } else if (destination->IsStackSlot()) {
+ Operand dst = g.ToOperand(destination);
+ __ Move(dst, g.ToImmediate(source));
+ } else if (src_constant.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kInt32Size));
+ __ mov(MemOperand(esp, 0), Immediate(src));
+        // Keep only one value on the x87 stack: drop the old top first.
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kInt32Size));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ Move(dst, Immediate(src));
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(MemOperand(esp, 0), Immediate(lower));
+ __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
+        // Keep only one value on the x87 stack: drop the old top first.
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst0 = g.ToOperand(destination);
+ Operand dst1 = g.HighOperand(destination);
+ __ Move(dst0, Immediate(lower));
+ __ Move(dst1, Immediate(upper));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fst_s(dst);
+ break;
+ case kRepFloat64:
+ __ fst_d(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ Operand src = g.ToOperand(source);
+ auto allocated = AllocatedOperand::cast(*source);
+ if (destination->IsDoubleRegister()) {
+      // Keep only one value on the x87 stack: drop the old top first.
+ __ fstp(0);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(src);
+ break;
+ case kRepFloat64:
+ __ fld_d(src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ Operand dst = g.ToOperand(destination);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(src);
+ __ fstp_s(dst);
+ break;
+ case kRepFloat64:
+ __ fld_d(src);
+ __ fstp_d(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ X87OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ xchg(dst, src);
+ } else if (source->IsRegister() && destination->IsStackSlot()) {
+ // Register-memory.
+ __ xchg(g.ToRegister(source), g.ToOperand(destination));
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory.
+ Operand src = g.ToOperand(source);
+ Operand dst = g.ToOperand(destination);
+ __ push(dst);
+ __ push(src);
+ __ pop(dst);
+ __ pop(src);
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ UNREACHABLE();
+ } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
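+    // Swap st(0) with the stack slot: load the slot on top, exchange it
+    // with the old st(0), then store that old value back into the slot.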
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(g.ToOperand(destination));
+ __ fxch();
+ __ fstp_s(g.ToOperand(destination));
+ break;
+ case kRepFloat64:
+ __ fld_d(g.ToOperand(destination));
+ __ fxch();
+ __ fstp_d(g.ToOperand(destination));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(g.ToOperand(source));
+ __ fld_s(g.ToOperand(destination));
+ __ fstp_s(g.ToOperand(source));
+ __ fstp_s(g.ToOperand(destination));
+ break;
+ case kRepFloat64:
+ __ fld_d(g.ToOperand(source));
+ __ fld_d(g.ToOperand(destination));
+ __ fstp_d(g.ToOperand(source));
+ __ fstp_d(g.ToOperand(destination));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dd(targets[index]);
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
+ }
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
new file mode 100644
index 0000000000..d1b759be34
--- /dev/null
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -0,0 +1,122 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
+#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-codes.h"
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// X87-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X87Add) \
+ V(X87And) \
+ V(X87Cmp) \
+ V(X87Test) \
+ V(X87Or) \
+ V(X87Xor) \
+ V(X87Sub) \
+ V(X87Imul) \
+ V(X87ImulHigh) \
+ V(X87UmulHigh) \
+ V(X87Idiv) \
+ V(X87Udiv) \
+ V(X87Not) \
+ V(X87Neg) \
+ V(X87Shl) \
+ V(X87Shr) \
+ V(X87Sar) \
+ V(X87Ror) \
+ V(X87Lzcnt) \
+ V(X87Float32Cmp) \
+ V(X87Float32Add) \
+ V(X87Float32Sub) \
+ V(X87Float32Mul) \
+ V(X87Float32Div) \
+ V(X87Float32Max) \
+ V(X87Float32Min) \
+ V(X87Float32Abs) \
+ V(X87Float32Sqrt) \
+ V(X87LoadFloat64Constant) \
+ V(X87Float64Add) \
+ V(X87Float64Sub) \
+ V(X87Float64Mul) \
+ V(X87Float64Div) \
+ V(X87Float64Mod) \
+ V(X87Float64Max) \
+ V(X87Float64Min) \
+ V(X87Float64Abs) \
+ V(X87Int32ToFloat64) \
+ V(X87Float32ToFloat64) \
+ V(X87Uint32ToFloat64) \
+ V(X87Float64ToInt32) \
+ V(X87Float64ToFloat32) \
+ V(X87Float64ToUint32) \
+ V(X87Float64ExtractHighWord32) \
+ V(X87Float64ExtractLowWord32) \
+ V(X87Float64InsertHighWord32) \
+ V(X87Float64InsertLowWord32) \
+ V(X87Float64Sqrt) \
+ V(X87Float64Round) \
+ V(X87Float64Cmp) \
+ V(X87Movsxbl) \
+ V(X87Movzxbl) \
+ V(X87Movb) \
+ V(X87Movsxwl) \
+ V(X87Movzxwl) \
+ V(X87Movw) \
+ V(X87Movl) \
+ V(X87Movss) \
+ V(X87Movsd) \
+ V(X87Lea) \
+ V(X87Push) \
+ V(X87PushFloat64) \
+ V(X87PushFloat32) \
+ V(X87Poke) \
+ V(X87StoreWriteBarrier) \
+ V(X87StackCheck)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// M = memory operand
+// R = base register
+// N = index register * N for N in {1, 2, 4, 8}
+// I = immediate displacement (int32_t)
+
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MR) /* [%r1 ] */ \
+ V(MRI) /* [%r1 + K] */ \
+ V(MR1) /* [%r1 + %r2*1 ] */ \
+ V(MR2) /* [%r1 + %r2*2 ] */ \
+ V(MR4) /* [%r1 + %r2*4 ] */ \
+ V(MR8) /* [%r1 + %r2*8 ] */ \
+ V(MR1I) /* [%r1 + %r2*1 + K] */ \
+ V(MR2I) /* [%r1 + %r2*2 + K] */ \
+  V(MR4I) /* [%r1 + %r2*4 + K] */ \
+  V(MR8I) /* [%r1 + %r2*8 + K] */ \
+ V(M1) /* [ %r2*1 ] */ \
+ V(M2) /* [ %r2*2 ] */ \
+ V(M4) /* [ %r2*4 ] */ \
+ V(M8) /* [ %r2*8 ] */ \
+ V(M1I) /* [ %r2*1 + K] */ \
+ V(M2I) /* [ %r2*2 + K] */ \
+ V(M4I) /* [ %r2*4 + K] */ \
+ V(M8I) /* [ %r2*8 + K] */ \
+ V(MI) /* [ K] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
new file mode 100644
index 0000000000..d350738e0b
--- /dev/null
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -0,0 +1,1355 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds X87-specific methods for generating operands.
+class X87OperandGenerator final : public OperandGenerator {
+ public:
+ explicit X87OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseByteRegister(Node* node) {
+ // TODO(titzer): encode byte register use constraints.
+ return UseFixed(node, edx);
+ }
+
+ InstructionOperand DefineAsByteRegister(Node* node) {
+ // TODO(titzer): encode byte register def constraints.
+ return DefineAsRegister(node);
+ }
+
+ InstructionOperand CreateImmediate(int imm) {
+ return sequence()->AddImmediate(Constant(imm));
+ }
+
+ bool CanBeImmediate(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kExternalConstant:
+ return true;
+ case IrOpcode::kHeapConstant: {
+ // Constants in new space cannot be used as immediates in V8 because
+ // the GC does not scan code objects when collecting the new generation.
+ Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+ Isolate* isolate = value.handle()->GetIsolate();
+ return !isolate->heap()->InNewSpace(*value.handle());
+ }
+ default:
+ return false;
+ }
+ }
+
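+  // Folds a base register, an optionally scaled index, and a constant
+  // displacement into the most compact addressing mode, appending the
+  // operands it consumes to inputs[] and bumping *input_count.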
+ AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
+ Node* displacement_node,
+ InstructionOperand inputs[],
+ size_t* input_count) {
+ AddressingMode mode = kMode_MRI;
+ int32_t displacement = (displacement_node == NULL)
+ ? 0
+ : OpParameter<int32_t>(displacement_node);
+ if (base != NULL) {
+ if (base->opcode() == IrOpcode::kInt32Constant) {
+ displacement += OpParameter<int32_t>(base);
+ base = NULL;
+ }
+ }
+ if (base != NULL) {
+ inputs[(*input_count)++] = UseRegister(base);
+ if (index != NULL) {
+ DCHECK(scale >= 0 && scale <= 3);
+ inputs[(*input_count)++] = UseRegister(index);
+ if (displacement != 0) {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+ kMode_MR4I, kMode_MR8I};
+ mode = kMRnI_modes[scale];
+ } else {
+ static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+ kMode_MR4, kMode_MR8};
+ mode = kMRn_modes[scale];
+ }
+ } else {
+ if (displacement == 0) {
+ mode = kMode_MR;
+ } else {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ mode = kMode_MRI;
+ }
+ }
+ } else {
+ DCHECK(scale >= 0 && scale <= 3);
+ if (index != NULL) {
+ inputs[(*input_count)++] = UseRegister(index);
+ if (displacement != 0) {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale];
+ } else {
+ static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
+ kMode_M4, kMode_M8};
+ mode = kMn_modes[scale];
+ }
+ } else {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ return kMode_MI;
+ }
+ }
+ return mode;
+ }
+
+ AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
+ InstructionOperand inputs[],
+ size_t* input_count) {
+ BaseWithIndexAndDisplacement32Matcher m(node, true);
+ DCHECK(m.matches());
+    if (m.displacement() == NULL || CanBeImmediate(m.displacement())) {
+ return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
+ m.displacement(), inputs, input_count);
+ } else {
+ inputs[(*input_count)++] = UseRegister(node->InputAt(0));
+ inputs[(*input_count)++] = UseRegister(node->InputAt(1));
+ return kMode_MR1;
+ }
+ }
+
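+  // A node with no remaining uses may be clobbered, so it is the better
+  // choice for the read/write (left) operand of a two-address instruction.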
+ bool CanBeBetterLeftOperand(Node* node) const {
+ return !selector()->IsLive(node);
+ }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kX87Movss;
+ break;
+ case kRepFloat64:
+ opcode = kX87Movsd;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kX87Movsxbl : kX87Movzxbl;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kX87Movsxwl : kX87Movzxwl;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kX87Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ X87OperandGenerator g(this);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ X87OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK_EQ(kRepTagged, rep);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ if (g.CanBeImmediate(index)) {
+ InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister()};
+ Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
+ g.UseImmediate(index), g.UseFixed(value, ecx), arraysize(temps),
+ temps);
+ } else {
+ InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+ Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
+ g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
+ temps);
+ }
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kX87Movss;
+ break;
+ case kRepFloat64:
+ opcode = kX87Movsd;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kX87Movb;
+ break;
+ case kRepWord16:
+ opcode = kX87Movw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord32:
+ opcode = kX87Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == kRepWord8 || rep == kRepBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ X87OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
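+ // A constant buffer address can be folded into the addressing mode as an
+ // immediate; otherwise it has to live in a register.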
+ if (g.CanBeImmediate(buffer)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ offset_operand, g.UseImmediate(buffer));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand);
+ }
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ X87OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value)
+ ? g.UseImmediate(value)
+ : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
+ : g.UseRegister(value));
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ if (g.CanBeImmediate(buffer)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, value_operand, offset_operand,
+ g.UseImmediate(buffer));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
+ offset_operand, length_operand, value_operand, g.UseRegister(buffer),
+ offset_operand);
+ }
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+ if (left == right) {
+ // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+ // mov eax, [ebp-0x10]
+ // add eax, [ebp-0x10]
+ // jo label
+ InstructionOperand const input = g.UseRegister(left);
+ inputs[input_count++] = input;
+ inputs[input_count++] = input;
+ } else if (g.CanBeImmediate(right)) {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.UseImmediate(right);
+ } else {
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
+ }
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
+}
+
+
+// Shared routine for multiple binary operations, used when no explicit
+// flags continuation is needed.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kX87And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kX87Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ X87OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kX87Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop(this, node, kX87Xor);
+ }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
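+ // The shift count must be an immediate or live in ecx, as required by
+ // the x86 variable-count shift instructions.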
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseFixed(right, ecx));
+ }
+}
+
+
+namespace {
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
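+ // The widening multiply takes one operand implicitly in eax and leaves
+ // the high 32 bits of the product in edx.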
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
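+ // idiv/udiv implicitly consume eax:edx and leave the quotient in eax,
+ // so the dividend and result are fixed to eax and edx is reserved.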
+ InstructionOperand temps[] = {g.TempRegister(edx)};
+ selector->Emit(opcode, g.DefineAsFixed(node, eax),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)));
+}
+
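+// Emits a lea computing base + index * scale + displacement into |result|,
+// reusing GenerateMemoryOperandInputs to select the addressing mode.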
+void EmitLea(InstructionSelector* selector, Node* result, Node* index,
+ int scale, Node* base, Node* displacement) {
+ X87OperandGenerator g(selector);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode mode = g.GenerateMemoryOperandInputs(
+ index, scale, base, displacement, inputs, &input_count);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(result);
+
+ InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
+
+ selector->Emit(opcode, 1, outputs, input_count, inputs);
+}
+
+} // namespace
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32ScaleMatcher m(node, true);
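+ // Small constant shift amounts can be folded into the scale factor of a
+ // lea.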
+ if (m.matches()) {
+ Node* index = node->InputAt(0);
+ Node* base = m.power_of_two_plus_one() ? index : NULL;
+ EmitLea(this, node, index, m.scale(), base, NULL);
+ return;
+ }
+ VisitShift(this, node, kX87Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitShift(this, node, kX87Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitShift(this, node, kX87Sar);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitShift(this, node, kX87Ror);
+}
+
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ X87OperandGenerator g(this);
+
+ // Try to match the Add to a lea pattern.
+ BaseWithIndexAndDisplacement32Matcher m(node);
+ if (m.matches() &&
+ (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode mode = g.GenerateMemoryOperandInputs(
+ m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+
+ InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
+ Emit(opcode, 1, outputs, input_count, inputs);
+ return;
+ }
+
+ // No lea pattern matched; fall back to a regular add.
+ VisitBinop(this, node, kX87Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ X87OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kX87Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+ } else {
+ VisitBinop(this, node, kX87Sub);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Int32ScaleMatcher m(node, true);
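+ // Multiplications by 2, 4 or 8 (scaled index) and by 3, 5 or 9 (base
+ // plus scaled index) can be expressed as a lea.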
+ if (m.matches()) {
+ Node* index = node->InputAt(0);
+ Node* base = m.power_of_two_plus_one() ? index : NULL;
+ EmitLea(this, node, index, m.scale(), base, NULL);
+ return;
+ }
+ X87OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (g.CanBeImmediate(right)) {
+ Emit(kX87Imul, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
+ } else {
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kX87ImulHigh);
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kX87UmulHigh);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kX87Idiv);
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ VisitDiv(this, node, kX87Udiv);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kX87Idiv);
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ VisitMod(this, node, kX87Udiv);
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32ToFloat64, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Uint32ToFloat64, g.DefineAsFixed(node, stX_0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToFloat32, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+ return;
+ case TruncationMode::kRoundToZero:
+ Emit(kX87Float64ToInt32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+ return;
+ }
+ UNREACHABLE();
+}
+
+
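+// The x87 FPU is stack-based: the float visitors below push their operands
+// onto the FPU stack and define the result in st(0).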
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ X87OperandGenerator g(this);
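+ // eax is needed by the fprem loop in the code generator (for fnstsw).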
+ InstructionOperand temps[] = {g.TempRegister(eax)};
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundDown),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+ X87OperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr, temp_count, temps);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value =
+ g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
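+ // Prefer a register push on Atom, where pushing directly from memory
+ // is comparatively slow.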
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): Handle pushing double parameters.
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kX87Push, g.NoOutput(), value);
+ }
+ }
+
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ X87OperandGenerator g(this);
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): Handle pushing double parameters.
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kX87Push, g.NoOutput(), value);
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
+ left, right);
+ }
+}
+
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ Node* left, Node* right, FlagsContinuation* cont,
+ bool commutative) {
+ X87OperandGenerator g(selector);
+ if (commutative && g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+// Shared routine for multiple float32 compare operations (inputs commuted).
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(cont->Encode(kX87Float32Cmp),
+ g.DefineAsByteRegister(cont->result()));
+ }
+}
+
+
+// Shared routine for multiple float64 compare operations (inputs commuted).
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(cont->Encode(kX87Float64Cmp),
+ g.DefineAsByteRegister(cont->result()));
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right)) {
+ VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+ } else if (g.CanBeImmediate(left)) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, left, right, cont,
+ node->op()->HasProperty(Operator::kCommutative));
+ }
+}
+
+
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
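+ // Recognize the stack-limit check pattern and emit the dedicated
+ // kX87StackCheck instruction instead of a generic compare.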
+ if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(selector->isolate());
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kX87StackCheck);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ }
+ return;
+ }
+ }
+ VisitWordCompare(selector, node, kX87Cmp, cont);
+}
+
+
+// Shared routine for word comparison with zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ // Try to combine the branch with a comparison.
+ while (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Try to combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat32Compare(selector, value, cont);
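+ // The less-than cases below select unsigned greater-than conditions
+ // because the float compare helpers push their inputs commuted.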
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation>) is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == NULL || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX87Add, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX87Sub, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(selector, value, kX87Test, cont);
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Continuation could not be combined with a compare; emit a compare against 0.
+ X87OperandGenerator g(selector);
+ VisitCompare(selector, kX87Cmp, g.Use(value), g.TempImmediate(0), cont);
+}
+
+} // namespace
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ X87OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
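+ // The heuristic below weighs code size against dispatch time, counting
+ // time three times as heavily as space.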
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kX87Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX87Add, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX87Add, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX87Sub, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX87Sub, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ExtractLowWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ExtractHighWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kX87Float64InsertLowWord32, g.UseFixed(node, stX_0), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kX87Float64InsertHighWord32, g.UseFixed(node, stX_0),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kWord32ShiftIsSafe;
+ return flags;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x87/linkage-x87.cc b/deps/v8/src/compiler/x87/linkage-x87.cc
new file mode 100644
index 0000000000..69e1b3de59
--- /dev/null
+++ b/deps/v8/src/compiler/x87/linkage-x87.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
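+// Register conventions for the x87 port. These match ia32, since x87
+// differs from ia32 only in using the FPU instead of SSE.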
+struct X87LinkageHelperTraits {
+ static Register ReturnValueReg() { return eax; }
+ static Register ReturnValue2Reg() { return edx; }
+ static Register JSCallFunctionReg() { return edi; }
+ static Register ContextReg() { return esi; }
+ static Register RuntimeCallFunctionReg() { return ebx; }
+ static Register RuntimeCallArgCountReg() { return eax; }
+ static RegList CCalleeSaveRegisters() {
+ return esi.bit() | edi.bit() | ebx.bit();
+ }
+ static RegList CCalleeSaveFPRegisters() { return 0; }
+ static Register CRegisterParameter(int i) { return no_reg; }
+ static int CRegisterParametersLength() { return 0; }
+ static int CStackBackingStoreLength() { return 0; }
+};
+
+typedef LinkageHelper<X87LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
+ CallDescriptor::Flags flags) {
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties, MachineType return_type) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties,
+ return_type);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ const MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index a6a61df624..b29405b505 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -40,10 +40,10 @@ bool ScriptContextTable::Lookup(Handle<ScriptContextTable> table,
DCHECK(context->IsScriptContext());
Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &result->mode, &result->init_flag,
+ scope_info, name, &result->mode, &result->location, &result->init_flag,
&result->maybe_assigned_flag);
- if (slot_index >= 0) {
+ if (slot_index >= 0 && result->location == VariableLocation::CONTEXT) {
result->context_index = i;
result->slot_index = slot_index;
return true;
@@ -85,22 +85,13 @@ Context* Context::script_context() {
Context* Context::native_context() {
- // Fast case: the global object for this context has been set. In
- // that case, the global object has a direct pointer to the global
- // context.
- if (global_object()->IsGlobalObject()) {
- return global_object()->native_context();
- }
-
- // During bootstrapping, the global object might not be set and we
- // have to search the context chain to find the native context.
- DCHECK(this->GetIsolate()->bootstrapper()->IsActive());
- Context* current = this;
- while (!current->IsNativeContext()) {
- JSFunction* closure = JSFunction::cast(current->closure());
- current = Context::cast(closure->context());
- }
- return current;
+ // Fast case: the receiver context is already a native context.
+ if (IsNativeContext()) return this;
+ // The global object has a direct pointer to the native context. If the
+ // following DCHECK fails, the native context is probably being accessed
+ // indirectly during bootstrapping. This is unsupported.
+ DCHECK(global_object()->IsGlobalObject());
+ return global_object()->native_context();
}
@@ -258,8 +249,13 @@ Handle<Object> Context::Lookup(Handle<String> name,
object->IsJSContextExtensionObject()) {
maybe = JSReceiver::GetOwnPropertyAttributes(object, name);
} else if (context->IsWithContext()) {
- LookupIterator it(object, name);
- maybe = UnscopableLookup(&it);
+ // A with context will never bind "this".
+ if (name->Equals(*isolate->factory()->this_string())) {
+ maybe = Just(ABSENT);
+ } else {
+ LookupIterator it(object, name);
+ maybe = UnscopableLookup(&it);
+ }
} else {
maybe = JSReceiver::GetPropertyAttributes(object, name);
}
@@ -291,14 +287,15 @@ Handle<Object> Context::Lookup(Handle<String> name,
ScopeInfo::cast(context->extension()), isolate);
}
VariableMode mode;
+ VariableLocation location;
InitializationFlag init_flag;
// TODO(sigurds) Figure out whether maybe_assigned_flag should
// be used to compute binding_flags.
MaybeAssignedFlag maybe_assigned_flag;
int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
+ scope_info, name, &mode, &location, &init_flag, &maybe_assigned_flag);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
- if (slot_index >= 0) {
+ if (slot_index >= 0 && location == VariableLocation::CONTEXT) {
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
slot_index, mode);
@@ -356,6 +353,27 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
+void Context::InitializeGlobalSlots() {
+ DCHECK(IsScriptContext());
+ DisallowHeapAllocation no_gc;
+
+ ScopeInfo* scope_info = ScopeInfo::cast(extension());
+
+ int context_globals = scope_info->ContextGlobalCount();
+ if (context_globals > 0) {
+ PropertyCell* empty_cell = GetHeap()->empty_property_cell();
+
+ int context_locals = scope_info->ContextLocalCount();
+ int index = Context::MIN_CONTEXT_SLOTS + context_locals;
+ for (int i = 0; i < context_globals; i++) {
+ // Clear both read and write slots.
+ set(index++, empty_cell);
+ set(index++, empty_cell);
+ }
+ }
+}
+
+
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
#ifdef ENABLE_SLOW_DCHECKS
@@ -389,8 +407,9 @@ void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(function->next_function_link()->IsUndefined());
- function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
- set(OPTIMIZED_FUNCTIONS_LIST, function);
+ function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST),
+ UPDATE_WEAK_WRITE_BARRIER);
+ set(OPTIMIZED_FUNCTIONS_LIST, function, UPDATE_WEAK_WRITE_BARRIER);
}
@@ -404,11 +423,14 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
element_function->next_function_link()->IsJSFunction());
if (element_function == function) {
if (prev == NULL) {
- set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link());
+ set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link(),
+ UPDATE_WEAK_WRITE_BARRIER);
} else {
- prev->set_next_function_link(element_function->next_function_link());
+ prev->set_next_function_link(element_function->next_function_link(),
+ UPDATE_WEAK_WRITE_BARRIER);
}
- element_function->set_next_function_link(GetHeap()->undefined_value());
+ element_function->set_next_function_link(GetHeap()->undefined_value(),
+ UPDATE_WEAK_WRITE_BARRIER);
return;
}
prev = element_function;
@@ -420,7 +442,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
void Context::SetOptimizedFunctionsListHead(Object* head) {
DCHECK(IsNativeContext());
- set(OPTIMIZED_FUNCTIONS_LIST, head);
+ set(OPTIMIZED_FUNCTIONS_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
@@ -435,13 +457,13 @@ void Context::AddOptimizedCode(Code* code) {
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(code->next_code_link()->IsUndefined());
code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
- set(OPTIMIZED_CODE_LIST, code);
+ set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
void Context::SetOptimizedCodeListHead(Object* head) {
DCHECK(IsNativeContext());
- set(OPTIMIZED_CODE_LIST, head);
+ set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
@@ -453,7 +475,7 @@ Object* Context::OptimizedCodeListHead() {
void Context::SetDeoptimizedCodeListHead(Object* head) {
DCHECK(IsNativeContext());
- set(DEOPTIMIZED_CODE_LIST, head);
+ set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
@@ -494,4 +516,5 @@ bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
}
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 2d04da29b3..1210848384 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -82,9 +82,11 @@ enum BindingFlags {
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
+ V(JS_ARRAY_STRONG_MAPS_INDEX, Object, js_array_strong_maps) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -101,6 +103,7 @@ enum BindingFlags {
V(TO_LENGTH_FUN_INDEX, JSFunction, to_length_fun) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
@@ -135,7 +138,8 @@ enum BindingFlags {
V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
- V(ALIASED_ARGUMENTS_MAP_INDEX, Map, aliased_arguments_map) \
+ V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
+ V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
@@ -152,11 +156,13 @@ enum BindingFlags {
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(MAP_CACHE_INDEX, Object, map_cache) \
+ V(STRONG_MAP_CACHE_INDEX, Object, strong_map_cache) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(PROMISE_STATUS_INDEX, Symbol, promise_status) \
+ V(PROMISE_VALUE_INDEX, Symbol, promise_value) \
V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
@@ -182,10 +188,25 @@ enum BindingFlags {
V(STRONG_GENERATOR_FUNCTION_MAP_INDEX, Map, strong_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
+ V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
+ V(JS_MAP_MAP_INDEX, Map, js_map_map) \
+ V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
+ V(JS_SET_MAP_INDEX, Map, js_set_map) \
+ V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
+ V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
+ V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
+ V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
+ V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
+ V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
+ V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
+ V(MAP_FROM_ARRAY_INDEX, JSFunction, map_from_array) \
+ V(SET_FROM_ARRAY_INDEX, JSFunction, set_from_array) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table)
+ V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
+ V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
+ V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_exports_object)
// A table of all script contexts. Every loaded top-level script with top-level
@@ -205,6 +226,7 @@ class ScriptContextTable : public FixedArray {
int context_index;
int slot_index;
VariableMode mode;
+ VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
};
@@ -317,7 +339,8 @@ class Context: public FixedArray {
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
SLOPPY_ARGUMENTS_MAP_INDEX,
- ALIASED_ARGUMENTS_MAP_INDEX,
+ FAST_ALIASED_ARGUMENTS_MAP_INDEX,
+ SLOW_ALIASED_ARGUMENTS_MAP_INDEX,
STRICT_ARGUMENTS_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
SLOPPY_FUNCTION_MAP_INDEX,
@@ -336,9 +359,11 @@ class Context: public FixedArray {
STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
SYMBOL_FUNCTION_INDEX,
OBJECT_FUNCTION_INDEX,
+ JS_OBJECT_STRONG_MAP_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
JS_ARRAY_MAPS_INDEX,
+ JS_ARRAY_STRONG_MAPS_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@@ -373,6 +398,7 @@ class Context: public FixedArray {
FLOAT64_ARRAY_EXTERNAL_MAP_INDEX,
UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX,
DATA_VIEW_FUN_INDEX,
+ SHARED_ARRAY_BUFFER_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
@@ -393,6 +419,7 @@ class Context: public FixedArray {
RUN_MICROTASKS_INDEX,
ENQUEUE_MICROTASK_INDEX,
PROMISE_STATUS_INDEX,
+ PROMISE_VALUE_INDEX,
PROMISE_CREATE_INDEX,
PROMISE_RESOLVE_INDEX,
PROMISE_REJECT_INDEX,
@@ -416,12 +443,28 @@ class Context: public FixedArray {
STRONG_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
ITERATOR_RESULT_MAP_INDEX,
+ JS_MAP_FUN_INDEX,
+ JS_MAP_MAP_INDEX,
+ JS_SET_FUN_INDEX,
+ JS_SET_MAP_INDEX,
+ MAP_GET_METHOD_INDEX,
+ MAP_SET_METHOD_INDEX,
+ MAP_HAS_METHOD_INDEX,
+ MAP_DELETE_METHOD_INDEX,
+ SET_ADD_METHOD_INDEX,
+ SET_HAS_METHOD_INDEX,
+ SET_DELETE_METHOD_INDEX,
+ MAP_FROM_ARRAY_INDEX,
+ SET_FROM_ARRAY_INDEX,
MAP_ITERATOR_MAP_INDEX,
SET_ITERATOR_MAP_INDEX,
ARRAY_VALUES_ITERATOR_INDEX,
SCRIPT_CONTEXT_TABLE_INDEX,
MAP_CACHE_INDEX,
+ STRONG_MAP_CACHE_INDEX,
TO_LENGTH_FUN_INDEX,
+ NATIVES_UTILS_OBJECT_INDEX,
+ EXTRAS_EXPORTS_OBJECT_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@@ -516,6 +559,9 @@ class Context: public FixedArray {
that->global_object()->native_context()->security_token();
}
+ // Initializes global variable bindings in given script context.
+ void InitializeGlobalSlots();
+
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 32de34c6a7..5877473854 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -560,4 +560,5 @@ bool IsSpecialIndex(UnicodeCache* unicode_cache, String* string) {
}
return true;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 6972011e5f..2788ff7f9f 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -194,4 +194,5 @@ void Counters::ResetHistograms() {
#undef HM
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 5d021ec535..f48499e5f0 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -544,4 +544,5 @@ void CpuProfiler::LogBuiltins() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 7032415181..a356bd4b4e 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -25,7 +25,7 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
return;
}
- TryCatch try_catch;
+ TryCatch try_catch(isolate);
// Get the toJSONProtocol function on the event and get the JSON format.
Local<String> to_json_fun_name =
@@ -76,7 +76,7 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
// Ignore empty commands.
if (strlen(command) == 0) continue;
- TryCatch try_catch;
+ TryCatch try_catch(isolate);
// Convert the debugger command to a JSON debugger request.
Handle<Value> request = Shell::DebugCommandToJSONRequest(
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7a300efe0e..7db6f3ed9e 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -46,6 +46,7 @@
#include "src/d8-debug.h"
#include "src/debug.h"
#include "src/snapshot/natives.h"
+#include "src/utils.h"
#include "src/v8.h"
#endif // !V8_SHARED
@@ -104,6 +105,19 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::Platform* g_platform = NULL;
+
+#ifndef V8_SHARED
+bool FindInObjectList(Handle<Object> object, const Shell::ObjectList& list) {
+ for (int i = 0; i < list.length(); ++i) {
+ if (list[i]->StrictEquals(object)) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif // !V8_SHARED
+
+
} // namespace
@@ -191,9 +205,14 @@ base::Mutex Shell::context_mutex_;
const base::TimeTicks Shell::kInitialTicks =
base::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
+base::Mutex Shell::workers_mutex_;
+bool Shell::allow_new_workers_ = true;
+i::List<Worker*> Shell::workers_;
+i::List<SharedArrayBuffer::Contents> Shell::externalized_shared_contents_;
#endif // !V8_SHARED
Persistent<Context> Shell::evaluation_context_;
+ArrayBuffer::Allocator* Shell::array_buffer_allocator;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
@@ -226,9 +245,8 @@ ScriptCompiler::CachedData* CompileForCachedData(
name_buffer = new uint16_t[name_length];
name_string->Write(name_buffer, 0, name_length);
}
- ShellArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &allocator;
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
Isolate* temp_isolate = Isolate::New(create_params);
ScriptCompiler::CachedData* result = NULL;
{
@@ -307,7 +325,7 @@ bool Shell::ExecuteString(Isolate* isolate, Handle<String> source,
bool FLAG_debugger = false;
#endif // !V8_SHARED
HandleScope handle_scope(isolate);
- TryCatch try_catch;
+ TryCatch try_catch(isolate);
options.script_executed = true;
if (FLAG_debugger) {
// When debugging make exceptions appear to be uncaught.
@@ -352,7 +370,7 @@ bool Shell::ExecuteString(Isolate* isolate, Handle<String> source,
}
#if !defined(V8_SHARED)
} else {
- v8::TryCatch try_catch;
+ v8::TryCatch try_catch(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
@@ -474,17 +492,24 @@ void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Realm.create() creates a new realm and returns its index.
void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
+ TryCatch try_catch(isolate);
PerIsolateData* data = PerIsolateData::Get(isolate);
Persistent<Context>* old_realms = data->realms_;
int index = data->realm_count_;
data->realms_ = new Persistent<Context>[++data->realm_count_];
for (int i = 0; i < index; ++i) {
data->realms_[i].Reset(isolate, old_realms[i]);
+ old_realms[i].Reset();
}
delete[] old_realms;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- data->realms_[index].Reset(
- isolate, Context::New(isolate, NULL, global_template));
+ Local<Context> context = Context::New(isolate, NULL, global_template);
+ if (context.IsEmpty()) {
+ DCHECK(try_catch.HasCaught());
+ try_catch.ReThrow();
+ return;
+ }
+ data->realms_[index].Reset(isolate, context);
args.GetReturnValue().Set(index);
}
@@ -571,7 +596,7 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// Explicitly catch potential exceptions in toString().
- v8::TryCatch try_catch;
+ v8::TryCatch try_catch(args.GetIsolate());
Handle<String> str_obj = args[i]->ToString(args.GetIsolate());
if (try_catch.HasCaught()) {
try_catch.ReThrow();
@@ -661,6 +686,128 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
+#ifndef V8_SHARED
+void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ if (args.Length() < 1 || !args[0]->IsString()) {
+ Throw(args.GetIsolate(), "1st argument must be a string");
+ return;
+ }
+
+ {
+ base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+ if (!allow_new_workers_) return;
+
+ Worker* worker = new Worker;
+ args.This()->SetInternalField(0, External::New(isolate, worker));
+ workers_.Add(worker);
+
+ String::Utf8Value script(args[0]);
+ if (!*script) {
+ Throw(args.GetIsolate(), "Can't get worker script");
+ return;
+ }
+ worker->StartExecuteInThread(isolate, *script);
+ }
+}
+
+
+void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ Local<Context> context = isolate->GetCurrentContext();
+
+ if (args.Length() < 1) {
+ Throw(isolate, "Invalid argument");
+ return;
+ }
+
+ Local<Value> this_value = args.This()->GetInternalField(0);
+ if (!this_value->IsExternal()) {
+ Throw(isolate, "this is not a Worker");
+ return;
+ }
+
+ Worker* worker =
+ static_cast<Worker*>(Local<External>::Cast(this_value)->Value());
+
+ Handle<Value> message = args[0];
+ ObjectList to_transfer;
+ if (args.Length() >= 2) {
+ if (!args[1]->IsArray()) {
+ Throw(isolate, "Transfer list must be an Array");
+ return;
+ }
+
+ Handle<Array> transfer = Handle<Array>::Cast(args[1]);
+ uint32_t length = transfer->Length();
+ for (uint32_t i = 0; i < length; ++i) {
+ Handle<Value> element;
+ if (transfer->Get(context, i).ToLocal(&element)) {
+ if (!element->IsArrayBuffer() && !element->IsSharedArrayBuffer()) {
+ Throw(isolate,
+ "Each transfer array element must be an ArrayBuffer or "
+ "SharedArrayBuffer.");
+ return;
+ }
+
+ to_transfer.Add(Handle<Object>::Cast(element));
+ }
+ }
+ }
+
+ ObjectList seen_objects;
+ SerializationData* data = new SerializationData;
+ if (SerializeValue(isolate, message, to_transfer, &seen_objects, data)) {
+ worker->PostMessage(data);
+ } else {
+ delete data;
+ }
+}
+
+
+void Shell::WorkerGetMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+
+ Local<Value> this_value = args.This()->GetInternalField(0);
+ if (!this_value->IsExternal()) {
+ Throw(isolate, "this is not a Worker");
+ return;
+ }
+
+ Worker* worker =
+ static_cast<Worker*>(Local<External>::Cast(this_value)->Value());
+
+ SerializationData* data = worker->GetMessage();
+ if (data) {
+ int offset = 0;
+ Local<Value> data_value;
+ if (Shell::DeserializeValue(isolate, *data, &offset).ToLocal(&data_value)) {
+ args.GetReturnValue().Set(data_value);
+ }
+ delete data;
+ }
+}
+
+
+void Shell::WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+ Local<Value> this_value = args.This()->GetInternalField(0);
+ if (!this_value->IsExternal()) {
+ Throw(isolate, "this is not a Worker");
+ return;
+ }
+
+ Worker* worker =
+ static_cast<Worker*>(Local<External>::Cast(this_value)->Value());
+ worker->Terminate();
+}
+#endif // !V8_SHARED
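
Together these callbacks expose a small, synchronous Worker API to d8 scripts. A minimal usage sketch (hypothetical; in this version the constructor takes the worker's source code as a string, not a file name):

    var worker = new Worker("onmessage = function(m) { postMessage(m); };");
    worker.postMessage({answer: 42});  // serialized via Shell::SerializeValue
    var reply = worker.getMessage();   // blocks until the worker posts back
    // reply.answer == 42
    worker.terminate();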
+
+
void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
int exit_code = args[0]->Int32Value();
OnExit(args.GetIsolate());
@@ -992,6 +1139,21 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
FunctionTemplate::New(isolate, PerformanceNow));
global_template->Set(String::NewFromUtf8(isolate, "performance"),
performance_template);
+
+ Handle<FunctionTemplate> worker_fun_template =
+ FunctionTemplate::New(isolate, WorkerNew);
+ worker_fun_template->PrototypeTemplate()->Set(
+ String::NewFromUtf8(isolate, "terminate"),
+ FunctionTemplate::New(isolate, WorkerTerminate));
+ worker_fun_template->PrototypeTemplate()->Set(
+ String::NewFromUtf8(isolate, "postMessage"),
+ FunctionTemplate::New(isolate, WorkerPostMessage));
+ worker_fun_template->PrototypeTemplate()->Set(
+ String::NewFromUtf8(isolate, "getMessage"),
+ FunctionTemplate::New(isolate, WorkerGetMessage));
+ worker_fun_template->InstanceTemplate()->SetInternalFieldCount(1);
+ global_template->Set(String::NewFromUtf8(isolate, "Worker"),
+ worker_fun_template);
#endif // !V8_SHARED
Handle<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
@@ -1018,6 +1180,10 @@ void Shell::InitializeDebugger(Isolate* isolate) {
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
utility_context_.Reset(isolate,
Context::New(isolate, NULL, global_template));
+ if (utility_context_.IsEmpty()) {
+ printf("Failed to initialize debugger\n");
+ Shell::Exit(1);
+ }
#endif // !V8_SHARED
}
@@ -1321,9 +1487,8 @@ base::Thread::Options SourceGroup::GetThreadOptions() {
void SourceGroup::ExecuteInThread() {
- ShellArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &allocator;
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
Isolate* isolate = Isolate::New(create_params);
do {
next_semaphore_.Wait();
@@ -1339,19 +1504,7 @@ void SourceGroup::ExecuteInThread() {
Execute(isolate);
}
}
- if (Shell::options.send_idle_notification) {
- const double kLongIdlePauseInSeconds = 1.0;
- isolate->ContextDisposedNotification();
- isolate->IdleNotificationDeadline(
- g_platform->MonotonicallyIncreasingTime() +
- kLongIdlePauseInSeconds);
- }
- if (Shell::options.invoke_weak_callbacks) {
- // By sending a low memory notifications, we will try hard to collect
- // all garbage and will therefore also invoke all weak callbacks of
- // actually unreachable persistent handles.
- isolate->LowMemoryNotification();
- }
+ Shell::CollectGarbage(isolate);
}
done_semaphore_.Signal();
} while (!Shell::options.last_run);
@@ -1377,6 +1530,262 @@ void SourceGroup::WaitForThread() {
done_semaphore_.Wait();
}
}
+
+
+SerializationData::~SerializationData() {
+ // Any ArrayBuffer::Contents are owned by this SerializationData object.
+ // SharedArrayBuffer::Contents may be used by other threads, so must be
+ // cleaned up by the main thread in Shell::CleanupWorkers().
+ for (int i = 0; i < array_buffer_contents.length(); ++i) {
+ ArrayBuffer::Contents& contents = array_buffer_contents[i];
+ Shell::array_buffer_allocator->Free(contents.Data(), contents.ByteLength());
+ }
+}
+
+
+void SerializationData::WriteTag(SerializationTag tag) { data.Add(tag); }
+
+
+void SerializationData::WriteMemory(const void* p, int length) {
+ if (length > 0) {
+ i::Vector<uint8_t> block = data.AddBlock(0, length);
+ memcpy(&block[0], p, length);
+ }
+}
+
+
+void SerializationData::WriteArrayBufferContents(
+ const ArrayBuffer::Contents& contents) {
+ array_buffer_contents.Add(contents);
+ WriteTag(kSerializationTagTransferredArrayBuffer);
+ int index = array_buffer_contents.length() - 1;
+ Write(index);
+}
+
+
+void SerializationData::WriteSharedArrayBufferContents(
+ const SharedArrayBuffer::Contents& contents) {
+ shared_array_buffer_contents.Add(contents);
+ WriteTag(kSerializationTagTransferredSharedArrayBuffer);
+ int index = shared_array_buffer_contents.length() - 1;
+ Write(index);
+}
+
+
+SerializationTag SerializationData::ReadTag(int* offset) const {
+ return static_cast<SerializationTag>(Read<uint8_t>(offset));
+}
+
+
+void SerializationData::ReadMemory(void* p, int length, int* offset) const {
+ if (length > 0) {
+ memcpy(p, &data[*offset], length);
+ (*offset) += length;
+ }
+}
+
+
+void SerializationData::ReadArrayBufferContents(ArrayBuffer::Contents* contents,
+ int* offset) const {
+ int index = Read<int>(offset);
+ DCHECK(index < array_buffer_contents.length());
+ *contents = array_buffer_contents[index];
+}
+
+
+void SerializationData::ReadSharedArrayBufferContents(
+ SharedArrayBuffer::Contents* contents, int* offset) const {
+ int index = Read<int>(offset);
+ DCHECK(index < shared_array_buffer_contents.length());
+ *contents = shared_array_buffer_contents[index];
+}
+
+
+void SerializationDataQueue::Enqueue(SerializationData* data) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ data_.Add(data);
+}
+
+
+bool SerializationDataQueue::Dequeue(SerializationData** data) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ if (data_.is_empty()) return false;
+ *data = data_.Remove(0);
+ return true;
+}
+
+
+bool SerializationDataQueue::IsEmpty() {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ return data_.is_empty();
+}
+
+
+void SerializationDataQueue::Clear() {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ for (int i = 0; i < data_.length(); ++i) {
+ delete data_[i];
+ }
+ data_.Clear();
+}
+
+
+Worker::Worker()
+ : in_semaphore_(0),
+ out_semaphore_(0),
+ thread_(NULL),
+ script_(NULL),
+ state_(IDLE) {}
+
+
+Worker::~Worker() { Cleanup(); }
+
+
+void Worker::StartExecuteInThread(Isolate* isolate, const char* script) {
+ if (base::NoBarrier_CompareAndSwap(&state_, IDLE, RUNNING) == IDLE) {
+ script_ = i::StrDup(script);
+ thread_ = new WorkerThread(this);
+ thread_->Start();
+ } else {
+ // Somehow the Worker was started twice.
+ UNREACHABLE();
+ }
+}
+
+
+void Worker::PostMessage(SerializationData* data) {
+ in_queue_.Enqueue(data);
+ in_semaphore_.Signal();
+}
+
+
+SerializationData* Worker::GetMessage() {
+ SerializationData* data = NULL;
+ while (!out_queue_.Dequeue(&data)) {
+ if (base::NoBarrier_Load(&state_) != RUNNING) break;
+ out_semaphore_.Wait();
+ }
+
+ return data;
+}
+
+
+void Worker::Terminate() {
+ if (base::NoBarrier_CompareAndSwap(&state_, RUNNING, TERMINATED) == RUNNING) {
+ // Post NULL to wake the Worker thread message loop.
+ PostMessage(NULL);
+ thread_->Join();
+ }
+}
+
+
+void Worker::ExecuteInThread() {
+ Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ Isolate* isolate = Isolate::New(create_params);
+ {
+ Isolate::Scope iscope(isolate);
+ {
+ HandleScope scope(isolate);
+ PerIsolateData data(isolate);
+ Local<Context> context = Shell::CreateEvaluationContext(isolate);
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+
+ Handle<Object> global = context->Global();
+ Handle<Value> this_value = External::New(isolate, this);
+ Handle<FunctionTemplate> postmessage_fun_template =
+ FunctionTemplate::New(isolate, PostMessageOut, this_value);
+
+ Handle<Function> postmessage_fun;
+ if (postmessage_fun_template->GetFunction(context)
+ .ToLocal(&postmessage_fun)) {
+ global->Set(String::NewFromUtf8(isolate, "postMessage"),
+ postmessage_fun);
+ }
+
+ // First run the script
+ Handle<String> file_name = String::NewFromUtf8(isolate, "unnamed");
+ Handle<String> source = String::NewFromUtf8(isolate, script_);
+ if (Shell::ExecuteString(isolate, source, file_name, false, true)) {
+ // Get the message handler
+ Handle<Value> onmessage =
+ global->Get(String::NewFromUtf8(isolate, "onmessage"));
+ if (onmessage->IsFunction()) {
+ Handle<Function> onmessage_fun = Handle<Function>::Cast(onmessage);
+ // Now wait for messages
+ bool done = false;
+ while (!done) {
+ in_semaphore_.Wait();
+ SerializationData* data;
+ if (!in_queue_.Dequeue(&data)) continue;
+ if (data == NULL) {
+ done = true;
+ break;
+ }
+ int offset = 0;
+ Local<Value> data_value;
+ if (Shell::DeserializeValue(isolate, *data, &offset)
+ .ToLocal(&data_value)) {
+ Handle<Value> argv[] = {data_value};
+ (void)onmessage_fun->Call(context, global, 1, argv);
+ }
+ delete data;
+ }
+ }
+ }
+ }
+ }
+ Shell::CollectGarbage(isolate);
+ }
+ isolate->Dispose();
+
+ if (base::NoBarrier_CompareAndSwap(&state_, RUNNING, TERMINATED) == RUNNING) {
+ // Post NULL to wake the thread waiting on GetMessage() if there is one.
+ out_queue_.Enqueue(NULL);
+ out_semaphore_.Signal();
+ }
+}
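
From the script side, ExecuteInThread defines this contract: the worker source runs once, then a global onmessage function (if one was defined) is invoked per delivered message, and the injected postMessage hands results back to the main thread. A conforming worker script might look like this (hypothetical):

    var count = 0;  // top-level code runs once at worker startup
    onmessage = function(msg) {
      // msg is the clone produced by Shell::DeserializeValue.
      count++;
      postMessage({n: count, echo: msg});  // routed through PostMessageOut
    };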
+
+
+void Worker::Cleanup() {
+ delete thread_;
+ thread_ = NULL;
+ delete[] script_;
+ script_ = NULL;
+ in_queue_.Clear();
+ out_queue_.Clear();
+}
+
+
+void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope handle_scope(isolate);
+
+ if (args.Length() < 1) {
+ Throw(isolate, "Invalid argument");
+ return;
+ }
+
+ Handle<Value> message = args[0];
+
+ // TODO(binji): Allow transferring from worker to main thread?
+ Shell::ObjectList to_transfer;
+
+ Shell::ObjectList seen_objects;
+ SerializationData* data = new SerializationData;
+ if (Shell::SerializeValue(isolate, message, to_transfer, &seen_objects,
+ data)) {
+ DCHECK(args.Data()->IsExternal());
+ Handle<External> this_value = Handle<External>::Cast(args.Data());
+ Worker* worker = static_cast<Worker*>(this_value->Value());
+ worker->out_queue_.Enqueue(data);
+ worker->out_semaphore_.Signal();
+ } else {
+ delete data;
+ }
+}
#endif // !V8_SHARED
@@ -1413,6 +1822,10 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--test") == 0) {
options.test_shell = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--notest") == 0 ||
+ strcmp(argv[i], "--no-test") == 0) {
+ options.test_shell = false;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--send-idle-notification") == 0) {
options.send_idle_notification = true;
argv[i] = NULL;
@@ -1542,6 +1955,18 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
options.isolate_sources[0].Execute(isolate);
}
}
+ CollectGarbage(isolate);
+#ifndef V8_SHARED
+ for (int i = 1; i < options.num_isolates; ++i) {
+ options.isolate_sources[i].WaitForThread();
+ }
+ CleanupWorkers();
+#endif // !V8_SHARED
+ return 0;
+}
+
+
+void Shell::CollectGarbage(Isolate* isolate) {
if (options.send_idle_notification) {
const double kLongIdlePauseInSeconds = 1.0;
isolate->ContextDisposedNotification();
@@ -1554,17 +1979,266 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
// unreachable persistent handles.
isolate->LowMemoryNotification();
}
+}
+
#ifndef V8_SHARED
- for (int i = 1; i < options.num_isolates; ++i) {
- options.isolate_sources[i].WaitForThread();
+bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
+ const ObjectList& to_transfer,
+ ObjectList* seen_objects,
+ SerializationData* out_data) {
+ DCHECK(out_data);
+ Local<Context> context = isolate->GetCurrentContext();
+
+ if (value->IsUndefined()) {
+ out_data->WriteTag(kSerializationTagUndefined);
+ } else if (value->IsNull()) {
+ out_data->WriteTag(kSerializationTagNull);
+ } else if (value->IsTrue()) {
+ out_data->WriteTag(kSerializationTagTrue);
+ } else if (value->IsFalse()) {
+ out_data->WriteTag(kSerializationTagFalse);
+ } else if (value->IsNumber()) {
+ Handle<Number> num = Handle<Number>::Cast(value);
+ double value = num->Value();
+ out_data->WriteTag(kSerializationTagNumber);
+ out_data->Write(value);
+ } else if (value->IsString()) {
+ v8::String::Utf8Value str(value);
+ out_data->WriteTag(kSerializationTagString);
+ out_data->Write(str.length());
+ out_data->WriteMemory(*str, str.length());
+ } else if (value->IsArray()) {
+ Handle<Array> array = Handle<Array>::Cast(value);
+ if (FindInObjectList(array, *seen_objects)) {
+ Throw(isolate, "Duplicated arrays not supported");
+ return false;
+ }
+ seen_objects->Add(array);
+ out_data->WriteTag(kSerializationTagArray);
+ uint32_t length = array->Length();
+ out_data->Write(length);
+ for (uint32_t i = 0; i < length; ++i) {
+ Local<Value> element_value;
+ if (array->Get(context, i).ToLocal(&element_value)) {
+ if (!SerializeValue(isolate, element_value, to_transfer, seen_objects,
+ out_data))
+ return false;
+ }
+ }
+ } else if (value->IsArrayBuffer()) {
+ Handle<ArrayBuffer> array_buffer = Handle<ArrayBuffer>::Cast(value);
+ if (FindInObjectList(array_buffer, *seen_objects)) {
+ Throw(isolate, "Duplicated array buffers not supported");
+ return false;
+ }
+ seen_objects->Add(array_buffer);
+ if (FindInObjectList(array_buffer, to_transfer)) {
+ // Transfer ArrayBuffer
+ if (!array_buffer->IsNeuterable()) {
+ Throw(isolate, "Attempting to transfer an un-neuterable ArrayBuffer");
+ return false;
+ }
+
+ ArrayBuffer::Contents contents = array_buffer->Externalize();
+ array_buffer->Neuter();
+ out_data->WriteArrayBufferContents(contents);
+ } else {
+ // Clone ArrayBuffer
+ ArrayBuffer::Contents contents = array_buffer->GetContents();
+ if (contents.ByteLength() > i::kMaxUInt32) {
+ Throw(isolate, "ArrayBuffer is too big to clone");
+ return false;
+ }
+
+ int byte_length = static_cast<int>(contents.ByteLength());
+ out_data->WriteTag(kSerializationTagArrayBuffer);
+ out_data->Write(byte_length);
+ out_data->WriteMemory(contents.Data(), byte_length);
+ }
+ } else if (value->IsSharedArrayBuffer()) {
+ Handle<SharedArrayBuffer> sab = Handle<SharedArrayBuffer>::Cast(value);
+ if (FindInObjectList(sab, *seen_objects)) {
+ Throw(isolate, "Duplicated shared array buffers not supported");
+ return false;
+ }
+ seen_objects->Add(sab);
+ if (!FindInObjectList(sab, to_transfer)) {
+ Throw(isolate, "SharedArrayBuffer must be transferred");
+ return false;
+ }
+
+ SharedArrayBuffer::Contents contents = sab->Externalize();
+ out_data->WriteSharedArrayBufferContents(contents);
+ externalized_shared_contents_.Add(contents);
+ } else if (value->IsObject()) {
+ Handle<Object> object = Handle<Object>::Cast(value);
+ if (FindInObjectList(object, *seen_objects)) {
+ Throw(isolate, "Duplicated objects not supported");
+ return false;
+ }
+ seen_objects->Add(object);
+ Local<Array> property_names;
+ if (!object->GetOwnPropertyNames(context).ToLocal(&property_names)) {
+ Throw(isolate, "Unable to get property names");
+ return false;
+ }
+
+ uint32_t length = property_names->Length();
+ out_data->WriteTag(kSerializationTagObject);
+ out_data->Write(length);
+ for (uint32_t i = 0; i < length; ++i) {
+ Handle<Value> name;
+ Handle<Value> property_value;
+ if (property_names->Get(context, i).ToLocal(&name) &&
+ object->Get(context, name).ToLocal(&property_value)) {
+ if (!SerializeValue(isolate, name, to_transfer, seen_objects, out_data))
+ return false;
+ if (!SerializeValue(isolate, property_value, to_transfer, seen_objects,
+ out_data))
+ return false;
+ }
+ }
+ } else {
+ Throw(isolate, "Don't know how to serialize object");
+ return false;
}
-#endif // !V8_SHARED
- return 0;
+
+ return true;
+}
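
The branches above translate into these script-visible rules (a sketch with hypothetical values, assuming a worker created as in the earlier example): objects and arrays are deep-cloned with duplicates rejected, ArrayBuffers are cloned unless listed for transfer, and SharedArrayBuffers must always be transferred.

    var ab = new ArrayBuffer(16);
    worker.postMessage(ab);          // cloned: bytes are copied
    worker.postMessage(ab, [ab]);    // transferred: ab is neutered afterwards
    var sab = new SharedArrayBuffer(16);
    worker.postMessage(sab, [sab]);  // ok: must appear in the transfer list
    worker.postMessage(sab);         // throws: SharedArrayBuffer must be transferred
    var o = {};
    worker.postMessage([o, o]);      // throws: duplicated objects not supported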
+
+
+MaybeLocal<Value> Shell::DeserializeValue(Isolate* isolate,
+ const SerializationData& data,
+ int* offset) {
+ DCHECK(offset);
+ EscapableHandleScope scope(isolate);
+ // This function should not use utility_context_ because it is running on a
+ // different thread.
+ Local<Value> result;
+ SerializationTag tag = data.ReadTag(offset);
+
+ switch (tag) {
+ case kSerializationTagUndefined:
+ result = Undefined(isolate);
+ break;
+ case kSerializationTagNull:
+ result = Null(isolate);
+ break;
+ case kSerializationTagTrue:
+ result = True(isolate);
+ break;
+ case kSerializationTagFalse:
+ result = False(isolate);
+ break;
+ case kSerializationTagNumber:
+ result = Number::New(isolate, data.Read<double>(offset));
+ break;
+ case kSerializationTagString: {
+ int length = data.Read<int>(offset);
+ // Use a stack buffer: DeserializeValue may run concurrently on worker
+ // threads, so a function-local static buffer would be a data race.
+ char buffer[128];
+ char* p = buffer;
+ bool allocated = false;
+ if (length > static_cast<int>(sizeof(buffer))) {
+ p = new char[length];
+ allocated = true;
+ }
+ data.ReadMemory(p, length, offset);
+ MaybeLocal<String> str =
+ String::NewFromUtf8(isolate, p, String::kNormalString, length);
+ if (!str.IsEmpty()) result = str.ToLocalChecked();
+ if (allocated) delete[] p;
+ break;
+ }
+ case kSerializationTagArray: {
+ uint32_t length = data.Read<uint32_t>(offset);
+ Handle<Array> array = Array::New(isolate, length);
+ for (uint32_t i = 0; i < length; ++i) {
+ Local<Value> element_value;
+ CHECK(DeserializeValue(isolate, data, offset).ToLocal(&element_value));
+ array->Set(i, element_value);
+ }
+ result = array;
+ break;
+ }
+ case kSerializationTagObject: {
+ int length = data.Read<int>(offset);
+ Handle<Object> object = Object::New(isolate);
+ for (int i = 0; i < length; ++i) {
+ Local<Value> property_name;
+ CHECK(DeserializeValue(isolate, data, offset).ToLocal(&property_name));
+ Local<Value> property_value;
+ CHECK(DeserializeValue(isolate, data, offset).ToLocal(&property_value));
+ object->Set(property_name, property_value);
+ }
+ result = object;
+ break;
+ }
+ case kSerializationTagArrayBuffer: {
+ int byte_length = data.Read<int>(offset);
+ Handle<ArrayBuffer> array_buffer = ArrayBuffer::New(isolate, byte_length);
+ ArrayBuffer::Contents contents = array_buffer->GetContents();
+ DCHECK(static_cast<size_t>(byte_length) == contents.ByteLength());
+ data.ReadMemory(contents.Data(), byte_length, offset);
+ result = array_buffer;
+ break;
+ }
+ case kSerializationTagTransferredArrayBuffer: {
+ ArrayBuffer::Contents contents;
+ data.ReadArrayBufferContents(&contents, offset);
+ result =
+ ArrayBuffer::New(isolate, contents.Data(), contents.ByteLength());
+ break;
+ }
+ case kSerializationTagTransferredSharedArrayBuffer: {
+ SharedArrayBuffer::Contents contents;
+ data.ReadSharedArrayBufferContents(&contents, offset);
+ result = SharedArrayBuffer::New(isolate, contents.Data(),
+ contents.ByteLength());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return scope.Escape(result);
+}
+
+
+void Shell::CleanupWorkers() {
+ // Make a copy of workers_, because we don't want to call Worker::Terminate
+ // while holding the workers_mutex_ lock. Otherwise, if a worker is about to
+ // create a new Worker, it would deadlock.
+ i::List<Worker*> workers_copy;
+ {
+ base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+ allow_new_workers_ = false;
+ workers_copy.AddAll(workers_);
+ workers_.Clear();
+ }
+
+ for (int i = 0; i < workers_copy.length(); ++i) {
+ Worker* worker = workers_copy[i];
+ worker->Terminate();
+ delete worker;
+ }
+
+ // Now that all workers are terminated, we can re-enable Worker creation.
+ {
+ base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+ allow_new_workers_ = true;
+ }
+
+ for (int i = 0; i < externalized_shared_contents_.length(); ++i) {
+ const SharedArrayBuffer::Contents& contents =
+ externalized_shared_contents_[i];
+ Shell::array_buffer_allocator->Free(contents.Data(), contents.ByteLength());
+ }
+ externalized_shared_contents_.Clear();
}
-#ifndef V8_SHARED
static void DumpHeapConstants(i::Isolate* isolate) {
i::Heap* heap = isolate->heap();
@@ -1651,13 +2325,14 @@ int Shell::Main(int argc, char* argv[]) {
SetFlagsFromString("--redirect-code-traces-to=code.asm");
int result = 0;
Isolate::CreateParams create_params;
- ShellArrayBufferAllocator array_buffer_allocator;
+ ShellArrayBufferAllocator shell_array_buffer_allocator;
MockArrayBufferAllocator mock_arraybuffer_allocator;
if (options.mock_arraybuffer_allocator) {
- create_params.array_buffer_allocator = &mock_arraybuffer_allocator;
+ Shell::array_buffer_allocator = &mock_arraybuffer_allocator;
} else {
- create_params.array_buffer_allocator = &array_buffer_allocator;
+ Shell::array_buffer_allocator = &shell_array_buffer_allocator;
}
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
if (i::FLAG_gdbjit) {
create_params.code_event_handler = i::GDBJITInterface::EventHandler;
@@ -1729,6 +2404,13 @@ int Shell::Main(int argc, char* argv[]) {
#endif // !V8_SHARED
RunShell(isolate);
}
+
+ // Shut down contexts and collect garbage.
+ evaluation_context_.Reset();
+#ifndef V8_SHARED
+ utility_context_.Reset();
+#endif // !V8_SHARED
+ CollectGarbage(isolate);
}
OnExit(isolate);
#ifndef V8_SHARED
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index a78551599f..548459a790 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -48,6 +48,7 @@
'..',
],
'sources': [
+ 'd8.h',
'd8.cc',
'startup-data-util.h',
'startup-data-util.cc'
@@ -69,7 +70,11 @@
'sources': [ 'd8-windows.cc', ]
}],
[ 'component!="shared_library"', {
- 'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
+ 'sources': [
+ 'd8-debug.h',
+ 'd8-debug.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
+ ],
'conditions': [
[ 'want_separate_host_toolset==1', {
'dependencies': [
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 1f4bee29ed..4d723473ea 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -8,6 +8,7 @@
#ifndef V8_SHARED
#include "src/allocation.h"
#include "src/hashmap.h"
+#include "src/list.h"
#include "src/smart-pointers.h"
#include "src/v8.h"
#else
@@ -167,28 +168,112 @@ class SourceGroup {
int end_offset_;
};
+#ifndef V8_SHARED
+enum SerializationTag {
+ kSerializationTagUndefined,
+ kSerializationTagNull,
+ kSerializationTagTrue,
+ kSerializationTagFalse,
+ kSerializationTagNumber,
+ kSerializationTagString,
+ kSerializationTagArray,
+ kSerializationTagObject,
+ kSerializationTagArrayBuffer,
+ kSerializationTagTransferredArrayBuffer,
+ kSerializationTagTransferredSharedArrayBuffer,
+};
-class BinaryResource : public v8::String::ExternalOneByteStringResource {
+
+class SerializationData {
public:
- BinaryResource(const char* string, int length)
- : data_(string),
- length_(length) { }
-
- ~BinaryResource() {
- delete[] data_;
- data_ = NULL;
- length_ = 0;
+ SerializationData() {}
+ ~SerializationData();
+
+ void WriteTag(SerializationTag tag);
+ void WriteMemory(const void* p, int length);
+ void WriteArrayBufferContents(const ArrayBuffer::Contents& contents);
+ void WriteSharedArrayBufferContents(
+ const SharedArrayBuffer::Contents& contents);
+
+ template <typename T>
+ void Write(const T& data) {
+ WriteMemory(&data, sizeof(data));
}
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
+ SerializationTag ReadTag(int* offset) const;
+ void ReadMemory(void* p, int length, int* offset) const;
+ void ReadArrayBufferContents(ArrayBuffer::Contents* contents,
+ int* offset) const;
+ void ReadSharedArrayBufferContents(SharedArrayBuffer::Contents* contents,
+ int* offset) const;
+
+ template <typename T>
+ T Read(int* offset) const {
+ T value;
+ ReadMemory(&value, sizeof(value), offset);
+ return value;
+ }
private:
- const char* data_;
- size_t length_;
+ i::List<uint8_t> data;
+ i::List<ArrayBuffer::Contents> array_buffer_contents;
+ i::List<SharedArrayBuffer::Contents> shared_array_buffer_contents;
};
+class SerializationDataQueue {
+ public:
+ void Enqueue(SerializationData* data);
+ bool Dequeue(SerializationData** data);
+ bool IsEmpty();
+ void Clear();
+
+ private:
+ base::Mutex mutex_;
+ i::List<SerializationData*> data_;
+};
+
+
+class Worker {
+ public:
+ Worker();
+ ~Worker();
+
+ void StartExecuteInThread(Isolate* isolate, const char* script);
+ void PostMessage(SerializationData* data);
+ SerializationData* GetMessage();
+ void Terminate();
+
+ private:
+ class WorkerThread : public base::Thread {
+ public:
+ explicit WorkerThread(Worker* worker)
+ : base::Thread(base::Thread::Options("WorkerThread")),
+ worker_(worker) {}
+
+ virtual void Run() { worker_->ExecuteInThread(); }
+
+ private:
+ Worker* worker_;
+ };
+
+ enum State { IDLE, RUNNING, TERMINATED };
+
+ void ExecuteInThread();
+ void Cleanup();
+ static void PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ base::Semaphore in_semaphore_;
+ base::Semaphore out_semaphore_;
+ SerializationDataQueue in_queue_;
+ SerializationDataQueue out_queue_;
+ base::Thread* thread_;
+ char* script_;
+ base::Atomic32 state_;
+};
+#endif // !V8_SHARED
+
+
class ShellOptions {
public:
ShellOptions()
@@ -264,8 +349,20 @@ class Shell : public i::AllStatic {
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
static void OnExit(Isolate* isolate);
+ static void CollectGarbage(Isolate* isolate);
#ifndef V8_SHARED
+ // TODO(binji): stupid implementation for now. Is there an easy way to hash an
+ // object for use in i::HashMap? By pointer?
+ typedef i::List<Handle<Object>> ObjectList;
+ static bool SerializeValue(Isolate* isolate, Handle<Value> value,
+ const ObjectList& to_transfer,
+ ObjectList* seen_objects,
+ SerializationData* out_data);
+ static MaybeLocal<Value> DeserializeValue(Isolate* isolate,
+ const SerializationData& data,
+ int* offset);
+ static void CleanupWorkers();
static Handle<Array> GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full);
@@ -309,20 +406,11 @@ class Shell : public i::AllStatic {
args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
static void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void ArrayBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Int8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Uint8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Int16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Uint16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Int32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Uint32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Float32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Float64Array(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void Uint8ClampedArray(
+ static void WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void WorkerPostMessage(
const v8::FunctionCallbackInfo<v8::Value>& args);
- static void ArrayBufferSlice(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void ArraySubArray(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void ArraySet(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void WorkerGetMessage(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -349,7 +437,6 @@ class Shell : public i::AllStatic {
// with the current umask. Intermediate directories are created if necessary.
// An exception is not thrown if the directory already exists. Analogous to
// the "mkdir -p" command.
- static void OSObject(const v8::FunctionCallbackInfo<v8::Value>& args);
static void System(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -363,6 +450,7 @@ class Shell : public i::AllStatic {
static const char* kPrompt;
static ShellOptions options;
+ static ArrayBuffer::Allocator* array_buffer_allocator;
private:
static Persistent<Context> evaluation_context_;
@@ -377,6 +465,11 @@ class Shell : public i::AllStatic {
static base::Mutex context_mutex_;
static const base::TimeTicks kInitialTicks;
+ static base::Mutex workers_mutex_;
+ static bool allow_new_workers_;
+ static i::List<Worker*> workers_;
+ static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
+
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
#endif // !V8_SHARED
@@ -385,8 +478,6 @@ class Shell : public i::AllStatic {
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
- static Handle<FunctionTemplate> CreateArrayBufferTemplate(FunctionCallback);
- static Handle<FunctionTemplate> CreateArrayTemplate(FunctionCallback);
};
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 1f879de135..68c7507d08 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -359,4 +359,5 @@ DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
return result;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 0cd18250e4..118c8a69c9 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -2,21 +2,33 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This file relies on the fact that the following declarations have been made
-// in v8natives.js:
-// var $isFinite = GlobalIsFinite;
-
var $createDate;
// -------------------------------------------------------------------
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalDate = global.Date;
+var InternalArray = utils.InternalArray;
+
+var IsFinite;
+var MathAbs;
+var MathFloor;
+
+utils.Import(function(from) {
+ IsFinite = from.IsFinite;
+ MathAbs = from.MathAbs;
+ MathFloor = from.MathFloor;
+});
+
+// -------------------------------------------------------------------
// This file contains date support implemented in JavaScript.
@@ -46,10 +58,10 @@ function UTC(time) {
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
- if (!$isFinite(hour)) return NAN;
- if (!$isFinite(min)) return NAN;
- if (!$isFinite(sec)) return NAN;
- if (!$isFinite(ms)) return NAN;
+ if (!IsFinite(hour)) return NAN;
+ if (!IsFinite(min)) return NAN;
+ if (!IsFinite(sec)) return NAN;
+ if (!IsFinite(ms)) return NAN;
return TO_INTEGER(hour) * msPerHour
+ TO_INTEGER(min) * msPerMinute
+ TO_INTEGER(sec) * msPerSecond
@@ -70,7 +82,7 @@ function TimeInYear(year) {
// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
function MakeDay(year, month, date) {
- if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return NAN;
+ if (!IsFinite(year) || !IsFinite(month) || !IsFinite(date)) return NAN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
@@ -95,15 +107,15 @@ function MakeDate(day, time) {
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
- if ($abs(time) > MAX_TIME_BEFORE_UTC) return NAN;
+ if (MathAbs(time) > MAX_TIME_BEFORE_UTC) return NAN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
- if (!$isFinite(time)) return NAN;
- if ($abs(time) > MAX_TIME_MS) return NAN;
+ if (!IsFinite(time)) return NAN;
+ if (MathAbs(time) > MAX_TIME_MS) return NAN;
return TO_INTEGER(time);
}
@@ -121,7 +133,7 @@ var Date_cache = {
function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
- return (new GlobalDate()).toString();
+ return %_CallFunction(new GlobalDate(), DateToString);
}
// ECMA 262 - 15.9.3
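
The switch to %_CallFunction makes Date() called as a plain function immune to a monkey-patched toString (sketch):

    Date.prototype.toString = function() { return "patched"; };
    Date();             // still the real date string, not "patched"
    String(new Date()); // explicit conversion still sees the patch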
@@ -190,6 +202,7 @@ function TwoDigitString(value) {
function DateString(date) {
+ CHECK_DATE(date);
return WeekDays[LOCAL_WEEKDAY(date)] + ' '
+ Months[LOCAL_MONTH(date)] + ' '
+ TwoDigitString(LOCAL_DAY(date)) + ' '
@@ -204,6 +217,7 @@ var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
function LongDateString(date) {
+ CHECK_DATE(date);
return LongWeekDays[LOCAL_WEEKDAY(date)] + ', '
+ LongMonths[LOCAL_MONTH(date)] + ' '
+ TwoDigitString(LOCAL_DAY(date)) + ', '
@@ -212,6 +226,7 @@ function LongDateString(date) {
function TimeString(date) {
+ CHECK_DATE(date);
return TwoDigitString(LOCAL_HOUR(date)) + ':'
+ TwoDigitString(LOCAL_MIN(date)) + ':'
+ TwoDigitString(LOCAL_SEC(date));
@@ -219,6 +234,7 @@ function TimeString(date) {
function TimeStringUTC(date) {
+ CHECK_DATE(date);
return TwoDigitString(UTC_HOUR(date)) + ':'
+ TwoDigitString(UTC_MIN(date)) + ':'
+ TwoDigitString(UTC_SEC(date));
@@ -226,12 +242,13 @@ function TimeStringUTC(date) {
function LocalTimezoneString(date) {
+ CHECK_DATE(date);
var timezone = LocalTimezone(UTC_DATE_VALUE(date));
var timezoneOffset = -TIMEZONE_OFFSET(date);
var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = $floor((sign * timezoneOffset)/60);
- var min = $floor((sign * timezoneOffset)%60);
+ var hours = MathFloor((sign * timezoneOffset)/60);
+ var min = MathFloor((sign * timezoneOffset)%60);
var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
TwoDigitString(hours) + TwoDigitString(min);
return gmt + ' (' + timezone + ')';
@@ -239,6 +256,7 @@ function LocalTimezoneString(date) {
function DatePrintString(date) {
+ CHECK_DATE(date);
return DateString(date) + ' ' + TimeString(date);
}
@@ -291,6 +309,7 @@ function DateNow() {
// ECMA 262 - 15.9.5.2
function DateToString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this);
@@ -300,6 +319,7 @@ function DateToString() {
// ECMA 262 - 15.9.5.3
function DateToDateString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DateString(this);
@@ -308,6 +328,7 @@ function DateToDateString() {
// ECMA 262 - 15.9.5.4
function DateToTimeString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this);
@@ -317,12 +338,14 @@ function DateToTimeString() {
// ECMA 262 - 15.9.5.5
function DateToLocaleString() {
+ CHECK_DATE(this);
return %_CallFunction(this, DateToString);
}
// ECMA 262 - 15.9.5.6
function DateToLocaleDateString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return LongDateString(this);
@@ -331,6 +354,7 @@ function DateToLocaleDateString() {
// ECMA 262 - 15.9.5.7
function DateToLocaleTimeString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return TimeString(this);
@@ -339,114 +363,133 @@ function DateToLocaleTimeString() {
// ECMA 262 - 15.9.5.8
function DateValueOf() {
+ CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.9
function DateGetTime() {
+ CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.10
function DateGetFullYear() {
+ CHECK_DATE(this);
return LOCAL_YEAR(this);
}
// ECMA 262 - 15.9.5.11
function DateGetUTCFullYear() {
+ CHECK_DATE(this);
return UTC_YEAR(this);
}
// ECMA 262 - 15.9.5.12
function DateGetMonth() {
+ CHECK_DATE(this);
return LOCAL_MONTH(this);
}
// ECMA 262 - 15.9.5.13
function DateGetUTCMonth() {
+ CHECK_DATE(this);
return UTC_MONTH(this);
}
// ECMA 262 - 15.9.5.14
function DateGetDate() {
+ CHECK_DATE(this);
return LOCAL_DAY(this);
}
// ECMA 262 - 15.9.5.15
function DateGetUTCDate() {
+ CHECK_DATE(this);
return UTC_DAY(this);
}
// ECMA 262 - 15.9.5.16
function DateGetDay() {
+ CHECK_DATE(this);
return LOCAL_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
+ CHECK_DATE(this);
return UTC_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.18
function DateGetHours() {
+ CHECK_DATE(this);
return LOCAL_HOUR(this);
}
// ECMA 262 - 15.9.5.19
function DateGetUTCHours() {
+ CHECK_DATE(this);
return UTC_HOUR(this);
}
// ECMA 262 - 15.9.5.20
function DateGetMinutes() {
+ CHECK_DATE(this);
return LOCAL_MIN(this);
}
// ECMA 262 - 15.9.5.21
function DateGetUTCMinutes() {
+ CHECK_DATE(this);
return UTC_MIN(this);
}
// ECMA 262 - 15.9.5.22
function DateGetSeconds() {
+ CHECK_DATE(this);
return LOCAL_SEC(this);
}
// ECMA 262 - 15.9.5.23
function DateGetUTCSeconds() {
+ CHECK_DATE(this);
return UTC_SEC(this)
}
// ECMA 262 - 15.9.5.24
function DateGetMilliseconds() {
+ CHECK_DATE(this);
return LOCAL_MS(this);
}
// ECMA 262 - 15.9.5.25
function DateGetUTCMilliseconds() {
+ CHECK_DATE(this);
return UTC_MS(this);
}
// ECMA 262 - 15.9.5.26
function DateGetTimezoneOffset() {
+ CHECK_DATE(this);
return TIMEZONE_OFFSET(this);
}
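
The net effect of these CHECK_DATE guards: calling a Date accessor on a non-Date receiver now throws a TypeError up front rather than reading fields from a foreign object (sketch):

    var getFullYear = Date.prototype.getFullYear;
    getFullYear.call(new Date());  // fine
    getFullYear.call({});          // throws: receiver is not a Date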
@@ -461,6 +504,7 @@ function DateSetTime(ms) {
// ECMA 262 - 15.9.5.28
function DateSetMilliseconds(ms) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
ms = $toNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
@@ -470,6 +514,7 @@ function DateSetMilliseconds(ms) {
// ECMA 262 - 15.9.5.29
function DateSetUTCMilliseconds(ms) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
ms = $toNumber(ms);
var time = MakeTime(UTC_HOUR(this),
@@ -482,6 +527,7 @@ function DateSetUTCMilliseconds(ms) {
// ECMA 262 - 15.9.5.30
function DateSetSeconds(sec, ms) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
sec = $toNumber(sec);
ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : $toNumber(ms);
@@ -492,6 +538,7 @@ function DateSetSeconds(sec, ms) {
// ECMA 262 - 15.9.5.31
function DateSetUTCSeconds(sec, ms) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
sec = $toNumber(sec);
ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : $toNumber(ms);
@@ -502,6 +549,7 @@ function DateSetUTCSeconds(sec, ms) {
// ECMA 262 - 15.9.5.33
function DateSetMinutes(min, sec, ms) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
min = $toNumber(min);
var argc = %_ArgumentsLength();
@@ -514,6 +562,7 @@ function DateSetMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCMinutes(min, sec, ms) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
min = $toNumber(min);
var argc = %_ArgumentsLength();
@@ -526,6 +575,7 @@ function DateSetUTCMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.35
function DateSetHours(hour, min, sec, ms) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
hour = $toNumber(hour);
var argc = %_ArgumentsLength();
@@ -539,6 +589,7 @@ function DateSetHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCHours(hour, min, sec, ms) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
hour = $toNumber(hour);
var argc = %_ArgumentsLength();
@@ -552,6 +603,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.36
function DateSetDate(date) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
date = $toNumber(date);
var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
@@ -561,6 +613,7 @@ function DateSetDate(date) {
// ECMA 262 - 15.9.5.37
function DateSetUTCDate(date) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
date = $toNumber(date);
var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
@@ -570,6 +623,7 @@ function DateSetUTCDate(date) {
// ECMA 262 - 15.9.5.38
function DateSetMonth(month, date) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
month = $toNumber(month);
date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : $toNumber(date);
@@ -580,6 +634,7 @@ function DateSetMonth(month, date) {
// ECMA 262 - 15.9.5.39
function DateSetUTCMonth(month, date) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
month = $toNumber(month);
date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : $toNumber(date);
@@ -590,6 +645,7 @@ function DateSetUTCMonth(month, date) {
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
+ CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
year = $toNumber(year);
var argc = %_ArgumentsLength();
@@ -610,6 +666,7 @@ function DateSetFullYear(year, month, date) {
// ECMA 262 - 15.9.5.41
function DateSetUTCFullYear(year, month, date) {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
year = $toNumber(year);
var argc = %_ArgumentsLength();
@@ -630,6 +687,7 @@ function DateSetUTCFullYear(year, month, date) {
// ECMA 262 - 15.9.5.42
function DateToUTCString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
@@ -643,6 +701,7 @@ function DateToUTCString() {
// ECMA 262 - B.2.4
function DateGetYear() {
+ CHECK_DATE(this);
return LOCAL_YEAR(this) - 1900;
}
@@ -688,11 +747,12 @@ function PadInt(n, digits) {
}
-// ECMA 262 - 15.9.5.43
+// ECMA 262 - 20.3.4.36
function DateToISOString() {
+ CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) throw MakeRangeError(kInvalidTimeValue);
- var year = this.getUTCFullYear();
+ var year = UTC_YEAR(this);
var year_string;
if (year >= 0 && year <= 9999) {
year_string = PadInt(year, 4);
@@ -704,12 +764,12 @@ function DateToISOString() {
}
}
return year_string +
- '-' + PadInt(this.getUTCMonth() + 1, 2) +
- '-' + PadInt(this.getUTCDate(), 2) +
- 'T' + PadInt(this.getUTCHours(), 2) +
- ':' + PadInt(this.getUTCMinutes(), 2) +
- ':' + PadInt(this.getUTCSeconds(), 2) +
- '.' + PadInt(this.getUTCMilliseconds(), 3) +
+ '-' + PadInt(UTC_MONTH(this) + 1, 2) +
+ '-' + PadInt(UTC_DAY(this), 2) +
+ 'T' + PadInt(UTC_HOUR(this), 2) +
+ ':' + PadInt(UTC_MIN(this), 2) +
+ ':' + PadInt(UTC_SEC(this), 2) +
+ '.' + PadInt(UTC_MS(this), 3) +
'Z';
}
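
Reading the UTC_* fields directly also makes toISOString robust against overridden getters (sketch):

    Date.prototype.getUTCFullYear = function() { return 9999; };
    new Date(0).toISOString();  // still "1970-01-01T00:00:00.000Z"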
@@ -760,7 +820,7 @@ function CreateDate(time) {
%FunctionSetPrototype(GlobalDate, new GlobalDate(NAN));
// Set up non-enumerable properties of the Date object itself.
-$installFunctions(GlobalDate, DONT_ENUM, [
+utils.InstallFunctions(GlobalDate, DONT_ENUM, [
"UTC", DateUTC,
"parse", DateParse,
"now", DateNow
@@ -771,7 +831,7 @@ $installFunctions(GlobalDate, DONT_ENUM, [
// Set up non-enumerable functions of the Date prototype object and
// set their names.
-$installFunctions(GlobalDate.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
"toString", DateToString,
"toDateString", DateToDateString,
"toTimeString", DateToTimeString,
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 5db0391a67..0e5cc8c3ef 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -187,4 +187,5 @@ int DateParser::ReadMilliseconds(DateToken token) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 89334fa708..e952fe7ebb 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -48,7 +48,7 @@ Debug::Debug(Isolate* isolate)
}
-static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
+static v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
// Isolate::context() may have been NULL when "script collected" event
// occurred.
@@ -58,6 +58,21 @@ static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
}
+BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
+ RelocInfo* original_rinfo, int position,
+ int statement_position)
+ : debug_info_(debug_info),
+ pc_offset_(static_cast<int>(rinfo->pc() - debug_info->code()->entry())),
+ original_pc_offset_(static_cast<int>(
+ original_rinfo->pc() - debug_info->original_code()->entry())),
+ rmode_(rinfo->rmode()),
+ original_rmode_(original_rinfo->rmode()),
+ data_(rinfo->data()),
+ original_data_(original_rinfo->data()),
+ position_(position),
+ statement_position_(statement_position) {}
+
+
BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info,
BreakLocatorType type)
: debug_info_(debug_info),
@@ -99,6 +114,7 @@ void BreakLocation::Iterator::Next() {
debug_info_->shared()->start_position());
DCHECK(position_ >= 0);
DCHECK(statement_position_ >= 0);
+ continue;
}
// Check for break at return.
@@ -112,7 +128,7 @@ void BreakLocation::Iterator::Next() {
}
statement_position_ = position_;
break_index_++;
- return;
+ break;
}
if (RelocInfo::IsCodeTarget(rmode())) {
@@ -124,32 +140,28 @@ void BreakLocation::Iterator::Next() {
if (RelocInfo::IsConstructCall(rmode()) || code->is_call_stub()) {
break_index_++;
- return;
+ break;
}
- // Skip below if we only want locations for calls and returns.
- if (type_ == CALLS_AND_RETURNS) continue;
-
- if ((code->is_inline_cache_stub() && !code->is_binary_op_stub() &&
- !code->is_compare_ic_stub() && !code->is_to_boolean_ic_stub())) {
+ if (code->kind() == Code::STUB &&
+ CodeStub::GetMajorKey(code) == CodeStub::CallFunction) {
break_index_++;
- return;
- }
- if (code->kind() == Code::STUB) {
- if (RelocInfo::IsDebuggerStatement(rmode())) {
- break_index_++;
- return;
- } else if (CodeStub::GetMajorKey(code) == CodeStub::CallFunction) {
- break_index_++;
- return;
- }
+ break;
}
}
+ // Skip below if we only want locations for calls and returns.
+ if (type_ == CALLS_AND_RETURNS) continue;
+
+ if (RelocInfo::IsDebuggerStatement(rmode())) {
+ break_index_++;
+ break;
+ }
+
if (RelocInfo::IsDebugBreakSlot(rmode()) && type_ != CALLS_AND_RETURNS) {
// There is always a possible break point at a debug break slot.
break_index_++;
- return;
+ break;
}
}
}
@@ -232,6 +244,7 @@ BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
void BreakLocation::SetBreakPoint(Handle<Object> break_point_object) {
// If there is not already a real break point here patch code with debug
// break.
+ DCHECK(code()->has_debug_break_slots());
if (!HasBreakPoint()) SetDebugBreak();
DCHECK(IsDebugBreak() || IsDebuggerStatement());
// Set the break point information.
@@ -363,28 +376,8 @@ static Handle<Code> DebugBreakForIC(Handle<Code> code, RelocInfo::Mode mode) {
// Find the builtin debug break function matching the calling convention
// used by the call site.
if (code->is_inline_cache_stub()) {
- switch (code->kind()) {
- case Code::CALL_IC:
- return isolate->builtins()->CallICStub_DebugBreak();
-
- case Code::LOAD_IC:
- return isolate->builtins()->LoadIC_DebugBreak();
-
- case Code::STORE_IC:
- return isolate->builtins()->StoreIC_DebugBreak();
-
- case Code::KEYED_LOAD_IC:
- return isolate->builtins()->KeyedLoadIC_DebugBreak();
-
- case Code::KEYED_STORE_IC:
- return isolate->builtins()->KeyedStoreIC_DebugBreak();
-
- case Code::COMPARE_NIL_IC:
- return isolate->builtins()->CompareNilIC_DebugBreak();
-
- default:
- UNREACHABLE();
- }
+ DCHECK(code->kind() == Code::CALL_IC);
+ return isolate->builtins()->CallICStub_DebugBreak();
}
if (RelocInfo::IsConstructCall(mode)) {
if (code->has_function_cache()) {
@@ -493,101 +486,63 @@ int Debug::ArchiveSpacePerThread() {
}
-ScriptCache::ScriptCache(Isolate* isolate) : HashMap(HashMap::PointersMatch),
- isolate_(isolate) {
+ScriptCache::ScriptCache(Isolate* isolate) : isolate_(isolate) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
+ DCHECK(isolate_->debug()->is_active());
+
// Perform a GC to get rid of all unreferenced scripts.
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "ScriptCache");
// Scan heap for Script objects.
- HeapIterator iterator(heap);
- DisallowHeapAllocation no_allocation;
-
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
- Add(Handle<Script>(Script::cast(obj)));
+ List<Handle<Script> > scripts;
+ {
+ HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
+ DisallowHeapAllocation no_allocation;
+ for (HeapObject* obj = iterator.next(); obj != NULL;
+ obj = iterator.next()) {
+ if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
+ scripts.Add(Handle<Script>(Script::cast(obj)));
+ }
}
}
+
+ GlobalHandles* global_handles = isolate_->global_handles();
+ table_ = Handle<WeakValueHashTable>::cast(global_handles->Create(
+ Object::cast(*WeakValueHashTable::New(isolate_, scripts.length()))));
+ for (int i = 0; i < scripts.length(); i++) Add(scripts[i]);
}
void ScriptCache::Add(Handle<Script> script) {
- GlobalHandles* global_handles = isolate_->global_handles();
- // Create an entry in the hash map for the script.
- int id = script->id()->value();
- HashMap::Entry* entry =
- HashMap::LookupOrInsert(reinterpret_cast<void*>(id), Hash(id));
- if (entry->value != NULL) {
+ HandleScope scope(isolate_);
+ Handle<Smi> id(script->id(), isolate_);
+
#ifdef DEBUG
- // The code deserializer may introduce duplicate Script objects.
- // Assert that the Script objects with the same id have the same name.
- Handle<Script> found(reinterpret_cast<Script**>(entry->value));
+ Handle<Object> lookup(table_->LookupWeak(id), isolate_);
+ if (!lookup->IsTheHole()) {
+ Handle<Script> found = Handle<Script>::cast(lookup);
DCHECK(script->id() == found->id());
DCHECK(!script->name()->IsString() ||
String::cast(script->name())->Equals(String::cast(found->name())));
-#endif
- return;
- }
- // Globalize the script object, make it weak and use the location of the
- // global handle as the value in the hash map.
- Handle<Script> script_ =
- Handle<Script>::cast(global_handles->Create(*script));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- ScriptCache::HandleWeakScript);
- entry->value = script_.location();
-}
-
-
-Handle<FixedArray> ScriptCache::GetScripts() {
- Factory* factory = isolate_->factory();
- Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
- int count = 0;
- for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
- DCHECK(entry->value != NULL);
- if (entry->value != NULL) {
- instances->set(count, *reinterpret_cast<Script**>(entry->value));
- count++;
- }
}
- return instances;
-}
+#endif
+ Handle<WeakValueHashTable> new_table =
+ WeakValueHashTable::PutWeak(table_, id, script);
-void ScriptCache::Clear() {
- // Iterate the script cache to get rid of all the weak handles.
- for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
- DCHECK(entry != NULL);
- Object** location = reinterpret_cast<Object**>(entry->value);
- DCHECK((*location)->IsScript());
- GlobalHandles::ClearWeakness(location);
- GlobalHandles::Destroy(location);
- }
- // Clear the content of the hash map.
- HashMap::Clear();
+ if (new_table.is_identical_to(table_)) return;
+ GlobalHandles* global_handles = isolate_->global_handles();
+ global_handles->Destroy(Handle<Object>::cast(table_).location());
+ table_ = Handle<WeakValueHashTable>::cast(
+ global_handles->Create(Object::cast(*new_table)));
}
-void ScriptCache::HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data) {
- // Retrieve the script identifier.
- Handle<Object> object = Utils::OpenHandle(*data.GetValue());
- int id = Handle<Script>::cast(object)->id()->value();
- void* key = reinterpret_cast<void*>(id);
- uint32_t hash = Hash(id);
-
- // Remove the corresponding entry from the cache.
- ScriptCache* script_cache =
- reinterpret_cast<ScriptCache*>(data.GetParameter());
- HashMap::Entry* entry = script_cache->Lookup(key, hash);
- DCHECK_NOT_NULL(entry);
- Object** location = reinterpret_cast<Object**>(entry->value);
- script_cache->Remove(key, hash);
-
- // Clear the weak handle.
- GlobalHandles::Destroy(location);
+ScriptCache::~ScriptCache() {
+ isolate_->global_handles()->Destroy(Handle<Object>::cast(table_).location());
+ table_ = Handle<WeakValueHashTable>();
}
@@ -645,15 +600,10 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
// Compile the script.
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::CompileScript(
- source_code, script_name, 0, 0, false, false, Handle<Object>(), context,
- NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE, false);
-
- // Silently ignore stack overflows during compilation.
- if (function_info.is_null()) {
- DCHECK(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- return false;
- }
+ source_code, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
+ context, NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE,
+ false);
+ if (function_info.is_null()) return false;
// Execute the shared function in the debugger context.
Handle<JSFunction> function =
@@ -668,16 +618,16 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
DCHECK(!isolate->has_pending_exception());
MessageLocation computed_location;
isolate->ComputeLocation(&computed_location);
- Handle<Object> message = MessageHandler::MakeMessageObject(
- isolate, "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<JSArray>());
+ Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
+ isolate, MessageTemplate::kDebuggerLoading, &computed_location,
+ isolate->factory()->undefined_value(), Handle<JSArray>());
DCHECK(!isolate->has_pending_exception());
Handle<Object> exception;
if (maybe_exception.ToHandle(&exception)) {
isolate->set_pending_exception(*exception);
MessageHandler::ReportMessage(isolate, NULL, message);
- isolate->clear_pending_exception();
}
+ DCHECK(!maybe_exception.is_null());
return false;
}
@@ -705,11 +655,9 @@ bool Debug::Load() {
// Create the debugger context.
HandleScope scope(isolate_);
ExtensionConfiguration no_extensions;
- Handle<Context> context =
- isolate_->bootstrapper()->CreateEnvironment(
- MaybeHandle<JSGlobalProxy>(),
- v8::Handle<ObjectTemplate>(),
- &no_extensions);
+ Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
+ MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(),
+ &no_extensions);
// Fail if no context could be created.
if (context.is_null()) return false;
@@ -1105,8 +1053,8 @@ void Debug::ClearAllBreakPoints() {
void Debug::FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type) {
- // Do not ever break in native functions.
- if (function->IsFromNativeScript()) return;
+ // Do not ever break in native and extension functions.
+ if (!function->IsSubjectToDebugging()) return;
PrepareForBreakPoints();
@@ -1131,7 +1079,7 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
isolate_);
if (!bindee.is_null() && bindee->IsJSFunction() &&
- !JSFunction::cast(*bindee)->IsFromNativeScript()) {
+ JSFunction::cast(*bindee)->IsSubjectToDebugging()) {
Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
FloodWithOneShotGeneric(bindee_function);
}
@@ -1189,7 +1137,7 @@ void Debug::FloodHandlerWithOneShot() {
for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
int stack_slots = 0; // The computed stack slot count is not used.
- if (frame->LookupExceptionHandlerInTable(&stack_slots) > 0) {
+ if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) {
// Flood the function with the catch/finally block with break points.
FloodWithOneShot(Handle<JSFunction>(frame->function()));
return;
@@ -1281,8 +1229,6 @@ void Debug::PrepareStep(StepAction step_action,
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Compute whether or not the target is a call target.
- bool is_load_or_store = false;
- bool is_inline_cache_stub = false;
bool is_at_restarted_function = false;
Handle<Code> call_function_stub;
@@ -1295,8 +1241,6 @@ void Debug::PrepareStep(StepAction step_action,
if (thread_local_.restarter_frame_function_pointer_ == NULL) {
if (location.IsCodeTarget()) {
Handle<Code> target_code = location.CodeTarget();
- is_inline_cache_stub = target_code->is_inline_cache_stub();
- is_load_or_store = is_inline_cache_stub && !target_code->is_call_stub();
// Check if target code is CallFunction stub.
Handle<Code> maybe_call_function_stub = target_code;
@@ -1329,9 +1273,9 @@ void Debug::PrepareStep(StepAction step_action,
DCHECK(location.IsExit());
frames_it.Advance();
}
- // Skip builtin functions on the stack.
+ // Skip native and extension functions on the stack.
while (!frames_it.done() &&
- frames_it.frame()->function()->IsFromNativeScript()) {
+ !frames_it.frame()->function()->IsSubjectToDebugging()) {
frames_it.Advance();
}
// Step out: If there is a JavaScript caller frame, we need to
@@ -1343,21 +1287,10 @@ void Debug::PrepareStep(StepAction step_action,
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
}
- } else if (!(is_inline_cache_stub || location.IsConstructCall() ||
- !call_function_stub.is_null() || is_at_restarted_function) ||
- step_action == StepNext || step_action == StepMin) {
- // Step next or step min.
-
- // Fill the current function with one-shot break points.
- // If we are stepping into another frame, only fill calls and returns.
- FloodWithOneShot(function, step_action == StepFrame ? CALLS_AND_RETURNS
- : ALL_BREAK_LOCATIONS);
-
- // Remember source position and frame to handle step next.
- thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(summary.pc());
- thread_local_.last_fp_ = frame->UnpaddedFP();
- } else {
+ return;
+ }
+
+ if (step_action != StepNext && step_action != StepMin) {
// If there's restarter frame on top of the stack, just get the pointer
// to function which is going to be restarted.
if (is_at_restarted_function) {
@@ -1399,11 +1332,16 @@ void Debug::PrepareStep(StepAction step_action,
Isolate* isolate = JSFunction::cast(fun)->GetIsolate();
Code* apply = isolate->builtins()->builtin(Builtins::kFunctionApply);
Code* call = isolate->builtins()->builtin(Builtins::kFunctionCall);
+      // Find the target function on the expression stack for expressions like
+ // Function.call.call...apply(...)
+ int i = 1;
while (fun->IsJSFunction()) {
Code* code = JSFunction::cast(fun)->shared()->code();
if (code != apply && code != call) break;
- fun = frame->GetExpression(
- expressions_count - 1 - call_function_arg_count);
+ DCHECK(expressions_count - i - call_function_arg_count >= 0);
+ fun = frame->GetExpression(expressions_count - i -
+ call_function_arg_count);
+ i -= 1;
}
}
@@ -1413,36 +1351,21 @@ void Debug::PrepareStep(StepAction step_action,
}
}
- // Fill the current function with one-shot break points even for step in on
- // a call target as the function called might be a native function for
- // which step in will not stop. It also prepares for stepping in
- // getters/setters.
- // If we are stepping into another frame, only fill calls and returns.
- FloodWithOneShot(function, step_action == StepFrame ? CALLS_AND_RETURNS
- : ALL_BREAK_LOCATIONS);
-
- if (is_load_or_store) {
- // Remember source position and frame to handle step in getter/setter. If
- // there is a custom getter/setter it will be handled in
- // Object::Get/SetPropertyWithAccessor, otherwise the step action will be
- // propagated on the next Debug::Break.
- thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(summary.pc());
- thread_local_.last_fp_ = frame->UnpaddedFP();
- }
-
- // Step in or Step in min
- // Step in through construct call requires no changes to the running code.
- // Step in through getters/setters should already be prepared as well
- // because caller of this function (Debug::PrepareStep) is expected to
- // flood the top frame's function with one shot breakpoints.
- // Step in through CallFunction stub should also be prepared by caller of
- // this function (Debug::PrepareStep) which should flood target function
- // with breakpoints.
- DCHECK(location.IsConstructCall() || is_inline_cache_stub ||
- !call_function_stub.is_null() || is_at_restarted_function);
- ActivateStepIn(frame);
+ ActivateStepIn(function, frame);
}
+
+  // Fill the current function with one-shot break points even for step-in on
+  // a call target, as the called function might be a native function in
+  // which step-in will not stop. This also prepares for stepping into
+  // getters/setters.
+  // If we are stepping into another frame, only fill calls and returns.
+ FloodWithOneShot(function, step_action == StepFrame ? CALLS_AND_RETURNS
+ : ALL_BREAK_LOCATIONS);
+
+ // Remember source position and frame to handle step next.
+ thread_local_.last_statement_position_ =
+ debug_info->code()->SourceStatementPosition(summary.pc());
+ thread_local_.last_fp_ = frame->UnpaddedFP();
}
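
With the special-case branches gone, the tail of PrepareStep now always floods the current function with one-shot break points and records the last statement position and frame pointer; a later break completes the step only once one of the two changes. A minimal standalone sketch of that bookkeeping, assuming a simplified state layout (the names below are illustrative, not V8's ThreadLocal):

    #include <cstdint>

    // State recorded at the end of a step preparation.
    struct StepState {
      int last_statement_position;
      std::uintptr_t last_fp;
    };

    // True if execution is still inside the statement being stepped over,
    // in which case the debugger continues instead of reporting a break.
    bool ShouldContinueStep(const StepState& s, int statement_position,
                            std::uintptr_t fp) {
      return fp == s.last_fp &&
             statement_position == s.last_statement_position;
    }
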
@@ -1532,30 +1455,27 @@ Handle<Object> Debug::GetSourceBreakLocations(
// Handle stepping into a function.
-void Debug::HandleStepIn(Handle<Object> function_obj, Handle<Object> holder,
- Address fp, bool is_constructor) {
+void Debug::HandleStepIn(Handle<Object> function_obj, bool is_constructor) {
// Flood getter/setter if we either step in or step to another frame.
bool step_frame = thread_local_.last_step_action_ == StepFrame;
if (!StepInActive() && !step_frame) return;
if (!function_obj->IsJSFunction()) return;
Handle<JSFunction> function = Handle<JSFunction>::cast(function_obj);
Isolate* isolate = function->GetIsolate();
- // If the frame pointer is not supplied by the caller find it.
- if (fp == 0) {
- StackFrameIterator it(isolate);
+
+ StackFrameIterator it(isolate);
+ it.Advance();
+ // For constructor functions skip another frame.
+ if (is_constructor) {
+ DCHECK(it.frame()->is_construct());
it.Advance();
- // For constructor functions skip another frame.
- if (is_constructor) {
- DCHECK(it.frame()->is_construct());
- it.Advance();
- }
- fp = it.frame()->fp();
}
+ Address fp = it.frame()->fp();
// Flood the function with one-shot break points if it is called from where
// step into was requested, or when stepping into a new frame.
if (fp == thread_local_.step_into_fp_ || step_frame) {
- FloodWithOneShotGeneric(function, holder);
+ FloodWithOneShotGeneric(function, Handle<Object>());
}
}
@@ -1589,8 +1509,12 @@ void Debug::ClearOneShot() {
}
-void Debug::ActivateStepIn(StackFrame* frame) {
+void Debug::ActivateStepIn(Handle<JSFunction> function, StackFrame* frame) {
DCHECK(!StepOutActive());
+  // Make sure the IC state is clean so that we correctly flood
+  // accessor pairs when stepping in.
+ function->code()->ClearInlineCaches();
+ function->shared()->feedback_vector()->ClearICSlots(function->shared());
thread_local_.step_into_fp_ = frame->UnpaddedFP();
}
@@ -1748,7 +1672,7 @@ static void RedirectActivationsToRecompiledCodeOnThread(
reinterpret_cast<intptr_t>(new_pc));
}
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Update constant pool pointer for new code.
frame->set_constant_pool(new_code->constant_pool());
}
@@ -1921,7 +1845,8 @@ void Debug::PrepareForBreakPoints() {
if (kind == Code::OPTIMIZED_FUNCTION) {
// Optimized code can only get here if DeoptimizeAll did not
// deoptimize turbo fan code.
- DCHECK(!FLAG_turbo_deoptimization);
+ DCHECK(!FLAG_turbo_asm_deoptimization);
+ DCHECK(function->shared()->asm_function());
DCHECK(function->code()->is_turbofanned());
function->ReplaceCode(fallback);
}
@@ -1981,10 +1906,12 @@ void Debug::PrepareForBreakPoints() {
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
Handle<SharedFunctionInfo> shared(function->shared());
-
- // If recompilation is not possible just skip it.
- if (shared->is_toplevel()) continue;
- if (!shared->allows_lazy_compilation()) continue;
+ if (!shared->allows_lazy_compilation()) {
+      // Ignore functions that cannot be recompiled. Fortunately, those are
+      // only the ones that are not subject to debugging in the first place.
+ DCHECK(!function->IsSubjectToDebugging());
+ continue;
+ }
if (shared->code()->kind() == Code::BUILTIN) continue;
EnsureFunctionHasDebugBreakSlots(function);
@@ -2000,126 +1927,126 @@ void Debug::PrepareForBreakPoints() {
}
+class SharedFunctionInfoFinder {
+ public:
+ explicit SharedFunctionInfoFinder(int target_position)
+ : current_candidate_(NULL),
+ current_candidate_closure_(NULL),
+ current_start_position_(RelocInfo::kNoPosition),
+ target_position_(target_position) {}
+
+ void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = NULL) {
+ int start_position = shared->function_token_position();
+ if (start_position == RelocInfo::kNoPosition) {
+ start_position = shared->start_position();
+ }
+
+ if (start_position > target_position_) return;
+ if (target_position_ > shared->end_position()) return;
+
+ if (current_candidate_ != NULL) {
+ if (current_start_position_ == start_position &&
+ shared->end_position() == current_candidate_->end_position()) {
+ // If a top-level function contains only one function
+ // declaration the source for the top-level and the function
+ // is the same. In that case prefer the non top-level function.
+ if (shared->is_toplevel()) return;
+ } else if (start_position < current_start_position_ ||
+ current_candidate_->end_position() < shared->end_position()) {
+ return;
+ }
+ }
+
+ current_start_position_ = start_position;
+ current_candidate_ = shared;
+ current_candidate_closure_ = closure;
+ }
+
+ SharedFunctionInfo* Result() { return current_candidate_; }
+
+ JSFunction* ResultClosure() { return current_candidate_closure_; }
+
+ private:
+ SharedFunctionInfo* current_candidate_;
+ JSFunction* current_candidate_closure_;
+ int current_start_position_;
+ int target_position_;
+ DisallowHeapAllocation no_gc_;
+};
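
A self-contained sketch of the narrowing rule SharedFunctionInfoFinder implements: among all functions whose source range contains the target position, keep the innermost one, and on an exact range tie prefer the nested function over the top-level script. The types and names below are illustrative:

    // Narrow to the innermost function covering `target`.
    struct Candidate {
      int start;
      int end;
      bool is_toplevel;
    };

    class InnermostFinder {
     public:
      explicit InnermostFinder(int target) : target_(target) {}

      void NewCandidate(const Candidate& c) {
        if (c.start > target_ || target_ > c.end) return;  // does not contain
        if (have_result_) {
          if (c.start == result_.start && c.end == result_.end) {
            // Identical source range: prefer the non-top-level function.
            if (c.is_toplevel) return;
          } else if (c.start < result_.start || result_.end < c.end) {
            return;  // not nested inside the current candidate
          }
        }
        result_ = c;
        have_result_ = true;
      }

      bool have_result() const { return have_result_; }
      const Candidate& result() const { return result_; }

     private:
      int target_;
      Candidate result_ = {0, 0, false};
      bool have_result_ = false;
    };

    // Position 12 lies in both the script {0, 40} and an inner function
    // {10, 20}; NewCandidate keeps the inner function regardless of the
    // order in which the two are offered.
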
+
+
+template <typename C>
+bool Debug::CompileToRevealInnerFunctions(C* compilable) {
+ HandleScope scope(isolate_);
+ // Force compiling inner functions that require context.
+ // TODO(yangguo): remove this hack.
+ bool has_break_points = has_break_points_;
+ has_break_points_ = true;
+ Handle<C> compilable_handle(compilable);
+ bool result = !Compiler::GetUnoptimizedCode(compilable_handle).is_null();
+ has_break_points_ = has_break_points;
+ return result;
+}
+
+
Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int position) {
- // Iterate the heap looking for SharedFunctionInfo generated from the
- // script. The inner most SharedFunctionInfo containing the source position
- // for the requested break point is found.
- // NOTE: This might require several heap iterations. If the SharedFunctionInfo
- // which is found is not compiled it is compiled and the heap is iterated
- // again as the compilation might create inner functions from the newly
- // compiled function and the actual requested break point might be in one of
- // these functions.
- // NOTE: The below fix-point iteration depends on all functions that cannot be
- // compiled lazily without a context to not be compiled at all. Compilation
- // will be triggered at points where we do not need a context.
- bool done = false;
- // The current candidate for the source position:
- int target_start_position = RelocInfo::kNoPosition;
- Handle<JSFunction> target_function;
- Handle<SharedFunctionInfo> target;
- Heap* heap = isolate_->heap();
- while (!done) {
- { // Extra scope for iterator.
- // If lazy compilation is off, we won't have duplicate shared function
- // infos that need to be filtered.
- HeapIterator iterator(heap, FLAG_lazy ? HeapIterator::kNoFiltering
- : HeapIterator::kFilterUnreachable);
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- bool found_next_candidate = false;
- Handle<JSFunction> function;
- Handle<SharedFunctionInfo> shared;
- if (obj->IsJSFunction()) {
- function = Handle<JSFunction>(JSFunction::cast(obj));
- shared = Handle<SharedFunctionInfo>(function->shared());
- DCHECK(shared->allows_lazy_compilation() || shared->is_compiled());
- found_next_candidate = true;
- } else if (obj->IsSharedFunctionInfo()) {
- shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
- // Skip functions that we cannot compile lazily without a context,
- // which is not available here, because there is no closure.
- found_next_candidate = shared->is_compiled() ||
- shared->allows_lazy_compilation_without_context();
+ while (true) {
+ // Go through all shared function infos associated with this script to
+    // find the innermost function containing this position.
+ if (!script->shared_function_infos()->IsWeakFixedArray()) break;
+ WeakFixedArray* array =
+ WeakFixedArray::cast(script->shared_function_infos());
+
+ SharedFunctionInfo* shared;
+ {
+ SharedFunctionInfoFinder finder(position);
+ for (int i = 0; i < array->Length(); i++) {
+ Object* item = array->Get(i);
+ if (!item->IsSharedFunctionInfo()) continue;
+ finder.NewCandidate(SharedFunctionInfo::cast(item));
+ }
+ shared = finder.Result();
+ if (shared == NULL) break;
+      // If the candidate is already compiled, we are done.
+ if (shared->is_compiled()) return handle(shared);
+ }
+ // If not, compile to reveal inner functions, if possible.
+ if (shared->allows_lazy_compilation_without_context()) {
+ if (!CompileToRevealInnerFunctions(shared)) break;
+ continue;
+ }
+
+    // If not possible, comb the heap for the most suitable compile target.
+ JSFunction* closure;
+ {
+ HeapIterator it(isolate_->heap(), HeapIterator::kNoFiltering);
+ SharedFunctionInfoFinder finder(position);
+ while (HeapObject* object = it.next()) {
+ JSFunction* candidate_closure = NULL;
+ SharedFunctionInfo* candidate = NULL;
+ if (object->IsJSFunction()) {
+ candidate_closure = JSFunction::cast(object);
+ candidate = candidate_closure->shared();
+ } else if (object->IsSharedFunctionInfo()) {
+ candidate = SharedFunctionInfo::cast(object);
+ if (!candidate->allows_lazy_compilation_without_context()) continue;
+ } else {
+ continue;
}
- if (!found_next_candidate) continue;
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target_function = function;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contains only one function
- // declaration the source for the top-level and the function
- // is the same. In that case prefer the non top-level function.
- if (!shared->is_toplevel()) {
- target_start_position = start_position;
- target_function = function;
- target = shared;
- }
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function
- // inside a top-level function can share either start or end
- // position with the top-level function.
- target_start_position = start_position;
- target_function = function;
- target = shared;
- }
- }
- }
+ if (candidate->script() == *script) {
+ finder.NewCandidate(candidate, candidate_closure);
}
- } // End for loop.
- } // End no-allocation scope.
-
- if (target.is_null()) return isolate_->factory()->undefined_value();
-
- // There will be at least one break point when we are done.
- has_break_points_ = true;
-
- // If the candidate found is compiled we are done.
- done = target->is_compiled();
- if (!done) {
- // If the candidate is not compiled, compile it to reveal any inner
- // functions which might contain the requested source position. This
- // will compile all inner functions that cannot be compiled without a
- // context, because Compiler::BuildFunctionInfo checks whether the
- // debugger is active.
- MaybeHandle<Code> maybe_result = target_function.is_null()
- ? Compiler::GetUnoptimizedCode(target)
- : Compiler::GetUnoptimizedCode(target_function);
- if (maybe_result.is_null()) return isolate_->factory()->undefined_value();
+ }
+ closure = finder.ResultClosure();
+ shared = finder.Result();
}
- } // End while loop.
-
- // JSFunctions from the same literal may not have the same shared function
- // info. Find those JSFunctions and deduplicate the shared function info.
- HeapIterator iterator(heap, FLAG_lazy ? HeapIterator::kNoFiltering
- : HeapIterator::kFilterUnreachable);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsJSFunction()) continue;
- JSFunction* function = JSFunction::cast(obj);
- SharedFunctionInfo* shared = function->shared();
- if (shared != *target && shared->script() == target->script() &&
- shared->start_position_and_type() ==
- target->start_position_and_type()) {
- function->set_shared(*target);
+ if (closure == NULL ? !CompileToRevealInnerFunctions(shared)
+ : !CompileToRevealInnerFunctions(closure)) {
+ break;
}
}
-
- return target;
+ return isolate_->factory()->undefined_value();
}
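
The loop above is a fix-point search: each compilation can append newly revealed inner functions to the script's shared_function_infos list, so the innermost-match lookup restarts until the best match is already compiled, falling back to a heap scan when compilation needs a closure. A toy model of that shape, with all names illustrative:

    #include <vector>

    // Compiling a function reveals its nested functions, so the search
    // restarts until the innermost match is already compiled.
    struct Shared {
      int start, end;
      bool compiled = false;
      std::vector<Shared> inner;  // becomes visible only after compilation
    };

    std::vector<Shared*> g_known;  // stand-in for the script's function list

    Shared* FindInnermost(int pos) {
      Shared* best = nullptr;
      for (Shared* s : g_known) {
        if (s->start > pos || pos > s->end) continue;
        if (best == nullptr ||
            (s->start >= best->start && s->end <= best->end)) {
          best = s;
        }
      }
      return best;
    }

    void Compile(Shared* s) {
      s->compiled = true;
      for (Shared& child : s->inner) g_known.push_back(&child);
    }

    Shared* FindTarget(int pos) {
      while (true) {
        Shared* shared = FindInnermost(pos);
        if (shared == nullptr) return nullptr;  // nothing covers the position
        if (shared->compiled) return shared;    // fix-point reached
        Compile(shared);  // retry: new inner functions may now cover pos
      }
    }

With a script {0, 100} whose lazy inner function {10, 20} becomes known only after the script is compiled, FindTarget(15) compiles the script, then the revealed inner function, and returns the latter.
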
@@ -2143,10 +2070,6 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
return false;
}
- // Make sure IC state is clean.
- shared->code()->ClearInlineCaches();
- shared->feedback_vector()->ClearICSlots(*shared);
-
// Create the debug info object.
Handle<DebugInfo> debug_info = isolate->factory()->NewDebugInfo(shared);
@@ -2507,7 +2430,7 @@ void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
HandleScope scope(isolate_);
// Check whether the promise has been marked as having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- if (JSObject::GetDataProperty(promise, key)->IsUndefined()) {
+ if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
OnException(value, promise);
}
}
@@ -2516,14 +2439,15 @@ void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
MaybeHandle<Object> Debug::PromiseHasUserDefinedRejectHandler(
Handle<JSObject> promise) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(
- JSObject::GetDataProperty(isolate_->js_builtins_object(),
- isolate_->factory()->NewStringFromStaticChars(
- "$promiseHasUserDefinedRejectHandler")));
+ JSReceiver::GetDataProperty(isolate_->js_builtins_object(),
+ isolate_->factory()->NewStringFromStaticChars(
+ "$promiseHasUserDefinedRejectHandler")));
return Execution::Call(isolate_, fun, promise, 0, NULL);
}
void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
+  // For catch prediction, try-finally is not considered to catch.
Isolate::CatchType catch_type = isolate_->PredictExceptionCatcher();
bool uncaught = (catch_type == Isolate::NOT_CAUGHT);
if (promise->IsJSObject()) {
@@ -2803,6 +2727,7 @@ void Debug::ProcessCompileEventInDebugScope(v8::DebugEvent event,
Handle<Context> Debug::GetDebugContext() {
+ if (!is_loaded()) return Handle<Context>();
DebugScope debug_scope(this);
if (debug_scope.failed()) return Handle<Context>();
// The global handle may be destroyed soon after. Return it reboxed.
@@ -2977,16 +2902,17 @@ void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
void Debug::UpdateState() {
- is_active_ = message_handler_ != NULL || !event_listener_.is_null();
- if (is_active_ || in_debug_scope()) {
+ bool is_active = message_handler_ != NULL || !event_listener_.is_null();
+ if (is_active || in_debug_scope()) {
// Note that the debug context could have already been loaded to
// bootstrap test cases.
isolate_->compilation_cache()->Disable();
- is_active_ = Load();
+ is_active = Load();
} else if (is_loaded()) {
isolate_->compilation_cache()->Enable();
Unload();
}
+ is_active_ = is_active;
}
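
One reading of this change: the activity flag is computed into a local and committed only after Load() or Unload() has finished, so code running reentrantly during debugger loading never observes a half-updated is_active_. A hedged sketch of that compute-then-commit pattern, with illustrative names:

    class DebugToggle {
     public:
      void Update(bool has_listeners) {
        bool is_active = has_listeners;
        if (is_active) {
          is_active = Load();  // loading may run arbitrary code
        } else if (loaded_) {
          Unload();
        }
        is_active_ = is_active;  // commit last
      }
      bool active() const { return is_active_; }

     private:
      bool Load() { loaded_ = true; return true; }
      void Unload() { loaded_ = false; }
      bool is_active_ = false;
      bool loaded_ = false;
    };
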
@@ -3211,7 +3137,7 @@ bool MessageImpl::WillStartRunning() const {
}
-v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
+v8::Local<v8::Object> MessageImpl::GetExecutionState() const {
return v8::Utils::ToLocal(exec_state_);
}
@@ -3221,12 +3147,12 @@ v8::Isolate* MessageImpl::GetIsolate() const {
}
-v8::Handle<v8::Object> MessageImpl::GetEventData() const {
+v8::Local<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
-v8::Handle<v8::String> MessageImpl::GetJSON() const {
+v8::Local<v8::String> MessageImpl::GetJSON() const {
Isolate* isolate = event_data_->GetIsolate();
v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
@@ -3235,14 +3161,14 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
Handle<Object> fun = Object::GetProperty(
isolate, event_data_, "toJSONProtocol").ToHandleChecked();
if (!fun->IsJSFunction()) {
- return v8::Handle<v8::String>();
+ return v8::Local<v8::String>();
}
MaybeHandle<Object> maybe_json =
Execution::TryCall(Handle<JSFunction>::cast(fun), event_data_, 0, NULL);
Handle<Object> json;
if (!maybe_json.ToHandle(&json) || !json->IsString()) {
- return v8::Handle<v8::String>();
+ return v8::Local<v8::String>();
}
return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json)));
} else {
@@ -3251,9 +3177,9 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
}
-v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
+v8::Local<v8::Context> MessageImpl::GetEventContext() const {
Isolate* isolate = event_data_->GetIsolate();
- v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
+ v8::Local<v8::Context> context = GetDebugEventContext(isolate);
// Isolate::context() may be NULL when "script collected" event occurs.
DCHECK(!context.IsEmpty());
return context;
@@ -3282,22 +3208,22 @@ DebugEvent EventDetailsImpl::GetEvent() const {
}
-v8::Handle<v8::Object> EventDetailsImpl::GetExecutionState() const {
+v8::Local<v8::Object> EventDetailsImpl::GetExecutionState() const {
return v8::Utils::ToLocal(exec_state_);
}
-v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
+v8::Local<v8::Object> EventDetailsImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
-v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
+v8::Local<v8::Context> EventDetailsImpl::GetEventContext() const {
return GetDebugEventContext(exec_state_->GetIsolate());
}
-v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
+v8::Local<v8::Value> EventDetailsImpl::GetCallbackData() const {
return v8::Utils::ToLocal(callback_data_);
}
@@ -3405,4 +3331,5 @@ void LockingCommandMessageQueue::Clear() {
queue_.Clear();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 9a9a3ba923..fd5f67d6f5 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -129,17 +129,8 @@ class BreakLocation {
private:
BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
- RelocInfo* original_rinfo, int position, int statement_position)
- : debug_info_(debug_info),
- pc_offset_(static_cast<int>(rinfo->pc() - debug_info->code()->entry())),
- original_pc_offset_(static_cast<int>(
- original_rinfo->pc() - debug_info->original_code()->entry())),
- rmode_(rinfo->rmode()),
- original_rmode_(original_rinfo->rmode()),
- data_(rinfo->data()),
- original_data_(original_rinfo->data()),
- position_(position),
- statement_position_(statement_position) {}
+ RelocInfo* original_rinfo, int position,
+ int statement_position);
class Iterator {
public:
@@ -228,31 +219,22 @@ class BreakLocation {
// to it is created and that weak handle is stored in the cache. The weak handle
// callback takes care of removing the script from the cache. The key used in
// the cache is the script id.
-class ScriptCache : private HashMap {
+class ScriptCache {
public:
explicit ScriptCache(Isolate* isolate);
- virtual ~ScriptCache() { Clear(); }
+ ~ScriptCache();
// Add script to the cache.
void Add(Handle<Script> script);
// Return the scripts in the cache.
- Handle<FixedArray> GetScripts();
-
- private:
- // Calculate the hash value from the key (script id).
- static uint32_t Hash(int key) {
- return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
+ Handle<FixedArray> GetScripts() {
+ return WeakValueHashTable::GetWeakValues(table_);
}
- // Clear the cache releasing all the weak handles.
- void Clear();
-
- // Weak handle callback for scripts in the cache.
- static void HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data);
-
+ private:
Isolate* isolate_;
+ Handle<WeakValueHashTable> table_;
};
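
The reworked ScriptCache delegates weakness to a single WeakValueHashTable instead of managing one weak global handle and GC callback per entry. The same idea in standalone C++, with std::weak_ptr standing in for V8's weak handles (Script and the method names below are illustrative, not V8 API):

    #include <map>
    #include <memory>
    #include <vector>

    struct Script { int id; };

    class WeakScriptCache {
     public:
      // Store only a weak reference; the script stays collectible.
      void Add(const std::shared_ptr<Script>& script) {
        table_[script->id] = script;
      }

      // Return the scripts that are still alive, dropping expired entries
      // lazily, much as the GC clears the values of a weak hash table.
      std::vector<std::shared_ptr<Script>> GetScripts() {
        std::vector<std::shared_ptr<Script>> alive;
        for (auto it = table_.begin(); it != table_.end();) {
          if (auto strong = it->second.lock()) {
            alive.push_back(strong);
            ++it;
          } else {
            it = table_.erase(it);  // the value died; remove the stale key
          }
        }
        return alive;
      }

     private:
      std::map<int, std::weak_ptr<Script>> table_;
    };
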
@@ -302,10 +284,10 @@ class MessageImpl: public v8::Debug::Message {
virtual bool IsResponse() const;
virtual DebugEvent GetEvent() const;
virtual bool WillStartRunning() const;
- virtual v8::Handle<v8::Object> GetExecutionState() const;
- virtual v8::Handle<v8::Object> GetEventData() const;
- virtual v8::Handle<v8::String> GetJSON() const;
- virtual v8::Handle<v8::Context> GetEventContext() const;
+ virtual v8::Local<v8::Object> GetExecutionState() const;
+ virtual v8::Local<v8::Object> GetEventData() const;
+ virtual v8::Local<v8::String> GetJSON() const;
+ virtual v8::Local<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
virtual v8::Isolate* GetIsolate() const;
@@ -337,10 +319,10 @@ class EventDetailsImpl : public v8::Debug::EventDetails {
Handle<Object> callback_data,
v8::Debug::ClientData* client_data);
virtual DebugEvent GetEvent() const;
- virtual v8::Handle<v8::Object> GetExecutionState() const;
- virtual v8::Handle<v8::Object> GetEventData() const;
- virtual v8::Handle<v8::Context> GetEventContext() const;
- virtual v8::Handle<v8::Value> GetCallbackData() const;
+ virtual v8::Local<v8::Object> GetExecutionState() const;
+ virtual v8::Local<v8::Object> GetEventData() const;
+ virtual v8::Local<v8::Context> GetEventContext() const;
+ virtual v8::Local<v8::Value> GetCallbackData() const;
virtual v8::Debug::ClientData* GetClientData() const;
private:
DebugEvent event_; // Debug event causing the break.
@@ -483,8 +465,7 @@ class Debug {
bool IsStepping() { return thread_local_.step_count_ > 0; }
bool StepNextContinue(BreakLocation* location, JavaScriptFrame* frame);
bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- void HandleStepIn(Handle<Object> function_obj, Handle<Object> holder,
- Address fp, bool is_constructor);
+ void HandleStepIn(Handle<Object> function_obj, bool is_constructor);
bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
// Purge all code objects that have no debug break slots.
@@ -498,6 +479,9 @@ class Debug {
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
+ template <typename C>
+ bool CompileToRevealInnerFunctions(C* compilable);
+
// This function is used in FunctionNameUsing* tests.
Handle<Object> FindSharedFunctionInfoInScript(Handle<Script> script,
int position);
@@ -534,7 +518,8 @@ class Debug {
static void RecordEvalCaller(Handle<Script> script);
bool CheckExecutionState(int id) {
- return !debug_context().is_null() && break_id() != 0 && break_id() == id;
+ return is_active() && !debug_context().is_null() && break_id() != 0 &&
+ break_id() == id;
}
// Flags and states.
@@ -639,7 +624,7 @@ class Debug {
static bool CompileDebuggerScript(Isolate* isolate, int index);
void ClearOneShot();
- void ActivateStepIn(StackFrame* frame);
+ void ActivateStepIn(Handle<JSFunction> function, StackFrame* frame);
void ClearStepIn();
void ActivateStepOut(StackFrame* frame);
void ClearStepNext();
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index ed49133673..684a37ff87 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -194,26 +194,12 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
has_construct_stub);
isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
- // Get the "simulated" top and size for the requested frame.
- FrameDescription* parameters_frame =
- deoptimizer->output_[
- has_arguments_adaptor ? (frame_index - 1) : frame_index];
-
- uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
- Address parameters_top = reinterpret_cast<Address>(
- parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
- parameters_size));
-
- uint32_t expressions_size = info->expression_count() * kPointerSize;
- Address expressions_top = reinterpret_cast<Address>(
- deoptimizer->output_[frame_index]->GetTop());
-
// Done with the GC-unsafe frame descriptions. This re-enables allocation.
deoptimizer->DeleteFrameDescriptions();
// Allocate a heap number for the doubles belonging to this frame.
deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
- parameters_top, parameters_size, expressions_top, expressions_size, info);
+ frame_index, info->parameters_count(), info->expression_count(), info);
// Finished using the deoptimizer instance.
delete deoptimizer;
@@ -260,7 +246,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
// changed the code to which it refers to no longer be optimized code.
// Remove the function from this list.
if (prev != NULL) {
- prev->set_next_function_link(next);
+ prev->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
} else {
context->SetOptimizedFunctionsListHead(next);
}
@@ -268,7 +254,8 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
CHECK_EQ(function->next_function_link(), next);
// Set the next function link to undefined to indicate it is no longer
// in the optimized functions list.
- function->set_next_function_link(context->GetHeap()->undefined_value());
+ function->set_next_function_link(context->GetHeap()->undefined_value(),
+ SKIP_WRITE_BARRIER);
} else {
// The visitor should not alter the link directly.
CHECK_EQ(function->next_function_link(), next);
@@ -342,9 +329,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
StackFrame::Type type = it.frame()->type();
if (type == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
if (FLAG_trace_deopt) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimizer found activation of function: ");
function->PrintName(scope.file());
@@ -354,7 +341,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
- bool turbofanned = code->is_turbofanned() && !FLAG_turbo_deoptimization;
+ bool turbofanned = code->is_turbofanned() &&
+ function->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization;
bool safe_to_deopt =
deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
@@ -380,7 +369,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
Object* next = code->next_code_link();
if (code->marked_for_deoptimization()) {
- DCHECK(!code->is_turbofanned() || FLAG_turbo_deoptimization);
// Put the code into the list for later patching.
codes.Add(code, &zone);
@@ -423,14 +411,14 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
// Do platform-specific patching to force any activations to lazy deopt.
- if (!codes[i]->is_turbofanned() || FLAG_turbo_deoptimization) {
- PatchCodeForDeoptimization(isolate, codes[i]);
+ PatchCodeForDeoptimization(isolate, codes[i]);
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
- }
+ // We might be in the middle of incremental marking with compaction.
+ // Ignore all slots that might have been recorded in the body of the
+ // deoptimized code object.
+ Code* code = codes[i];
+ isolate->heap()->mark_compact_collector()->RemoveObjectSlots(
+ code->instruction_start(), code->address() + code->Size());
}
}
@@ -468,28 +456,6 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
}
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- if (FLAG_trace_deopt) {
- CodeTracer::Scope scope(object->GetHeap()->isolate()->GetCodeTracer());
- PrintF(scope.file(), "[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
- reinterpret_cast<intptr_t>(object));
- }
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(object->GetIsolate(), object);
- // TODO(verwaest): This CHECK will be hit if the global proxy is detached.
- CHECK(iter.GetCurrent()->IsJSGlobalObject());
- Context* native_context =
- GlobalObject::cast(iter.GetCurrent())->native_context();
- MarkAllCodeForContext(native_context);
- DeoptimizeMarkedCodeForContext(native_context);
- } else if (object->IsGlobalObject()) {
- Context* native_context = GlobalObject::cast(object)->native_context();
- MarkAllCodeForContext(native_context);
- DeoptimizeMarkedCodeForContext(native_context);
- }
-}
-
-
void Deoptimizer::MarkAllCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined()) {
@@ -546,13 +512,9 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
}
-Deoptimizer::Deoptimizer(Isolate* isolate,
- JSFunction* function,
- BailoutType type,
- unsigned bailout_id,
- Address from,
- int fp_to_sp_delta,
- Code* optimized_code)
+Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
+ BailoutType type, unsigned bailout_id, Address from,
+ int fp_to_sp_delta, Code* optimized_code)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
@@ -560,28 +522,18 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
has_alignment_padding_(0),
- input_(NULL),
+ input_(nullptr),
output_count_(0),
jsframe_count_(0),
- output_(NULL),
- deferred_objects_tagged_values_(0),
- deferred_objects_double_values_(0),
- deferred_objects_(0),
- deferred_heap_numbers_(0),
- jsframe_functions_(0),
- jsframe_has_adapted_arguments_(0),
- materialized_values_(NULL),
- materialized_objects_(NULL),
- materialization_value_index_(0),
- materialization_object_index_(0),
- trace_scope_(NULL) {
+ output_(nullptr),
+ trace_scope_(nullptr) {
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
if (function->IsSmi()) {
- function = NULL;
+ function = nullptr;
}
- DCHECK(from != NULL);
- if (function != NULL && function->IsOptimized()) {
+ DCHECK(from != nullptr);
+ if (function != nullptr && function->IsOptimized()) {
function->shared()->increment_deopt_count();
if (bailout_type_ == Deoptimizer::SOFT) {
isolate->counters()->soft_deopts_executed()->Increment();
@@ -723,6 +675,8 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
<< "[method: " << shared->DebugName()->ToCString().get() << "]\n"
<< "[source:\n" << SourceCodeOf(shared) << "\n]" << std::endl;
+ shared->GetHeap()->isolate()->PushStackTraceAndDie(0xfefefefe, data, shared,
+ 0xfefefeff);
FATAL("unable to find pc offset during deoptimization");
return -1;
}
@@ -778,22 +732,20 @@ void Deoptimizer::DoComputeOutputFrames() {
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
+ TranslationIterator state_iterator(translations, translation_index);
+ translated_state_.Init(
+ input_->GetFramePointerAddress(), function_, &state_iterator,
+ input_data->LiteralArray(), input_->GetRegisterValues(),
+ trace_scope_ == nullptr ? nullptr : trace_scope_->file());
+
// Do the input frame to output frame(s) translation.
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- DCHECK(Translation::BEGIN == opcode);
- USE(opcode);
- // Read the number of output frames and allocate an array for their
- // descriptions.
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
+ size_t count = translated_state_.frames().size();
DCHECK(output_ == NULL);
output_ = new FrameDescription*[count];
- for (int i = 0; i < count; ++i) {
+ for (size_t i = 0; i < count; ++i) {
output_[i] = NULL;
}
- output_count_ = count;
+ output_count_ = static_cast<int>(count);
Register fp_reg = JavaScriptFrame::fp_register();
stack_fp_ = reinterpret_cast<Address>(
@@ -801,46 +753,31 @@ void Deoptimizer::DoComputeOutputFrames() {
has_alignment_padding_ * kPointerSize);
// Translate each output frame.
- for (int i = 0; i < count; ++i) {
+ for (size_t i = 0; i < count; ++i) {
// Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- switch (opcode) {
- case Translation::JS_FRAME:
- DoComputeJSFrame(&iterator, i);
+ int frame_index = static_cast<int>(i);
+ switch (translated_state_.frames()[i].kind()) {
+ case TranslatedFrame::kFunction:
+ DoComputeJSFrame(nullptr, frame_index);
jsframe_count_++;
break;
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- DoComputeArgumentsAdaptorFrame(&iterator, i);
+ case TranslatedFrame::kArgumentsAdaptor:
+ DoComputeArgumentsAdaptorFrame(nullptr, frame_index);
break;
- case Translation::CONSTRUCT_STUB_FRAME:
- DoComputeConstructStubFrame(&iterator, i);
+ case TranslatedFrame::kConstructStub:
+ DoComputeConstructStubFrame(nullptr, frame_index);
break;
- case Translation::GETTER_STUB_FRAME:
- DoComputeAccessorStubFrame(&iterator, i, false);
+ case TranslatedFrame::kGetter:
+ DoComputeAccessorStubFrame(nullptr, frame_index, false);
break;
- case Translation::SETTER_STUB_FRAME:
- DoComputeAccessorStubFrame(&iterator, i, true);
+ case TranslatedFrame::kSetter:
+ DoComputeAccessorStubFrame(nullptr, frame_index, true);
break;
- case Translation::COMPILED_STUB_FRAME:
- DoComputeCompiledStubFrame(&iterator, i);
+ case TranslatedFrame::kCompiledStub:
+ DoComputeCompiledStubFrame(nullptr, frame_index);
break;
- case Translation::BEGIN:
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::UINT32_REGISTER:
- case Translation::BOOL_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::STACK_SLOT:
- case Translation::INT32_STACK_SLOT:
- case Translation::UINT32_STACK_SLOT:
- case Translation::BOOL_STACK_SLOT:
- case Translation::DOUBLE_STACK_SLOT:
- case Translation::LITERAL:
- case Translation::ARGUMENTS_OBJECT:
- case Translation::DUPLICATED_OBJECT:
- case Translation::CAPTURED_OBJECT:
- FATAL("Unsupported translation");
+ case TranslatedFrame::kInvalid:
+ FATAL("invalid frame");
break;
}
}
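
The rewritten DoComputeOutputFrames follows a decode-then-dispatch shape: the translation stream is materialized into translated_state_ up front, and each typed frame is then handled by kind instead of pulling opcodes from an iterator mid-construction. A minimal sketch of the pattern, with illustrative enum values and names:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Typed result of decoding the translation stream up front.
    enum class FrameKind {
      kFunction, kArgumentsAdaptor, kConstructStub, kInvalid
    };

    struct Frame {
      FrameKind kind;
      int height;
    };

    // Dispatch on the pre-decoded frames instead of decoding opcodes
    // while building each output frame.
    void ComputeOutputFrames(const std::vector<Frame>& frames) {
      for (std::size_t i = 0; i < frames.size(); ++i) {
        switch (frames[i].kind) {
          case FrameKind::kFunction:
            std::printf("JS frame %zu, height %d\n", i, frames[i].height);
            break;
          case FrameKind::kArgumentsAdaptor:
            std::printf("arguments adaptor frame %zu\n", i);
            break;
          case FrameKind::kConstructStub:
            std::printf("construct stub frame %zu\n", i);
            break;
          case FrameKind::kInvalid:
            std::printf("invalid frame %zu\n", i);
            break;
        }
      }
    }
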
@@ -869,20 +806,20 @@ void Deoptimizer::DoComputeOutputFrames() {
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- CHECK_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next() - 1; // Do not count the context.
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
+ BailoutId node_id = translated_frame->node_id();
+ unsigned height =
+ translated_frame->height() - 1; // Do not count the context.
unsigned height_in_bytes = height * kPointerSize;
+ JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ value_iterator++;
+ input_index++;
if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(), " translating ");
+ PrintF(trace_scope_->file(), " translating frame ");
function->PrintName(trace_scope_->file());
PrintF(trace_scope_->file(),
" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -936,7 +873,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
}
input_offset -= (parameter_count * kPointerSize);
@@ -957,12 +895,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetCallerPc(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
// The caller's frame pointer for the bottommost output frame is the same
// as in the input frame. For all subsequent output frames, it can be
@@ -981,16 +914,11 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
has_alignment_padding_ * kPointerSize) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
DCHECK(!is_bottommost || !has_alignment_padding_ ||
(fp_value & kPointerSize) != 0);
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
// from the input frame. For subsequent output frames, it can be read from
// the previous frame.
@@ -1002,11 +930,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetConstantPool();
}
output_frame->SetCallerConstantPool(output_offset, value);
- if (trace_scope_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; caller's constant_pool\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "caller's constant_pool\n");
}
// For the bottommost output frame the context can be gotten from the input
@@ -1016,31 +941,25 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
input_offset -= kPointerSize;
// Read the context from the translations.
- DoTranslateCommand(iterator, frame_index, output_offset);
- value = output_frame->GetFrameSlot(output_offset);
+ Object* context = value_iterator->GetRawValue();
// The context should not be a placeholder for a materialized object.
- CHECK(value !=
- reinterpret_cast<intptr_t>(isolate_->heap()->arguments_marker()));
- if (value ==
- reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value())) {
+ CHECK(context != isolate_->heap()->arguments_marker());
+ if (context == isolate_->heap()->undefined_value()) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
CHECK(!compiled_code_->is_turbofanned());
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
+ context =
+ is_bottommost
+ ? reinterpret_cast<Object*>(input_->GetFrameSlot(input_offset))
+ : function->context();
}
+ value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; context\n",
- top_address + output_offset, output_offset, value);
- }
+ WriteValueToOutput(context, input_index, frame_index, output_offset,
+ "context ");
+ value_iterator++;
+ input_index++;
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
@@ -1049,18 +968,13 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// The function for the bottommost output frame should also agree with the
// input frame.
DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; function\n",
- top_address + output_offset, output_offset, value);
- }
+ WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
// Translate the rest of the frame.
for (unsigned i = 0; i < height; ++i) {
output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
}
CHECK_EQ(0u, output_offset);
@@ -1075,7 +989,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetPc(pc_value);
// Update constant pool.
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
@@ -1109,9 +1023,16 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
+ unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
+ JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ value_iterator++;
+ input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" translating arguments adaptor => height=%d\n", height_in_bytes);
@@ -1141,19 +1062,15 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
+ DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
@@ -1161,23 +1078,15 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
output_frame->SetCallerConstantPool(output_offset, value);
- if (trace_scope_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; caller's constant_pool\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "caller's constant_pool\n");
}
// A marker value is used in place of the context.
@@ -1185,33 +1094,21 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
- }
+ DebugPrintOutputSlot(context, frame_index, output_offset,
+ "context (adaptor sentinel)\n");
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function\n",
- top_address + output_offset, output_offset, value);
- }
+ WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "argc ");
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
DCHECK(0 == output_offset);
@@ -1223,7 +1120,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
@@ -1233,11 +1130,18 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
+ unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
+ JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ value_iterator++;
+ input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" translating construct stub => height=%d\n", height_in_bytes);
@@ -1267,27 +1171,19 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
- int deferred_object_index = deferred_objects_.length();
- DoTranslateCommand(iterator, frame_index, output_offset);
// The allocated receiver of a construct stub frame is passed as the
// receiver parameter through the translation. It might be encoding
- // a captured object, patch the slot address for a captured object.
- if (i == 0 && deferred_objects_.length() > deferred_object_index) {
- CHECK(!deferred_objects_[deferred_object_index].is_arguments());
- deferred_objects_[deferred_object_index].patch_slot_address(top_address);
- }
+    // a captured object; in that case, override the slot address.
+ WriteTranslatedValueToOutput(
+ &value_iterator, &input_index, frame_index, output_offset, nullptr,
+ (i == 0) ? reinterpret_cast<Address>(top_address) : nullptr);
}
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
+ DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
@@ -1295,81 +1191,43 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
output_frame->SetCallerConstantPool(output_offset, value);
- if (trace_scope_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's constant pool\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "caller's constant_pool\n");
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "function (construct sentinel)\n");
// The output frame reflects a JSConstructStubGeneric frame.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub (only present on some
- // architectures, indicated by kConstructorOffset).
- if (ConstructFrameConstants::kConstructorOffset != kMinInt) {
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "argc ");
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
// The newly allocated object was passed as receiver in the artificial
@@ -1377,12 +1235,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "allocated receiver\n");
CHECK_EQ(0u, output_offset);
@@ -1390,7 +1244,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(construct_stub->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
@@ -1401,7 +1255,14 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
+ JSFunction* accessor = JSFunction::cast(value_iterator->GetRawValue());
+ value_iterator++;
+ input_index++;
// The receiver (and the implicit return value, if any) are expected in
// registers by the LoadIC/StoreIC, so they don't belong to the output stack
// frame. This means that we have to use a height of 0.
@@ -1415,7 +1276,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
// We need 1 stack entry for the return address and enough entries for the
// StackFrame::INTERNAL (FP, context, frame type, code object and constant
- // pool (if FLAG_enable_ool_constant_pool)- see MacroAssembler::EnterFrame).
+ // pool (if enabled) - see MacroAssembler::EnterFrame).
// For a setter stub frame we need one additional entry for the implicit
// return value, see StoreStubCompiler::CompileStoreViaSetter.
unsigned fixed_frame_entries =
@@ -1445,12 +1306,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
+ DebugPrintOutputSlot(callers_pc, frame_index, output_offset,
+ "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
@@ -1458,45 +1314,30 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
output_frame->SetCallerConstantPool(output_offset, value);
- if (trace_scope_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's constant pool\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "caller's constant_pool\n");
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "function ");
+ if (trace_scope_ != nullptr) {
+ PrintF(trace_scope_->file(), "(%s sentinel)\n", kind);
}
// Get Code object from accessor stub.
@@ -1507,21 +1348,18 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
Code* accessor_stub = isolate_->builtins()->builtin(name);
value = reinterpret_cast<intptr_t>(accessor_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
// Skip receiver.
- DoTranslateObjectAndSkip(iterator);
+ value_iterator++;
+ input_index++;
if (is_setter_stub_frame) {
// The implicit return value was part of the artificial setter stub
// environment.
output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
}
CHECK_EQ(0u, output_offset);
@@ -1532,7 +1370,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
@@ -1574,6 +1412,11 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// reg = JSFunction context
//
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
CHECK(compiled_code_->is_hydrogen_stub());
int major_key = CodeStub::GetMajorKey(compiled_code_);
CodeStubDescriptor descriptor(isolate_, compiled_code_->stub_key());
@@ -1582,7 +1425,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
+ CHECK_EQ(translated_frame->height(), param_count);
CHECK_GE(param_count, 0);
int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
@@ -1617,12 +1461,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
intptr_t value = input_->GetFrameSlot(input_frame_offset);
output_frame->SetCallerPc(output_frame_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "caller's pc\n");
// Read caller's FP from the input frame, and set this frame's FP.
input_frame_offset -= kFPOnStackSize;
@@ -1632,24 +1472,17 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
output_frame->SetRegister(fp_reg.code(), frame_ptr);
output_frame->SetFp(frame_ptr);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "caller's fp\n");
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the input frame.
input_frame_offset -= kPointerSize;
value = input_->GetFrameSlot(input_frame_offset);
output_frame_offset -= kPointerSize;
output_frame->SetCallerConstantPool(output_frame_offset, value);
- if (trace_scope_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's constant_pool\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "caller's constant_pool\n");
}
// The context can be gotten from the input frame.
@@ -1660,24 +1493,15 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame_offset -= kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
CHECK(reinterpret_cast<Object*>(value)->IsContext());
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset, "context\n");
// A marker value is used in place of the function.
output_frame_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (stub failure sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "function (stub failure sentinel)\n");
intptr_t caller_arg_count = 0;
bool arg_count_known = !descriptor.stack_parameter_count().is_valid();
@@ -1695,44 +1519,34 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(args_arguments_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.arguments %s\n",
- top_address + args_arguments_offset, args_arguments_offset, value,
- arg_count_known ? "" : "(the hole)");
- }
+ DebugPrintOutputSlot(
+ value, frame_index, args_arguments_offset,
+ arg_count_known ? "args.arguments\n" : "args.arguments (the hole)\n");
output_frame_offset -= kPointerSize;
int length_frame_offset = output_frame_offset;
value = arg_count_known ? caller_arg_count : the_hole;
output_frame->SetFrameSlot(length_frame_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.length %s\n",
- top_address + length_frame_offset, length_frame_offset, value,
- arg_count_known ? "" : "(the hole)");
- }
+ DebugPrintOutputSlot(
+ value, frame_index, length_frame_offset,
+ arg_count_known ? "args.length\n" : "args.length (the hole)\n");
output_frame_offset -= kPointerSize;
value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
(output_frame_size - output_frame_offset) + kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset, "args*\n");
// Copy the register parameters to the failure frame.
int arguments_length_offset = -1;
for (int i = 0; i < param_count; ++i) {
output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, 0,
+ output_frame_offset);
- if (!arg_count_known && descriptor.IsEnvironmentParameterCountRegister(i)) {
+ if (!arg_count_known &&
+ descriptor.GetRegisterParameter(i)
+ .is(descriptor.stack_parameter_count())) {
arguments_length_offset = output_frame_offset;
}
}
@@ -1748,23 +1562,13 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame->GetFrameSlot(arguments_length_offset));
caller_arg_count = smi_caller_arg_count->value();
output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.length\n",
- top_address + length_frame_offset, length_frame_offset,
- caller_arg_count);
- }
+ DebugPrintOutputSlot(caller_arg_count, frame_index, length_frame_offset,
+ "args.length\n");
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(args_arguments_offset, value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.arguments\n",
- top_address + args_arguments_offset, args_arguments_offset,
- value);
- }
+ DebugPrintOutputSlot(value, frame_index, args_arguments_offset,
+ "args.arguments");
}
// Copy the double registers from the input into the output frame.
@@ -1781,7 +1585,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
DCHECK(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
Register constant_pool_reg =
StubFailureTrampolineFrame::constant_pool_pointer_register();
intptr_t constant_pool_value =
@@ -1797,1000 +1601,135 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
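
The args.length/args.arguments handling above is a two-pass pattern: when the caller's argument count is unknown while the frame is being laid out, the hole is stored as a placeholder and the slot offset is recorded, then the slot is patched once copying the register parameters reveals the real count. A minimal standalone sketch of that pattern, with a plain std::vector standing in for the output frame and kTheHole a made-up sentinel rather than V8's hole value:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const std::intptr_t kTheHole = -1;  // made-up stand-in for the hole value
  std::vector<std::intptr_t> frame(8, 0);

  // Pass 1: the caller's argument count is not known yet, so write the
  // sentinel and remember which slot will need patching.
  int length_frame_offset = 3;
  frame[length_frame_offset] = kTheHole;

  // ...copying the register parameters reveals the real count...
  std::intptr_t caller_arg_count = 2;

  // Pass 2: overwrite the recorded slot with the real value.
  frame[length_frame_offset] = caller_arg_count;
  assert(frame[length_frame_offset] == 2);
  return 0;
}
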
-Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
- int object_index = materialization_object_index_++;
- ObjectMaterializationDescriptor desc = deferred_objects_[object_index];
- const int length = desc.object_length();
-
- if (desc.duplicate_object() >= 0) {
- // Found a previously materialized object by de-duplication.
- object_index = desc.duplicate_object();
- materialized_objects_->Add(Handle<Object>());
- } else if (desc.is_arguments() && ArgumentsObjectIsAdapted(object_index)) {
- // Use the arguments adapter frame we just built to materialize the
- // arguments object. FunctionGetArguments can't throw an exception.
- Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
- Handle<JSObject> arguments = Handle<JSObject>::cast(
- Accessors::FunctionGetArguments(function));
- materialized_objects_->Add(arguments);
- // To keep consistent object counters, we still materialize the
- // nested values (but we throw them away).
- for (int i = 0; i < length; ++i) {
- MaterializeNextValue();
- }
- } else if (desc.is_arguments()) {
- // Construct an arguments object and copy the parameters to a newly
- // allocated arguments object backing store.
- Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
- Handle<JSObject> arguments =
- isolate_->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
- DCHECK_EQ(array->length(), length);
- arguments->set_elements(*array);
- materialized_objects_->Add(arguments);
- for (int i = 0; i < length; ++i) {
- Handle<Object> value = MaterializeNextValue();
- array->set(i, *value);
- }
- } else {
- // Dispatch on the instance type of the object to be materialized.
- // We also need to make sure that the representation of all fields
- // in the given object are general enough to hold a tagged value.
- Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
- Handle<Map>::cast(MaterializeNextValue()));
- switch (map->instance_type()) {
- case MUTABLE_HEAP_NUMBER_TYPE:
- case HEAP_NUMBER_TYPE: {
- // Reuse the HeapNumber value directly as it is already properly
- // tagged and skip materializing the HeapNumber explicitly. Turn mutable
- // heap numbers immutable.
- Handle<Object> object = MaterializeNextValue();
- if (object_index < prev_materialized_count_) {
- materialized_objects_->Add(Handle<Object>(
- previously_materialized_objects_->get(object_index), isolate_));
- } else {
- materialized_objects_->Add(object);
- }
- materialization_value_index_ += kDoubleSize / kPointerSize - 1;
- break;
- }
- case JS_OBJECT_TYPE: {
- Handle<JSObject> object =
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
- if (object_index < prev_materialized_count_) {
- materialized_objects_->Add(Handle<Object>(
- previously_materialized_objects_->get(object_index), isolate_));
- } else {
- materialized_objects_->Add(object);
- }
- Handle<Object> properties = MaterializeNextValue();
- Handle<Object> elements = MaterializeNextValue();
- object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArrayBase::cast(*elements));
- for (int i = 0; i < length - 3; ++i) {
- Handle<Object> value = MaterializeNextValue();
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- break;
- }
- case JS_ARRAY_TYPE: {
- Handle<JSArray> object =
- isolate_->factory()->NewJSArray(0, map->elements_kind());
- if (object_index < prev_materialized_count_) {
- materialized_objects_->Add(Handle<Object>(
- previously_materialized_objects_->get(object_index), isolate_));
- } else {
- materialized_objects_->Add(object);
- }
- Handle<Object> properties = MaterializeNextValue();
- Handle<Object> elements = MaterializeNextValue();
- Handle<Object> length = MaterializeNextValue();
- object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_length(*length);
- break;
- }
- default:
- PrintF(stderr,
- "[couldn't handle instance type %d]\n", map->instance_type());
- FATAL("Unsupported instance type");
- }
- }
-
- return materialized_objects_->at(object_index);
-}
-
-
-Handle<Object> Deoptimizer::MaterializeNextValue() {
- int value_index = materialization_value_index_++;
- Handle<Object> value = materialized_values_->at(value_index);
- if (value->IsMutableHeapNumber()) {
- HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map());
- }
- if (*value == isolate_->heap()->arguments_marker()) {
- value = MaterializeNextHeapObject();
- }
- return value;
-}
-
-
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
DCHECK_NE(DEBUGGER, bailout_type_);
- MaterializedObjectStore* materialized_store =
- isolate_->materialized_object_store();
- previously_materialized_objects_ = materialized_store->Get(stack_fp_);
- prev_materialized_count_ = previously_materialized_objects_.is_null() ?
- 0 : previously_materialized_objects_->length();
-
- // Walk all JavaScript output frames with the given frame iterator.
+ // Walk to the last JavaScript output frame to find out if it has
+ // adapted arguments.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
- JavaScriptFrame* frame = it->frame();
- jsframe_functions_.Add(handle(frame->function(), isolate_));
- jsframe_has_adapted_arguments_.Add(frame->has_adapted_arguments());
- }
-
- // Handlify all tagged object values before triggering any allocation.
- List<Handle<Object> > values(deferred_objects_tagged_values_.length());
- for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
- values.Add(Handle<Object>(deferred_objects_tagged_values_[i], isolate_));
- }
-
- // Play it safe and clear all unhandlified values before we continue.
- deferred_objects_tagged_values_.Clear();
-
- // Materialize all heap numbers before looking at arguments because when the
- // output frames are used to materialize arguments objects later on they need
- // to already contain valid heap numbers.
- for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new heap number %p [%e] in slot %p\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.destination());
- }
- Memory::Object_at(d.destination()) = *num;
- }
-
- // Materialize all heap numbers required for arguments/captured objects.
- for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
- HeapNumberMaterializationDescriptor<int> d =
- deferred_objects_double_values_[i];
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materialized a new heap number %p [%e] for object at %d\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.destination());
- }
- DCHECK(values.at(d.destination())->IsTheHole());
- values.Set(d.destination(), num);
- }
-
- // Play it safe and clear all object double values before we continue.
- deferred_objects_double_values_.Clear();
-
- // Materialize arguments/captured objects.
- if (!deferred_objects_.is_empty()) {
- List<Handle<Object> > materialized_objects(deferred_objects_.length());
- materialized_objects_ = &materialized_objects;
- materialized_values_ = &values;
-
- while (materialization_object_index_ < deferred_objects_.length()) {
- int object_index = materialization_object_index_;
- ObjectMaterializationDescriptor descriptor =
- deferred_objects_.at(object_index);
-
- // Find a previously materialized object by de-duplication or
- // materialize a new instance of the object if necessary. Store
- // the materialized object into the frame slot.
- Handle<Object> object = MaterializeNextHeapObject();
- if (descriptor.slot_address() != NULL) {
- Memory::Object_at(descriptor.slot_address()) = *object;
- }
- if (trace_scope_ != NULL) {
- if (descriptor.is_arguments()) {
- PrintF(trace_scope_->file(),
- "Materialized %sarguments object of length %d for %p: ",
- ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
- Handle<JSObject>::cast(object)->elements()->length(),
- reinterpret_cast<void*>(descriptor.slot_address()));
- } else {
- PrintF(trace_scope_->file(),
- "Materialized captured object of size %d for %p: ",
- Handle<HeapObject>::cast(object)->Size(),
- reinterpret_cast<void*>(descriptor.slot_address()));
- }
- object->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(), "\n");
- }
+ }
+ translated_state_.Prepare(it->frame()->has_adapted_arguments(), stack_fp_);
+
+ for (auto& materialization : values_to_materialize_) {
+ Handle<Object> value = materialization.value_->GetValue();
+
+ if (trace_scope_ != nullptr) {
+ PrintF("Materialization [0x%08" V8PRIxPTR "] <- 0x%08" V8PRIxPTR " ; ",
+ reinterpret_cast<intptr_t>(materialization.output_slot_address_),
+ reinterpret_cast<intptr_t>(*value));
+ value->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
- CHECK_EQ(materialization_object_index_, materialized_objects_->length());
- CHECK_EQ(materialization_value_index_, materialized_values_->length());
+ *(reinterpret_cast<intptr_t*>(materialization.output_slot_address_)) =
+ reinterpret_cast<intptr_t>(*value);
}
- if (prev_materialized_count_ > 0) {
- bool removed = materialized_store->Remove(stack_fp_);
- CHECK(removed);
- }
+ isolate_->materialized_object_store()->Remove(stack_fp_);
}
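
The rewritten loop reduces materialization to a list of (output slot address, translated value) pairs that are written back once the output frames exist. A self-contained model of that write-back, using raw std::intptr_t words in place of V8 handles and tagged pointers:

#include <cstdint>
#include <cstdio>
#include <vector>

struct Materialization {
  std::intptr_t* output_slot_address;  // frame slot waiting for the value
  std::intptr_t value;                 // value produced by materialization
};

int main() {
  std::intptr_t slot = 0;
  std::vector<Materialization> values_to_materialize = {{&slot, 0x1234}};

  for (auto& m : values_to_materialize) {
    std::printf("Materialization [%p] <- 0x%llx\n",
                static_cast<void*>(m.output_slot_address),
                static_cast<unsigned long long>(m.value));
    *m.output_slot_address = m.value;  // patch the frame slot in place
  }
  return slot == 0x1234 ? 0 : 1;
}
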
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
- Address parameters_top,
- uint32_t parameters_size,
- Address expressions_top,
- uint32_t expressions_size,
+ int frame_index, int parameter_count, int expression_count,
DeoptimizedFrameInfo* info) {
CHECK_EQ(DEBUGGER, bailout_type_);
- Address parameters_bottom = parameters_top + parameters_size;
- Address expressions_bottom = expressions_top + expressions_size;
- for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
-
- // Check of the heap number to materialize actually belong to the frame
- // being extracted.
- Address slot = d.destination();
- if (parameters_top <= slot && slot < parameters_bottom) {
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-
- int index = (info->parameters_count() - 1) -
- static_cast<int>(slot - parameters_top) / kPointerSize;
-
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materializing a new heap number %p [%e] in slot %p"
- "for parameter slot #%d\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.destination(),
- index);
- }
- info->SetParameter(index, *num);
- } else if (expressions_top <= slot && slot < expressions_bottom) {
- Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-
- int index = info->expression_count() - 1 -
- static_cast<int>(slot - expressions_top) / kPointerSize;
-
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- "Materializing a new heap number %p [%e] in slot %p"
- "for expression slot #%d\n",
- reinterpret_cast<void*>(*num),
- d.value(),
- d.destination(),
- index);
- }
-
- info->SetExpression(index, *num);
+ translated_state_.Prepare(false, nullptr);
+
+ TranslatedFrame* frame = &(translated_state_.frames()[frame_index]);
+ CHECK(frame->kind() == TranslatedFrame::kFunction);
+ int frame_arg_count = frame->shared_info()->internal_formal_parameter_count();
+
+ // The height is #expressions + 1 for context.
+ CHECK_EQ(expression_count + 1, frame->height());
+ TranslatedFrame* argument_frame = frame;
+ if (frame_index > 0) {
+ TranslatedFrame* previous_frame =
+ &(translated_state_.frames()[frame_index - 1]);
+ if (previous_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
+ argument_frame = previous_frame;
+ CHECK_EQ(parameter_count, argument_frame->height() - 1);
+ } else {
+ CHECK_EQ(frame_arg_count, parameter_count);
}
+ } else {
+ CHECK_EQ(frame_arg_count, parameter_count);
}
-}
-
-
-static const char* TraceValueType(bool is_smi) {
- if (is_smi) {
- return "smi";
- }
-
- return "heap number";
-}
-
-
-void Deoptimizer::DoTranslateObjectAndSkip(TranslationIterator* iterator) {
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME: {
- FATAL("Unexpected frame start translation opcode");
- return;
- }
-
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::UINT32_REGISTER:
- case Translation::BOOL_REGISTER:
- case Translation::DOUBLE_REGISTER:
- case Translation::STACK_SLOT:
- case Translation::INT32_STACK_SLOT:
- case Translation::UINT32_STACK_SLOT:
- case Translation::BOOL_STACK_SLOT:
- case Translation::DOUBLE_STACK_SLOT:
- case Translation::LITERAL: {
- // The value is not part of any materialized object, so we can ignore it.
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
- return;
- }
- case Translation::DUPLICATED_OBJECT: {
- int object_index = iterator->Next();
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(), " skipping object ");
- PrintF(trace_scope_->file(),
- " ; duplicate of object #%d\n", object_index);
- }
- AddObjectDuplication(0, object_index);
- return;
+ TranslatedFrame::iterator arg_iter = argument_frame->begin();
+ arg_iter++; // Skip the function.
+ arg_iter++; // Skip the receiver.
+ for (int i = 0; i < parameter_count; i++, arg_iter++) {
+ if (!arg_iter->IsMaterializedObject()) {
+ info->SetParameter(i, *(arg_iter->GetValue()));
}
+ }
- case Translation::ARGUMENTS_OBJECT:
- case Translation::CAPTURED_OBJECT: {
- int length = iterator->Next();
- bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(), " skipping object ");
- PrintF(trace_scope_->file(),
- " ; object (length = %d, is_args = %d)\n", length, is_args);
- }
-
- AddObjectStart(0, length, is_args);
+ TranslatedFrame::iterator iter = frame->begin();
+ // Skip the function, receiver, context and arguments.
+ for (int i = 0; i < frame_arg_count + 3; i++, iter++) {
+ }
- // We save the object values on the side and materialize the actual
- // object after the deoptimized frame is built.
- int object_index = deferred_objects_.length() - 1;
- for (int i = 0; i < length; i++) {
- DoTranslateObject(iterator, object_index, i);
- }
- return;
+ for (int i = 0; i < expression_count; i++, iter++) {
+ if (!iter->IsMaterializedObject()) {
+ info->SetExpression(i, *(iter->GetValue()));
}
}
-
- FATAL("Unexpected translation opcode");
}
-void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
- int object_index,
- int field_index) {
- disasm::NameConverter converter;
- Address object_slot = deferred_objects_[object_index].slot_address();
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
+void Deoptimizer::WriteTranslatedValueToOutput(
+ TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
+ unsigned output_offset, const char* debug_hint_string,
+ Address output_address_for_materialization) {
+ Object* value = (*iterator)->GetRawValue();
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
- FATAL("Unexpected frame start translation opcode");
- return;
+ WriteValueToOutput(value, *input_index, frame_index, output_offset,
+ debug_hint_string);
- case Translation::REGISTER: {
- int input_reg = iterator->Next();
- intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "0x%08" V8PRIxPTR " ; %s ", input_value,
- converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint(
- trace_scope_->file());
- PrintF(trace_scope_->file(),
- "\n");
- }
- AddObjectTaggedValue(input_value);
- return;
- }
-
- case Translation::INT32_REGISTER: {
- int input_reg = iterator->Next();
- intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = Smi::IsValid(value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "%" V8PRIdPTR " ; %s (%s)\n", value,
- converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- AddObjectTaggedValue(tagged_value);
- } else {
- double double_value = static_cast<double>(static_cast<int32_t>(value));
- AddObjectDoubleValue(double_value);
- }
- return;
- }
-
- case Translation::UINT32_REGISTER: {
- int input_reg = iterator->Next();
- uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(), "%" V8PRIuPTR " ; uint %s (%s)\n", value,
- converter.NameOfCPURegister(input_reg), TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- AddObjectTaggedValue(tagged_value);
- } else {
- double double_value = static_cast<double>(static_cast<uint32_t>(value));
- AddObjectDoubleValue(double_value);
- }
- return;
- }
-
- case Translation::BOOL_REGISTER: {
- int input_reg = iterator->Next();
- uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot), field_index);
- PrintF(trace_scope_->file(), "%" V8PRIuPTR " ; bool %s (%s)\n", value,
- converter.NameOfCPURegister(input_reg), TraceValueType(is_smi));
- }
- if (value == 0) {
- AddObjectTaggedValue(
- reinterpret_cast<intptr_t>(isolate_->heap()->false_value()));
- } else {
- DCHECK_EQ(1U, value);
- AddObjectTaggedValue(
- reinterpret_cast<intptr_t>(isolate_->heap()->true_value()));
- }
- return;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int input_reg = iterator->Next();
- double value = input_->GetDoubleRegister(input_reg);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "%e ; %s\n", value,
- DoubleRegister::AllocationIndexToString(input_reg));
- }
- AddObjectDoubleValue(value);
- return;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint(
- trace_scope_->file());
- PrintF(trace_scope_->file(),
- "\n");
- }
- AddObjectTaggedValue(input_value);
- return;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = Smi::IsValid(value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "%" V8PRIdPTR " ; [sp + %d] (%s)\n",
- value, input_offset, TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- AddObjectTaggedValue(tagged_value);
- } else {
- double double_value = static_cast<double>(static_cast<int32_t>(value));
- AddObjectDoubleValue(double_value);
- }
- return;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- uintptr_t value =
- static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(), "%" V8PRIuPTR " ; [sp + %d] (uint %s)\n",
- value, input_offset, TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- AddObjectTaggedValue(tagged_value);
- } else {
- double double_value = static_cast<double>(static_cast<uint32_t>(value));
- AddObjectDoubleValue(double_value);
- }
- return;
- }
-
- case Translation::BOOL_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- uintptr_t value =
- static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot), field_index);
- PrintF(trace_scope_->file(), "%" V8PRIuPTR " ; [sp + %d] (bool %s)\n",
- value, input_offset, TraceValueType(is_smi));
- }
- if (value == 0) {
- AddObjectTaggedValue(
- reinterpret_cast<intptr_t>(isolate_->heap()->false_value()));
- } else {
- DCHECK_EQ(1U, value);
- AddObjectTaggedValue(
- reinterpret_cast<intptr_t>(isolate_->heap()->true_value()));
- }
- return;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- PrintF(trace_scope_->file(),
- "%e ; [sp + %d]\n", value, input_offset);
- }
- AddObjectDoubleValue(value);
- return;
- }
-
- case Translation::LITERAL: {
- Object* literal = ComputeLiteral(iterator->Next());
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- literal->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(),
- " ; literal\n");
- }
- intptr_t value = reinterpret_cast<intptr_t>(literal);
- AddObjectTaggedValue(value);
- return;
- }
-
- case Translation::DUPLICATED_OBJECT: {
- int object_index = iterator->Next();
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(),
- " ; duplicate of object #%d\n", object_index);
- }
- // Use the materialization marker value as a sentinel and fill in
- // the object after the deoptimized frame is built.
- intptr_t value = reinterpret_cast<intptr_t>(
- isolate_->heap()->arguments_marker());
- AddObjectDuplication(0, object_index);
- AddObjectTaggedValue(value);
- return;
- }
-
- case Translation::ARGUMENTS_OBJECT:
- case Translation::CAPTURED_OBJECT: {
- int length = iterator->Next();
- bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
- reinterpret_cast<intptr_t>(object_slot),
- field_index);
- isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(),
- " ; object (length = %d, is_args = %d)\n", length, is_args);
- }
- // Use the materialization marker value as a sentinel and fill in
- // the object after the deoptimized frame is built.
- intptr_t value = reinterpret_cast<intptr_t>(
- isolate_->heap()->arguments_marker());
- AddObjectStart(0, length, is_args);
- AddObjectTaggedValue(value);
- // We save the object values on the side and materialize the actual
- // object after the deoptimized frame is built.
- int object_index = deferred_objects_.length() - 1;
- for (int i = 0; i < length; i++) {
- DoTranslateObject(iterator, object_index, i);
- }
- return;
+ if (value == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ if (output_address_for_materialization == nullptr) {
+ output_address_for_materialization = output_address;
}
+ values_to_materialize_.push_back(
+ {output_address_for_materialization, *iterator});
}
- FATAL("Unexpected translation opcode");
+ (*iterator)++;
+ (*input_index)++;
}
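
WriteTranslatedValueToOutput always stores something GC-safe into the slot first; only when the raw value is the arguments marker does it queue the slot for later materialization. The same defer-on-sentinel pattern in isolation, where kArgumentsMarker is an arbitrary stand-in rather than V8's real marker object:

#include <cstdint>
#include <vector>

const std::intptr_t kArgumentsMarker = 0x5A5A;  // stand-in sentinel

struct Deferred {
  std::intptr_t* slot;
  int translated_index;  // which translated value fills this slot later
};

void WriteValue(std::intptr_t value, std::intptr_t* slot, int index,
                std::vector<Deferred>* deferred) {
  *slot = value;  // always store something GC-safe first
  if (value == kArgumentsMarker) {
    // Not materialized yet: remember the slot and fill it in later.
    deferred->push_back({slot, index});
  }
}

int main() {
  std::vector<Deferred> deferred;
  std::intptr_t slots[2] = {0, 0};
  WriteValue(42, &slots[0], 0, &deferred);                // ordinary value
  WriteValue(kArgumentsMarker, &slots[1], 1, &deferred);  // deferred value
  // Exactly one slot is waiting to be patched after frame construction.
  return deferred.size() == 1 ? 0 : 1;
}
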
-void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset) {
- disasm::NameConverter converter;
- // A GC-safe temporary placeholder that we can put in the output frame.
- const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
- FATAL("Unexpected translation opcode");
- return;
-
- case Translation::REGISTER: {
- int input_reg = iterator->Next();
- intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_scope_ != NULL) {
- PrintF(
- trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- input_value,
- converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint(
- trace_scope_->file());
- PrintF(trace_scope_->file(), "\n");
- }
- output_[frame_index]->SetFrameSlot(output_offset, input_value);
- return;
- }
-
- case Translation::INT32_REGISTER: {
- int input_reg = iterator->Next();
- intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = Smi::IsValid(value);
- if (trace_scope_ != NULL) {
- PrintF(
- trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<int32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::UINT32_REGISTER: {
- int input_reg = iterator->Next();
- uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
- if (trace_scope_ != NULL) {
- PrintF(
- trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
- " ; uint %s (%s)\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<uint32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::BOOL_REGISTER: {
- int input_reg = iterator->Next();
- uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
- " ; bool %s (%s)\n",
- output_[frame_index]->GetTop() + output_offset, output_offset,
- value, converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi));
- }
- if (value == 0) {
- output_[frame_index]->SetFrameSlot(
- output_offset,
- reinterpret_cast<intptr_t>(isolate_->heap()->false_value()));
- } else {
- DCHECK_EQ(1U, value);
- output_[frame_index]->SetFrameSlot(
- output_offset,
- reinterpret_cast<intptr_t>(isolate_->heap()->true_value()));
- }
- return;
- }
-
- case Translation::DOUBLE_REGISTER: {
- int input_reg = iterator->Next();
- double value = input_->GetDoubleRegister(input_reg);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- DoubleRegister::AllocationIndexToString(input_reg));
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
-
- case Translation::STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF(trace_scope_->file(),
- "[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
- output_offset,
- input_value,
- input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint(
- trace_scope_->file());
- PrintF(trace_scope_->file(), "\n");
- }
- output_[frame_index]->SetFrameSlot(output_offset, input_value);
- return;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = Smi::IsValid(value);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF(trace_scope_->file(),
- "[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
- output_offset,
- value,
- input_offset,
- TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<int32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- uintptr_t value =
- static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF(trace_scope_->file(),
- "[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
- output_offset,
- value,
- input_offset,
- TraceValueType(is_smi));
- }
- if (is_smi) {
- intptr_t tagged_value =
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else {
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
- static_cast<double>(static_cast<uint32_t>(value)));
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- }
- return;
- }
-
- case Translation::BOOL_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- uintptr_t value =
- static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(), " 0x%08" V8PRIxPTR ": ",
- output_[frame_index]->GetTop() + output_offset);
- PrintF(trace_scope_->file(),
- "[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
- output_offset, value, input_offset, TraceValueType(is_smi));
- }
- if (value == 0) {
- output_[frame_index]->SetFrameSlot(
- output_offset,
- reinterpret_cast<intptr_t>(isolate_->heap()->false_value()));
- } else {
- DCHECK_EQ(1U, value);
- output_[frame_index]->SetFrameSlot(
- output_offset,
- reinterpret_cast<intptr_t>(isolate_->heap()->true_value()));
- }
- return;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- int input_slot_index = iterator->Next();
- unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
- double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
- output_[frame_index]->GetTop() + output_offset,
- output_offset,
- value,
- input_offset);
- }
- // We save the untagged value on the side and store a GC-safe
- // temporary placeholder in the frame.
- AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
- output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
- return;
- }
+void Deoptimizer::WriteValueToOutput(Object* value, int input_index,
+ int frame_index, unsigned output_offset,
+ const char* debug_hint_string) {
+ output_[frame_index]->SetFrameSlot(output_offset,
+ reinterpret_cast<intptr_t>(value));
- case Translation::LITERAL: {
- Object* literal = ComputeLiteral(iterator->Next());
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- literal->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(), " ; literal\n");
- }
- intptr_t value = reinterpret_cast<intptr_t>(literal);
- output_[frame_index]->SetFrameSlot(output_offset, value);
- return;
- }
+ if (trace_scope_ != nullptr) {
+ DebugPrintOutputSlot(reinterpret_cast<intptr_t>(value), frame_index,
+ output_offset, debug_hint_string);
+ value->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(), " (input #%d)\n", input_index);
+ }
+}
- case Translation::DUPLICATED_OBJECT: {
- int object_index = iterator->Next();
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(),
- " ; duplicate of object #%d\n", object_index);
- }
- // Use the materialization marker value as a sentinel and fill in
- // the object after the deoptimized frame is built.
- intptr_t value = reinterpret_cast<intptr_t>(
- isolate_->heap()->arguments_marker());
- AddObjectDuplication(output_[frame_index]->GetTop() + output_offset,
- object_index);
- output_[frame_index]->SetFrameSlot(output_offset, value);
- return;
- }
- case Translation::ARGUMENTS_OBJECT:
- case Translation::CAPTURED_OBJECT: {
- int length = iterator->Next();
- bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " 0x%08" V8PRIxPTR ": [top + %d] <- ",
- output_[frame_index]->GetTop() + output_offset,
- output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
- PrintF(trace_scope_->file(),
- " ; object (length = %d, is_args = %d)\n", length, is_args);
- }
- // Use the materialization marker value as a sentinel and fill in
- // the object after the deoptimized frame is built.
- intptr_t value = reinterpret_cast<intptr_t>(
- isolate_->heap()->arguments_marker());
- AddObjectStart(output_[frame_index]->GetTop() + output_offset,
- length, is_args);
- output_[frame_index]->SetFrameSlot(output_offset, value);
- // We save the object values on the side and materialize the actual
- // object after the deoptimized frame is built.
- int object_index = deferred_objects_.length() - 1;
- for (int i = 0; i < length; i++) {
- DoTranslateObject(iterator, object_index, i);
- }
- return;
- }
+void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
+ unsigned output_offset,
+ const char* debug_hint_string) {
+ if (trace_scope_ != nullptr) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s",
+ reinterpret_cast<intptr_t>(output_address), output_offset, value,
+ debug_hint_string == nullptr ? "" : debug_hint_string);
}
}
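
With tracing enabled, each call emits one line in the format above; the debug hint supplies the trailing text and its newline, which is why every caller's hint string ends in \n. A small sketch reproducing the format with made-up values:

#include <cinttypes>
#include <cstdio>

int main() {
  // Made-up values; the format mirrors DebugPrintOutputSlot's PrintF.
  std::uintptr_t output_address = 0x7fff5000;
  unsigned output_offset = 24;
  std::uintptr_t value = 0x2a;
  std::printf("    0x%08" PRIxPTR ": [top + %u] <- 0x%08" PRIxPTR " ; %s",
              output_address, output_offset, value, "caller's pc\n");
  return 0;
}
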
@@ -2800,7 +1739,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
unsigned result = fixed_size + fp_to_sp_delta_ -
- StandardFrameConstants::kFixedFrameSizeFromFp;
+ StandardFrameConstants::kFixedFrameSizeFromFp;
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
@@ -2814,7 +1753,7 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
return ComputeIncomingArgumentSize(function) +
- StandardFrameConstants::kFixedFrameSize;
+ StandardFrameConstants::kFixedFrameSize;
}
@@ -2832,55 +1771,21 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
return height * kPointerSize;
}
Object* Deoptimizer::ComputeLiteral(int index) const {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
FixedArray* literals = data->LiteralArray();
return literals->get(index);
}
-void Deoptimizer::AddObjectStart(intptr_t slot, int length, bool is_args) {
- ObjectMaterializationDescriptor object_desc(
- reinterpret_cast<Address>(slot), jsframe_count_, length, -1, is_args);
- deferred_objects_.Add(object_desc);
-}
-
-
-void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
- ObjectMaterializationDescriptor object_desc(
- reinterpret_cast<Address>(slot), jsframe_count_, -1, object_index, false);
- deferred_objects_.Add(object_desc);
-}
-
-
-void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
- deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
-}
-
-
-void Deoptimizer::AddObjectDoubleValue(double value) {
- deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
- HeapNumberMaterializationDescriptor<int> value_desc(
- deferred_objects_tagged_values_.length() - 1, value);
- deferred_objects_double_values_.Add(value_desc);
-}
-
-
-void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
- HeapNumberMaterializationDescriptor<Address> value_desc(
- reinterpret_cast<Address>(slot_address), value);
- deferred_heap_numbers_.Add(value_desc);
-}
-
-
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
@@ -2911,7 +1816,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
"Deoptimizer::EnsureCodeForDeoptimizationEntry");
}
CopyBytes(chunk->area_start(), desc.buffer,
- static_cast<size_t>(desc.instr_size));
+ static_cast<size_t>(desc.instr_size));
CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
data->deopt_entry_code_entries_[type] = entry_count;
@@ -3082,8 +1987,9 @@ void Translation::BeginJSFrame(BailoutId node_id,
}
-void Translation::BeginCompiledStubFrame() {
+void Translation::BeginCompiledStubFrame(int height) {
buffer_->Add(COMPILED_STUB_FRAME, zone());
+ buffer_->Add(height, zone());
}
@@ -3181,8 +2087,15 @@ void Translation::StoreArgumentsObject(bool args_known,
}
+void Translation::StoreJSFrameFunction() {
+ buffer_->Add(JS_FRAME_FUNCTION, zone());
+}
+
+
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
+ case JS_FRAME_FUNCTION:
+ return 0;
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
@@ -3228,14 +2141,661 @@ const char* Translation::StringFor(Opcode opcode) {
#endif
+Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ return Handle<FixedArray>::null();
+ }
+ Handle<FixedArray> array = GetStackEntries();
+ CHECK_GT(array->length(), index);
+ return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate()));
+}
+
+
+void MaterializedObjectStore::Set(Address fp,
+ Handle<FixedArray> materialized_objects) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ index = frame_fps_.length();
+ frame_fps_.Add(fp);
+ }
+
+ Handle<FixedArray> array = EnsureStackEntries(index + 1);
+ array->set(index, *materialized_objects);
+}
+
+
+bool MaterializedObjectStore::Remove(Address fp) {
+ int index = StackIdToIndex(fp);
+ if (index == -1) {
+ return false;
+ }
+ CHECK_GE(index, 0);
+
+ frame_fps_.Remove(index);
+ FixedArray* array = isolate()->heap()->materialized_objects();
+ CHECK_LT(index, array->length());
+ for (int i = index; i < frame_fps_.length(); i++) {
+ array->set(i, array->get(i + 1));
+ }
+ array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
+ return true;
+}
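+
+// Note: removal keeps frame_fps_ and the materialized_objects array in
+// sync: entries after the removed index are shifted left by one and the
+// vacated tail slot is cleared, so StackIdToIndex() stays a dense map
+// from frame pointers to array slots.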
+
+
+int MaterializedObjectStore::StackIdToIndex(Address fp) {
+ for (int i = 0; i < frame_fps_.length(); i++) {
+ if (frame_fps_[i] == fp) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
+ return Handle<FixedArray>(isolate()->heap()->materialized_objects());
+}
+
+
+Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
+ Handle<FixedArray> array = GetStackEntries();
+ if (array->length() >= length) {
+ return array;
+ }
+
+ int new_length = length > 10 ? length : 10;
+ if (new_length < 2 * array->length()) {
+ new_length = 2 * array->length();
+ }
+
+ Handle<FixedArray> new_array =
+ isolate()->factory()->NewFixedArray(new_length, TENURED);
+ for (int i = 0; i < array->length(); i++) {
+ new_array->set(i, array->get(i));
+ }
+ for (int i = array->length(); i < length; i++) {
+ new_array->set(i, isolate()->heap()->undefined_value());
+ }
+ isolate()->heap()->public_set_materialized_objects(*new_array);
+ return new_array;
+}
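+
+// Growth sketch: with a current capacity of 12 and a request for 13
+// entries, new_length starts at max(13, 10) == 13 and is then bumped to
+// 2 * 12 == 24, so the backing store grows geometrically instead of one
+// slot per frame.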
+
+
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
+ int frame_index,
+ bool has_arguments_adaptor,
+ bool has_construct_stub) {
+ FrameDescription* output_frame = deoptimizer->output_[frame_index];
+ function_ = output_frame->GetFunction();
+ context_ = reinterpret_cast<Object*>(output_frame->GetContext());
+ has_construct_stub_ = has_construct_stub;
+ expression_count_ = output_frame->GetExpressionCount();
+  expression_stack_ = new Object*[expression_count_];
+ // Get the source position using the unoptimized code.
+ Address pc = reinterpret_cast<Address>(output_frame->GetPc());
+ Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
+ source_position_ = code->SourcePosition(pc);
+
+ for (int i = 0; i < expression_count_; i++) {
+ SetExpression(i, output_frame->GetExpression(i));
+ }
+
+ if (has_arguments_adaptor) {
+ output_frame = deoptimizer->output_[frame_index - 1];
+ CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR);
+ }
+
+ parameters_count_ = output_frame->ComputeParametersCount();
+  parameters_ = new Object*[parameters_count_];
+ for (int i = 0; i < parameters_count_; i++) {
+ SetParameter(i, output_frame->GetParameter(i));
+ }
+}
+
+
+DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
+ delete[] expression_stack_;
+ delete[] parameters_;
+}
+
+
+void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
+ v->VisitPointer(bit_cast<Object**>(&function_));
+ v->VisitPointer(&context_);
+ v->VisitPointers(parameters_, parameters_ + parameters_count_);
+ v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
+}
+
+
+const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
+ DCHECK(deopt_reason < kLastDeoptReason);
+#define DEOPT_MESSAGES_TEXTS(C, T) T,
+ static const char* deopt_messages_[] = {
+ DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_TEXTS)};
+#undef DEOPT_MESSAGES_TEXTS
+ return deopt_messages_[deopt_reason];
+}
+
+
+Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
+ SourcePosition last_position = SourcePosition::Unknown();
+ Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
+ RelocInfo::ModeMask(RelocInfo::POSITION);
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->pc() >= pc) return DeoptInfo(last_position, NULL, last_reason);
+ if (info->rmode() == RelocInfo::POSITION) {
+ int raw_position = static_cast<int>(info->data());
+ last_position = raw_position ? SourcePosition::FromRaw(raw_position)
+ : SourcePosition::Unknown();
+ } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
+ last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
+ }
+ }
+ return DeoptInfo(SourcePosition::Unknown(), NULL, Deoptimizer::kNoReason);
+}
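+
+// The scan above visits relocation entries in address order and keeps the
+// last POSITION and DEOPT_REASON seen before pc, so the returned info
+// describes the annotations recorded closest to (but not past) the deopt
+// site.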
+
+
+// static
+TranslatedValue TranslatedValue::NewArgumentsObject(TranslatedState* container,
+ int length,
+ int object_index) {
+ TranslatedValue slot(container, kArgumentsObject);
+ slot.materialization_info_ = {object_index, length};
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
+ int length,
+ int object_index) {
+ TranslatedValue slot(container, kCapturedObject);
+ slot.materialization_info_ = {object_index, length};
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
+ int id) {
+ TranslatedValue slot(container, kDuplicatedObject);
+ slot.materialization_info_ = {id, -1};
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
+ double value) {
+ TranslatedValue slot(container, kDouble);
+ slot.double_value_ = value;
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewInt32(TranslatedState* container,
+ int32_t value) {
+ TranslatedValue slot(container, kInt32);
+ slot.int32_value_ = value;
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container,
+ uint32_t value) {
+ TranslatedValue slot(container, kUInt32);
+ slot.uint32_value_ = value;
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
+ uint32_t value) {
+ TranslatedValue slot(container, kBoolBit);
+ slot.uint32_value_ = value;
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewTagged(TranslatedState* container,
+ Object* literal) {
+ TranslatedValue slot(container, kTagged);
+ slot.raw_literal_ = literal;
+ return slot;
+}
+
+
+// static
+TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
+ return TranslatedValue(container, kInvalid);
+}
+
+
+Isolate* TranslatedValue::isolate() const { return container_->isolate(); }
+
+
+Object* TranslatedValue::raw_literal() const {
+ DCHECK_EQ(kTagged, kind());
+ return raw_literal_;
+}
+
+
+int32_t TranslatedValue::int32_value() const {
+ DCHECK_EQ(kInt32, kind());
+ return int32_value_;
+}
+
+
+uint32_t TranslatedValue::uint32_value() const {
+ DCHECK(kind() == kUInt32 || kind() == kBoolBit);
+ return uint32_value_;
+}
+
+
+double TranslatedValue::double_value() const {
+ DCHECK_EQ(kDouble, kind());
+ return double_value_;
+}
+
+
+int TranslatedValue::object_length() const {
+ DCHECK(kind() == kArgumentsObject || kind() == kCapturedObject);
+ return materialization_info_.length_;
+}
+
+
+int TranslatedValue::object_index() const {
+ DCHECK(kind() == kArgumentsObject || kind() == kCapturedObject ||
+ kind() == kDuplicatedObject);
+ return materialization_info_.id_;
+}
+
+
+Object* TranslatedValue::GetRawValue() const {
+ // If we have a value, return it.
+ Handle<Object> result_handle;
+ if (value_.ToHandle(&result_handle)) {
+ return *result_handle;
+ }
+
+ // Otherwise, do a best effort to get the value without allocation.
+ switch (kind()) {
+ case kTagged:
+ return raw_literal();
+
+ case kInt32: {
+ bool is_smi = Smi::IsValid(int32_value());
+ if (is_smi) {
+ return Smi::FromInt(int32_value());
+ }
+ break;
+ }
+
+ case kUInt32: {
+ bool is_smi = (uint32_value() <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (is_smi) {
+ return Smi::FromInt(static_cast<int32_t>(uint32_value()));
+ }
+ break;
+ }
+
+ case kDouble: {
+ int int_value = FastD2IChecked(double_value());
+ bool is_smi = !IsMinusZero(double_value()) &&
+ double_value() == int_value && Smi::IsValid(int_value);
+ if (is_smi) {
+ return Smi::FromInt(static_cast<int32_t>(int_value));
+ }
+ break;
+ }
+
+ case kBoolBit: {
+ if (uint32_value() == 0) {
+ return isolate()->heap()->false_value();
+ } else {
+ CHECK_EQ(1U, uint32_value());
+ return isolate()->heap()->true_value();
+ }
+ }
+
+ default:
+ break;
+ }
+
+ // If we could not get the value without allocation, return the arguments
+ // marker.
+ return isolate()->heap()->arguments_marker();
+}
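+
+// For example, an int32 slot holding 42 comes back as Smi::FromInt(42)
+// with no allocation, while a double like 0.5 (or -0.0, which must stay a
+// heap number) falls through to the arguments marker and is materialized
+// later by MaterializeSimple().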
+
+
+Handle<Object> TranslatedValue::GetValue() {
+ Handle<Object> result;
+ // If we already have a value, then get it.
+ if (value_.ToHandle(&result)) return result;
+
+ // Otherwise we have to materialize.
+ switch (kind()) {
+ case TranslatedValue::kTagged:
+ case TranslatedValue::kInt32:
+ case TranslatedValue::kUInt32:
+ case TranslatedValue::kBoolBit:
+ case TranslatedValue::kDouble: {
+ MaterializeSimple();
+ return value_.ToHandleChecked();
+ }
+
+ case TranslatedValue::kArgumentsObject:
+ case TranslatedValue::kCapturedObject:
+ case TranslatedValue::kDuplicatedObject:
+ return container_->MaterializeObjectAt(object_index());
+
+ case TranslatedValue::kInvalid:
+ FATAL("unexpected case");
+ return Handle<Object>::null();
+ }
+
+ FATAL("internal error: value missing");
+ return Handle<Object>::null();
+}
+
+
+void TranslatedValue::MaterializeSimple() {
+ // If we already have materialized, return.
+ if (!value_.is_null()) return;
+
+ Object* raw_value = GetRawValue();
+ if (raw_value != isolate()->heap()->arguments_marker()) {
+ // We can get the value without allocation, just return it here.
+ value_ = Handle<Object>(raw_value, isolate());
+ return;
+ }
+
+ switch (kind()) {
+ case kInt32: {
+ value_ = Handle<Object>(isolate()->factory()->NewNumber(int32_value()));
+ return;
+ }
+
+ case kUInt32:
+ value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
+ return;
+
+ case kDouble:
+ value_ = Handle<Object>(isolate()->factory()->NewNumber(double_value()));
+ return;
+
+ case kCapturedObject:
+ case kDuplicatedObject:
+ case kArgumentsObject:
+ case kInvalid:
+ case kTagged:
+ case kBoolBit:
+ FATAL("internal error: unexpected materialization.");
+ break;
+ }
+}
+
+
+bool TranslatedValue::IsMaterializedObject() const {
+ switch (kind()) {
+ case kCapturedObject:
+ case kDuplicatedObject:
+ case kArgumentsObject:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+int TranslatedValue::GetChildrenCount() const {
+ if (kind() == kCapturedObject || kind() == kArgumentsObject) {
+ return object_length();
+ } else {
+ return 0;
+ }
+}
+
+
+int TranslatedState::SlotOffsetFp(int slot_index) {
+ if (slot_index >= 0) {
+ const int offset = StandardFrameConstants::kExpressionsOffset;
+ return offset - (slot_index * kPointerSize);
+ } else {
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return offset - ((slot_index + 1) * kPointerSize);
+ }
+}
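+
+// Non-negative indices address expression-stack slots below the frame
+// pointer; negative indices address the caller's parameter area, e.g.
+// slot_index == -1 maps to kCallerSPOffset itself and more negative
+// indices move further up into the incoming arguments.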
+
+
+Address TranslatedState::SlotAddress(Address fp, int slot_index) {
+ return fp + SlotOffsetFp(slot_index);
+}
+
+
+uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
+ Address address = fp + slot_offset;
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+ return Memory::uint32_at(address + kIntSize);
+#else
+ return Memory::uint32_at(address);
+#endif
+}
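+
+// On 64-bit big-endian targets the low 32 bits of a pointer-sized stack
+// slot sit at the higher address, hence the extra kIntSize offset;
+// little-endian targets read the value from the slot base.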
+
+
+void TranslatedValue::Handlify() {
+ if (kind() == kTagged) {
+ value_ = Handle<Object>(raw_literal(), isolate());
+ raw_literal_ = nullptr;
+ }
+}
+
+
+TranslatedFrame TranslatedFrame::JSFrame(BailoutId node_id,
+ SharedFunctionInfo* shared_info,
+ int height) {
+ TranslatedFrame frame(kFunction, shared_info->GetIsolate(), shared_info,
+ height);
+ frame.node_id_ = node_id;
+ return frame;
+}
+
+
+TranslatedFrame TranslatedFrame::AccessorFrame(
+ Kind kind, SharedFunctionInfo* shared_info) {
+ DCHECK(kind == kSetter || kind == kGetter);
+ return TranslatedFrame(kind, shared_info->GetIsolate(), shared_info);
+}
+
+
+TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
+ SharedFunctionInfo* shared_info, int height) {
+ return TranslatedFrame(kArgumentsAdaptor, shared_info->GetIsolate(),
+ shared_info, height);
+}
+
+
+TranslatedFrame TranslatedFrame::ConstructStubFrame(
+ SharedFunctionInfo* shared_info, int height) {
+ return TranslatedFrame(kConstructStub, shared_info->GetIsolate(), shared_info,
+ height);
+}
+
+
+int TranslatedFrame::GetValueCount() {
+ switch (kind()) {
+ case kFunction: {
+ int parameter_count =
+ raw_shared_info_->internal_formal_parameter_count() + 1;
+ return height_ + parameter_count + 1;
+ }
+
+ case kGetter:
+ return 2; // Function and receiver.
+
+ case kSetter:
+ return 3; // Function, receiver and the value to set.
+
+ case kArgumentsAdaptor:
+ case kConstructStub:
+ return 1 + height_;
+
+ case kCompiledStub:
+ return height_;
+
+ case kInvalid:
+ UNREACHABLE();
+ break;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+void TranslatedFrame::Handlify() {
+ if (raw_shared_info_ != nullptr) {
+ shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_);
+ raw_shared_info_ = nullptr;
+ }
+ for (auto& value : values_) {
+ value.Handlify();
+ }
+}
+
+
+TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
+ TranslationIterator* iterator, FixedArray* literal_array, Address fp,
+ JSFunction* frame_function, FILE* trace_file) {
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ switch (opcode) {
+ case Translation::JS_FRAME: {
+ BailoutId node_id = BailoutId(iterator->Next());
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading input frame %s", name.get());
+ int arg_count = shared_info->internal_formal_parameter_count() + 1;
+ PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
+               node_id.ToInt(), arg_count, height);
+ }
+ return TranslatedFrame::JSFrame(node_id, shared_info, height);
+ }
+
+ case Translation::ARGUMENTS_ADAPTOR_FRAME: {
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
+ PrintF(trace_file, " => height=%d; inputs:\n", height);
+ }
+ return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
+ }
+
+ case Translation::CONSTRUCT_STUB_FRAME: {
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading construct stub frame %s", name.get());
+ PrintF(trace_file, " => height=%d; inputs:\n", height);
+ }
+ return TranslatedFrame::ConstructStubFrame(shared_info, height);
+ }
+
+ case Translation::GETTER_STUB_FRAME: {
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ if (trace_file != nullptr) {
+ SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading getter frame %s; inputs:\n", name.get());
+ }
+ return TranslatedFrame::AccessorFrame(TranslatedFrame::kGetter,
+ shared_info);
+ }
+
+ case Translation::SETTER_STUB_FRAME: {
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ if (trace_file != nullptr) {
+ SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading setter frame %s; inputs:\n", name.get());
+ }
+ return TranslatedFrame::AccessorFrame(TranslatedFrame::kSetter,
+ shared_info);
+ }
+
+ case Translation::COMPILED_STUB_FRAME: {
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ PrintF(trace_file,
+ " reading compiler stub frame => height=%d; inputs:\n", height);
+ }
+ return TranslatedFrame::CompiledStubFrame(height,
+ literal_array->GetIsolate());
+ }
+
+ case Translation::BEGIN:
+ case Translation::DUPLICATED_OBJECT:
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT:
+ case Translation::REGISTER:
+ case Translation::INT32_REGISTER:
+ case Translation::UINT32_REGISTER:
+ case Translation::BOOL_REGISTER:
+ case Translation::DOUBLE_REGISTER:
+ case Translation::STACK_SLOT:
+ case Translation::INT32_STACK_SLOT:
+ case Translation::UINT32_STACK_SLOT:
+ case Translation::BOOL_STACK_SLOT:
+ case Translation::DOUBLE_STACK_SLOT:
+ case Translation::LITERAL:
+ case Translation::JS_FRAME_FUNCTION:
+ break;
+ }
+ FATAL("We should never get here - unexpected deopt info.");
+ return TranslatedFrame::InvalidFrame();
+}
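+
+// The frame opcodes above only describe frame shape; the value-producing
+// opcodes listed in the final switch arm are decoded one at a time by
+// CreateNextTranslatedValue() below.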
+
+
+// static
+void TranslatedFrame::AdvanceIterator(
+ std::deque<TranslatedValue>::iterator* iter) {
+ int values_to_skip = 1;
+ while (values_to_skip > 0) {
+ // Consume the current element.
+ values_to_skip--;
+ // Add all the children.
+ values_to_skip += (*iter)->GetChildrenCount();
+
+ (*iter)++;
+ }
+}
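+
+// Skipping one element also skips its nested object fields: every
+// captured object contributes GetChildrenCount() additional values in
+// depth-first order, so the iterator always advances to the next
+// top-level value of the frame.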
+
+
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
-SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
- Translation::Opcode opcode,
- TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame) {
+TranslatedValue TranslatedState::CreateNextTranslatedValue(
+ int frame_index, int value_index, TranslationIterator* iterator,
+ FixedArray* literal_array, Address fp, RegisterValues* registers,
+ FILE* trace_file) {
+ disasm::NameConverter converter;
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
case Translation::BEGIN:
case Translation::JS_FRAME:
@@ -3243,356 +2803,376 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
// Peeled off before getting here.
break;
case Translation::DUPLICATED_OBJECT: {
- return SlotRef::NewDuplicateObject(iterator->Next());
+ int object_id = iterator->Next();
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "duplicated object #%d", object_id);
+ }
+ object_positions_.push_back(object_positions_[object_id]);
+ return TranslatedValue::NewDuplicateObject(this, object_id);
}
- case Translation::ARGUMENTS_OBJECT:
- return SlotRef::NewArgumentsObject(iterator->Next());
+ case Translation::ARGUMENTS_OBJECT: {
+ int arg_count = iterator->Next();
+ int object_index = static_cast<int>(object_positions_.size());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "argumets object #%d (length = %d)", object_index,
+ arg_count);
+ }
+ object_positions_.push_back({frame_index, value_index});
+ return TranslatedValue::NewArgumentsObject(this, arg_count, object_index);
+ }
case Translation::CAPTURED_OBJECT: {
- return SlotRef::NewDeferredObject(iterator->Next());
+ int field_count = iterator->Next();
+ int object_index = static_cast<int>(object_positions_.size());
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "captured object #%d (length = %d)", object_index,
+ field_count);
+ }
+ object_positions_.push_back({frame_index, value_index});
+ return TranslatedValue::NewDeferredObject(this, field_count,
+ object_index);
}
- case Translation::REGISTER:
- case Translation::INT32_REGISTER:
- case Translation::UINT32_REGISTER:
- case Translation::BOOL_REGISTER:
- case Translation::DOUBLE_REGISTER:
- // We are at safepoint which corresponds to call. All registers are
- // saved by caller so there would be no live registers at this
- // point. Thus these translation commands should not be used.
- break;
+ case Translation::REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "0x%08" V8PRIxPTR " ; %s ", value,
+ converter.NameOfCPURegister(input_reg));
+ reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ }
+ return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
+ }
+
+ case Translation::INT32_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s ", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ return TranslatedValue::NewInt32(this, static_cast<int32_t>(value));
+ }
+
+ case Translation::UINT32_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ return TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value));
+ }
+
+ case Translation::BOOL_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+ intptr_t value = registers->GetRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%" V8PRIdPTR " ; %s (bool)", value,
+ converter.NameOfCPURegister(input_reg));
+ }
+ return TranslatedValue::NewBool(this, static_cast<uint32_t>(value));
+ }
+
+ case Translation::DOUBLE_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+ double value = registers->GetDoubleRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; %s (bool)", value,
+ DoubleRegister::AllocationIndexToString(input_reg));
+ }
+ return TranslatedValue::NewDouble(this, value);
+ }
case Translation::STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::TAGGED);
+ int slot_offset = SlotOffsetFp(iterator->Next());
+ intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "0x%08" V8PRIxPTR " ; [fp %c %d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ }
+ return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
}
case Translation::INT32_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::INT32);
+ int slot_offset = SlotOffsetFp(iterator->Next());
+ uint32_t value = GetUInt32Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%d ; (int) [fp %c %d] ",
+ static_cast<int32_t>(value), slot_offset < 0 ? '-' : '+',
+ std::abs(slot_offset));
+ }
+ return TranslatedValue::NewInt32(this, value);
}
case Translation::UINT32_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::UINT32);
+ int slot_offset = SlotOffsetFp(iterator->Next());
+ uint32_t value = GetUInt32Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%u ; (uint) [fp %c %d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ return TranslatedValue::NewUInt32(this, value);
}
case Translation::BOOL_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::BOOLBIT);
+ int slot_offset = SlotOffsetFp(iterator->Next());
+ uint32_t value = GetUInt32Slot(fp, slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%u ; (bool) [fp %c %d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ return TranslatedValue::NewBool(this, value);
}
case Translation::DOUBLE_STACK_SLOT: {
- int slot_index = iterator->Next();
- Address slot_addr = SlotAddress(frame, slot_index);
- return SlotRef(slot_addr, SlotRef::DOUBLE);
+ int slot_offset = SlotOffsetFp(iterator->Next());
+ double value = ReadDoubleValue(fp + slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; (double) [fp %c %d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ return TranslatedValue::NewDouble(this, value);
}
case Translation::LITERAL: {
int literal_index = iterator->Next();
- return SlotRef(data->GetIsolate(),
- data->LiteralArray()->get(literal_index));
+ Object* value = literal_array->get(literal_index);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "0x%08" V8PRIxPTR " ; (literal %d) ",
+ reinterpret_cast<intptr_t>(value), literal_index);
+ reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ }
+
+ return TranslatedValue::NewTagged(this, value);
}
- case Translation::COMPILED_STUB_FRAME:
- UNREACHABLE();
- break;
+ case Translation::JS_FRAME_FUNCTION: {
+ int slot_offset = JavaScriptFrameConstants::kFunctionOffset;
+ intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "0x%08" V8PRIxPTR " ; (frame function) ", value);
+ reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
+ }
+ return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
+ }
}
FATAL("We should never get here - unexpected deopt info.");
- return SlotRef();
+ return TranslatedValue(nullptr, TranslatedValue::kInvalid);
}
-SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
- int inlined_jsframe_index,
- int formal_parameter_count)
- : current_slot_(0),
- args_length_(-1),
- first_slot_index_(-1),
- should_deoptimize_(false) {
- DisallowHeapAllocation no_gc;
-
+TranslatedState::TranslatedState(JavaScriptFrame* frame)
+ : isolate_(nullptr),
+ stack_frame_pointer_(nullptr),
+ has_adapted_arguments_(false) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- CHECK_EQ(opcode, Translation::BEGIN);
- it.Next(); // Drop frame count.
-
- stack_frame_id_ = frame->fp();
-
- int jsframe_count = it.Next();
- CHECK_GT(jsframe_count, inlined_jsframe_index);
- int jsframes_to_skip = inlined_jsframe_index;
- int number_of_slots = -1; // Number of slots inside our frame (yet unknown)
- while (number_of_slots != 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- bool processed = false;
- if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
- if (jsframes_to_skip == 0) {
- CHECK_EQ(Translation::NumberOfOperandsFor(opcode), 2);
-
- it.Skip(1); // literal id
- int height = it.Next();
-
- // Skip the translation command for the receiver.
- it.Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it.Next())));
-
- // We reached the arguments adaptor frame corresponding to the
- // inlined function in question. Number of arguments is height - 1.
- first_slot_index_ = slot_refs_.length();
- args_length_ = height - 1;
- number_of_slots = height - 1;
- processed = true;
- }
- } else if (opcode == Translation::JS_FRAME) {
- if (jsframes_to_skip == 0) {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
-
- // Skip the translation command for the receiver.
- it.Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it.Next())));
-
- // We reached the frame corresponding to the inlined function
- // in question. Process the translation commands for the
- // arguments. Number of arguments is equal to the number of
- // format parameter count.
- first_slot_index_ = slot_refs_.length();
- args_length_ = formal_parameter_count;
- number_of_slots = formal_parameter_count;
- processed = true;
- }
- jsframes_to_skip--;
- } else if (opcode != Translation::BEGIN &&
- opcode != Translation::CONSTRUCT_STUB_FRAME &&
- opcode != Translation::GETTER_STUB_FRAME &&
- opcode != Translation::SETTER_STUB_FRAME &&
- opcode != Translation::COMPILED_STUB_FRAME) {
- slot_refs_.Add(ComputeSlotForNextArgument(opcode, &it, data, frame));
-
- if (first_slot_index_ >= 0) {
- // We have found the beginning of our frame -> make sure we count
- // the nested slots of captured objects
- number_of_slots--;
- SlotRef& slot = slot_refs_.last();
- CHECK_NE(slot.Representation(), SlotRef::ARGUMENTS_OBJECT);
- number_of_slots += slot.GetChildrenCount();
- if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
- slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
- should_deoptimize_ = true;
- }
- }
-
- processed = true;
- }
- if (!processed) {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- }
- }
- if (should_deoptimize_) {
- List<JSFunction*> functions(2);
- frame->GetFunctions(&functions);
- Deoptimizer::DeoptimizeFunction(functions[0]);
- }
+ Init(frame->fp(), frame->function(), &it, data->LiteralArray(),
+ nullptr /* registers */, nullptr /* trace file */);
}
-Handle<Object> SlotRef::GetValue(Isolate* isolate) {
- switch (representation_) {
- case TAGGED: {
- Handle<Object> value(Memory::Object_at(addr_), isolate);
- if (value->IsMutableHeapNumber()) {
- HeapNumber::cast(*value)->set_map(isolate->heap()->heap_number_map());
- }
- return value;
- }
+TranslatedState::TranslatedState()
+ : isolate_(nullptr),
+ stack_frame_pointer_(nullptr),
+ has_adapted_arguments_(false) {}
- case INT32: {
-#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
- int value = Memory::int32_at(addr_ + kIntSize);
-#else
- int value = Memory::int32_at(addr_);
-#endif
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value), isolate);
- } else {
- return isolate->factory()->NewNumberFromInt(value);
+
+void TranslatedState::Init(Address input_frame_pointer,
+ JSFunction* input_frame_function,
+ TranslationIterator* iterator,
+ FixedArray* literal_array, RegisterValues* registers,
+ FILE* trace_file) {
+ DCHECK(frames_.empty());
+
+ isolate_ = literal_array->GetIsolate();
+ // Read out the 'header' translation.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ CHECK(opcode == Translation::BEGIN);
+
+ int count = iterator->Next();
+ iterator->Next(); // Drop JS frames count.
+
+ frames_.reserve(count);
+
+ std::stack<int> nested_counts;
+
+ // Read the frames
+ for (int i = 0; i < count; i++) {
+ // Read the frame descriptor.
+ frames_.push_back(
+ CreateNextTranslatedFrame(iterator, literal_array, input_frame_pointer,
+ input_frame_function, trace_file));
+ TranslatedFrame& frame = frames_.back();
+
+ // Read the values.
+ int values_to_process = frame.GetValueCount();
+ while (values_to_process > 0 || !nested_counts.empty()) {
+ if (trace_file != nullptr) {
+ if (nested_counts.empty()) {
+ // For top level values, print the value number.
+ PrintF(trace_file, " %3i: ",
+ frame.GetValueCount() - values_to_process);
+ } else {
+ // Take care of indenting for nested values.
+ PrintF(trace_file, " ");
+ for (size_t j = 0; j < nested_counts.size(); j++) {
+ PrintF(trace_file, " ");
+ }
+ }
}
- }
- case UINT32: {
-#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
- uint32_t value = Memory::uint32_at(addr_ + kIntSize);
-#else
- uint32_t value = Memory::uint32_at(addr_);
-#endif
- if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
- } else {
- return isolate->factory()->NewNumber(static_cast<double>(value));
+ TranslatedValue value = CreateNextTranslatedValue(
+ i, static_cast<int>(frame.values_.size()), iterator, literal_array,
+ input_frame_pointer, registers, trace_file);
+ frame.Add(value);
+
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "\n");
}
- }
- case BOOLBIT: {
-#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
- uint32_t value = Memory::uint32_at(addr_ + kIntSize);
-#else
- uint32_t value = Memory::uint32_at(addr_);
-#endif
- if (value == 0) {
- return isolate->factory()->false_value();
+ // Update the value count and resolve the nesting.
+ values_to_process--;
+ int children_count = value.GetChildrenCount();
+ if (children_count > 0) {
+ nested_counts.push(values_to_process);
+ values_to_process = children_count;
} else {
- DCHECK_EQ(1U, value);
- return isolate->factory()->true_value();
+ while (values_to_process == 0 && !nested_counts.empty()) {
+ values_to_process = nested_counts.top();
+ nested_counts.pop();
+ }
}
}
-
- case DOUBLE: {
- double value = read_double_value(addr_);
- return isolate->factory()->NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- FATAL("We should never get here - unexpected deopt info.");
- return Handle<Object>::null();
}
+
+ CHECK(!iterator->HasNext() ||
+ static_cast<Translation::Opcode>(iterator->Next()) ==
+ Translation::BEGIN);
}
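+
+// The nested_counts stack above implements the depth-first traversal of
+// captured objects: entering an object pushes the number of outstanding
+// outer values and switches to the object's child count; once a nesting
+// level is exhausted, the saved remainder is popped back.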
-void SlotRefValueBuilder::Prepare(Isolate* isolate) {
- MaterializedObjectStore* materialized_store =
- isolate->materialized_object_store();
- previously_materialized_objects_ = materialized_store->Get(stack_frame_id_);
- prev_materialized_count_ = previously_materialized_objects_.is_null()
- ? 0 : previously_materialized_objects_->length();
+void TranslatedState::Prepare(bool has_adapted_arguments,
+ Address stack_frame_pointer) {
+ for (auto& frame : frames_) frame.Handlify();
- // Skip any materialized objects of the inlined "parent" frames.
- // (Note that we still need to materialize them because they might be
- // referred to as duplicated objects.)
- while (current_slot_ < first_slot_index_) {
- GetNext(isolate, 0);
- }
- CHECK_EQ(current_slot_, first_slot_index_);
+ stack_frame_pointer_ = stack_frame_pointer;
+ has_adapted_arguments_ = has_adapted_arguments;
+
+ UpdateFromPreviouslyMaterializedObjects();
}
-Handle<Object> SlotRefValueBuilder::GetPreviouslyMaterialized(
- Isolate* isolate, int length) {
- int object_index = materialized_objects_.length();
- Handle<Object> return_value = Handle<Object>(
- previously_materialized_objects_->get(object_index), isolate);
- materialized_objects_.Add(return_value);
+Handle<Object> TranslatedState::MaterializeAt(int frame_index,
+ int* value_index) {
+ TranslatedFrame* frame = &(frames_[frame_index]);
+ DCHECK(static_cast<size_t>(*value_index) < frame->values_.size());
- // Now need to skip all the nested objects (and possibly read them from
- // the materialization store, too).
- for (int i = 0; i < length; i++) {
- SlotRef& slot = slot_refs_[current_slot_];
- current_slot_++;
-
- // We need to read all the nested objects - add them to the
- // number of objects we need to process.
- length += slot.GetChildrenCount();
-
- // Put the nested deferred/duplicate objects into our materialization
- // array.
- if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
- slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
- int nested_object_index = materialized_objects_.length();
- Handle<Object> nested_object = Handle<Object>(
- previously_materialized_objects_->get(nested_object_index),
- isolate);
- materialized_objects_.Add(nested_object);
- }
- }
-
- return return_value;
-}
-
-
-Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
- SlotRef& slot = slot_refs_[current_slot_];
- current_slot_++;
- switch (slot.Representation()) {
- case SlotRef::TAGGED:
- case SlotRef::INT32:
- case SlotRef::UINT32:
- case SlotRef::BOOLBIT:
- case SlotRef::DOUBLE:
- case SlotRef::LITERAL:
- return slot.GetValue(isolate);
-
- case SlotRef::ARGUMENTS_OBJECT: {
- // We should never need to materialize an arguments object,
- // but we still need to put something into the array
- // so that the indexing is consistent.
- materialized_objects_.Add(isolate->factory()->undefined_value());
- int length = slot.GetChildrenCount();
- for (int i = 0; i < length; ++i) {
- // We don't need the argument, just ignore it
- GetNext(isolate, lvl + 1);
+ TranslatedValue* slot = &(frame->values_[*value_index]);
+ (*value_index)++;
+
+ switch (slot->kind()) {
+ case TranslatedValue::kTagged:
+ case TranslatedValue::kInt32:
+ case TranslatedValue::kUInt32:
+ case TranslatedValue::kBoolBit:
+ case TranslatedValue::kDouble: {
+ slot->MaterializeSimple();
+ Handle<Object> value = slot->GetValue();
+ if (value->IsMutableHeapNumber()) {
+ HeapNumber::cast(*value)->set_map(isolate()->heap()->heap_number_map());
}
- return isolate->factory()->undefined_value();
+ return value;
}
- case SlotRef::DEFERRED_OBJECT: {
- int length = slot.GetChildrenCount();
- CHECK(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
- slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
- int object_index = materialized_objects_.length();
- if (object_index < prev_materialized_count_) {
- return GetPreviouslyMaterialized(isolate, length);
+ case TranslatedValue::kArgumentsObject: {
+ int length = slot->GetChildrenCount();
+ Handle<JSObject> arguments;
+ if (GetAdaptedArguments(&arguments, frame_index)) {
+        // The adapted arguments object comes from the actual stack frame,
+        // so just consume (and ignore) the nested values here.
+ for (int i = 0; i < length; ++i) {
+ MaterializeAt(frame_index, value_index);
+ }
+ } else {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(frame->front().GetValue());
+ arguments = isolate_->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
+ DCHECK_EQ(array->length(), length);
+ arguments->set_elements(*array);
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeAt(frame_index, value_index);
+ array->set(i, *value);
+ }
+ }
+ slot->value_ = arguments;
+ return arguments;
+ }
+ case TranslatedValue::kCapturedObject: {
+ int length = slot->GetChildrenCount();
+
+ // The map must be a tagged object.
+ CHECK(frame->values_[*value_index].kind() == TranslatedValue::kTagged);
+
+ Handle<Object> result;
+ if (slot->value_.ToHandle(&result)) {
+ // This has been previously materialized, return the previous value.
+ // We still need to skip all the nested objects.
+ for (int i = 0; i < length; i++) {
+ MaterializeAt(frame_index, value_index);
+ }
+
+ return result;
}
- Handle<Object> map_object = slot_refs_[current_slot_].GetValue(isolate);
- Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
- Handle<Map>::cast(map_object));
- current_slot_++;
- // TODO(jarin) this should be unified with the code in
- // Deoptimizer::MaterializeNextHeapObject()
+ Handle<Object> map_object = MaterializeAt(frame_index, value_index);
+ Handle<Map> map =
+ Map::GeneralizeAllFieldRepresentations(Handle<Map>::cast(map_object));
switch (map->instance_type()) {
case MUTABLE_HEAP_NUMBER_TYPE:
case HEAP_NUMBER_TYPE: {
// Reuse the HeapNumber value directly as it is already properly
// tagged and skip materializing the HeapNumber explicitly.
- Handle<Object> object = GetNext(isolate, lvl + 1);
- materialized_objects_.Add(object);
+ Handle<Object> object = MaterializeAt(frame_index, value_index);
+ slot->value_ = object;
// On 32-bit architectures, there is an extra slot there because
// the escape analysis calculates the number of slots as
// object-size/pointer-size. To account for this, we read out
// any extra slots.
for (int i = 0; i < length - 2; i++) {
- GetNext(isolate, lvl + 1);
+ MaterializeAt(frame_index, value_index);
}
return object;
}
case JS_OBJECT_TYPE: {
Handle<JSObject> object =
- isolate->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
- materialized_objects_.Add(object);
- Handle<Object> properties = GetNext(isolate, lvl + 1);
- Handle<Object> elements = GetNext(isolate, lvl + 1);
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
+ slot->value_ = object;
+ Handle<Object> properties = MaterializeAt(frame_index, value_index);
+ Handle<Object> elements = MaterializeAt(frame_index, value_index);
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArrayBase::cast(*elements));
for (int i = 0; i < length - 3; ++i) {
- Handle<Object> value = GetNext(isolate, lvl + 1);
+ Handle<Object> value = MaterializeAt(frame_index, value_index);
FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
object->FastPropertyAtPut(index, *value);
}
@@ -3600,33 +3180,47 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
}
case JS_ARRAY_TYPE: {
Handle<JSArray> object =
- isolate->factory()->NewJSArray(0, map->elements_kind());
- materialized_objects_.Add(object);
- Handle<Object> properties = GetNext(isolate, lvl + 1);
- Handle<Object> elements = GetNext(isolate, lvl + 1);
- Handle<Object> length = GetNext(isolate, lvl + 1);
+ isolate_->factory()->NewJSArray(0, map->elements_kind());
+ slot->value_ = object;
+ Handle<Object> properties = MaterializeAt(frame_index, value_index);
+ Handle<Object> elements = MaterializeAt(frame_index, value_index);
+ Handle<Object> length = MaterializeAt(frame_index, value_index);
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArrayBase::cast(*elements));
object->set_length(*length);
return object;
}
default:
- PrintF(stderr,
- "[couldn't handle instance type %d]\n", map->instance_type());
- UNREACHABLE();
- break;
+ PrintF(stderr, "[couldn't handle instance type %d]\n",
+ map->instance_type());
+ FATAL("unreachable");
+ return Handle<Object>::null();
}
UNREACHABLE();
break;
}
- case SlotRef::DUPLICATE_OBJECT: {
- int object_index = slot.DuplicateObjectId();
- Handle<Object> object = materialized_objects_[object_index];
- materialized_objects_.Add(object);
+ case TranslatedValue::kDuplicatedObject: {
+ int object_index = slot->object_index();
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+
+      // Make sure the duplicate is referring to a previous object.
+ DCHECK(pos.frame_index_ < frame_index ||
+ (pos.frame_index_ == frame_index &&
+ pos.value_index_ < *value_index - 1));
+
+ Handle<Object> object =
+ frames_[pos.frame_index_].values_[pos.value_index_].GetValue();
+
+ // The object should have a (non-sentinel) value.
+ DCHECK(!object.is_null() &&
+ !object.is_identical_to(isolate_->factory()->arguments_marker()));
+
+ slot->value_ = object;
return object;
}
- default:
+
+ case TranslatedValue::kInvalid:
UNREACHABLE();
break;
}
@@ -3636,179 +3230,153 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
}
-void SlotRefValueBuilder::Finish(Isolate* isolate) {
- // We should have processed all the slots
- CHECK_EQ(slot_refs_.length(), current_slot_);
-
- if (should_deoptimize_ &&
- materialized_objects_.length() > prev_materialized_count_) {
- // We have materialized some new objects and they might be accessible
- // from the arguments object, so we have to store them
- // to prevent duplicate materialization.
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(
- materialized_objects_.length());
- for (int i = 0; i < materialized_objects_.length(); i++) {
- array->set(i, *(materialized_objects_.at(i)));
- }
- isolate->materialized_object_store()->Set(stack_frame_id_, array);
- }
-}
-
-
-Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
- int index = StackIdToIndex(fp);
- if (index == -1) {
- return Handle<FixedArray>::null();
- }
- Handle<FixedArray> array = GetStackEntries();
- CHECK_GT(array->length(), index);
- return Handle<FixedArray>::cast(Handle<Object>(array->get(index),
- isolate()));
-}
-
-
-void MaterializedObjectStore::Set(Address fp,
- Handle<FixedArray> materialized_objects) {
- int index = StackIdToIndex(fp);
- if (index == -1) {
- index = frame_fps_.length();
- frame_fps_.Add(fp);
- }
-
- Handle<FixedArray> array = EnsureStackEntries(index + 1);
- array->set(index, *materialized_objects);
+Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
+ TranslatedState::ObjectPosition pos = object_positions_[object_index];
+ return MaterializeAt(pos.frame_index_, &(pos.value_index_));
}
-bool MaterializedObjectStore::Remove(Address fp) {
- int index = StackIdToIndex(fp);
- if (index == -1) {
- return false;
- }
- CHECK_GE(index, 0);
+bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
+ int frame_index) {
+ if (frame_index == 0) {
+ // Top level frame -> we need to go to the parent frame on the stack.
+ if (!has_adapted_arguments_) return false;
- frame_fps_.Remove(index);
- FixedArray* array = isolate()->heap()->materialized_objects();
- CHECK_LT(index, array->length());
- for (int i = index; i < frame_fps_.length(); i++) {
- array->set(i, array->get(i + 1));
+    // This is the top-level frame, so we need to go to the stack to get
+    // this function's arguments. (Note that this relies on not inlining
+    // recursive functions!)
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(frames_[frame_index].front().GetValue());
+ *result = Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
+ return true;
+ } else {
+ TranslatedFrame* previous_frame = &(frames_[frame_index]);
+ if (previous_frame->kind() != TranslatedFrame::kArgumentsAdaptor) {
+ return false;
+ }
+ // We get the adapted arguments from the parent translation.
+ int length = previous_frame->height();
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(previous_frame->front().GetValue());
+ Handle<JSObject> arguments =
+ isolate_->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
+ arguments->set_elements(*array);
+ TranslatedFrame::iterator arg_iterator = previous_frame->begin();
+ arg_iterator++; // Skip function.
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = arg_iterator->GetValue();
+ array->set(i, *value);
+ arg_iterator++;
+ }
+ CHECK(arg_iterator == previous_frame->end());
+ *result = arguments;
+ return true;
}
- array->set(frame_fps_.length(), isolate()->heap()->undefined_value());
- return true;
}
-int MaterializedObjectStore::StackIdToIndex(Address fp) {
- for (int i = 0; i < frame_fps_.length(); i++) {
- if (frame_fps_[i] == fp) {
- return i;
+TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
+ int jsframe_index, int* args_count) {
+ for (size_t i = 0; i < frames_.size(); i++) {
+ if (frames_[i].kind() == TranslatedFrame::kFunction) {
+ if (jsframe_index > 0) {
+ jsframe_index--;
+ } else {
+ // We have the JS function frame, now check if it has arguments adaptor.
+ if (i > 0 &&
+ frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
+ *args_count = frames_[i - 1].height();
+ return &(frames_[i - 1]);
+ }
+ *args_count =
+ frames_[i].shared_info()->internal_formal_parameter_count() + 1;
+ return &(frames_[i]);
+ }
}
}
- return -1;
+ return nullptr;
}
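+
+// If the selected function frame is directly preceded by an arguments
+// adaptor frame, the adaptor's height is the actual (adapted) argument
+// count; otherwise the formal parameter count plus the receiver is used.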
-Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
- return Handle<FixedArray>(isolate()->heap()->materialized_objects());
-}
+void TranslatedState::StoreMaterializedValuesAndDeopt() {
+ MaterializedObjectStore* materialized_store =
+ isolate_->materialized_object_store();
+ Handle<FixedArray> previously_materialized_objects =
+ materialized_store->Get(stack_frame_pointer_);
+ Handle<Object> marker = isolate_->factory()->arguments_marker();
-Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
- Handle<FixedArray> array = GetStackEntries();
- if (array->length() >= length) {
- return array;
- }
-
- int new_length = length > 10 ? length : 10;
- if (new_length < 2 * array->length()) {
- new_length = 2 * array->length();
+ int length = static_cast<int>(object_positions_.size());
+ bool new_store = false;
+ if (previously_materialized_objects.is_null()) {
+ previously_materialized_objects =
+ isolate_->factory()->NewFixedArray(length);
+ for (int i = 0; i < length; i++) {
+ previously_materialized_objects->set(i, *marker);
+ }
+ new_store = true;
}
- Handle<FixedArray> new_array =
- isolate()->factory()->NewFixedArray(new_length, TENURED);
- for (int i = 0; i < array->length(); i++) {
- new_array->set(i, array->get(i));
- }
- for (int i = array->length(); i < length; i++) {
- new_array->set(i, isolate()->heap()->undefined_value());
- }
- isolate()->heap()->public_set_materialized_objects(*new_array);
- return new_array;
-}
+ DCHECK_EQ(length, previously_materialized_objects->length());
+ bool value_changed = false;
+ for (int i = 0; i < length; i++) {
+ TranslatedState::ObjectPosition pos = object_positions_[i];
+ TranslatedValue* value_info =
+ &(frames_[pos.frame_index_].values_[pos.value_index_]);
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
- int frame_index,
- bool has_arguments_adaptor,
- bool has_construct_stub) {
- FrameDescription* output_frame = deoptimizer->output_[frame_index];
- function_ = output_frame->GetFunction();
- context_ = reinterpret_cast<Object*>(output_frame->GetContext());
- has_construct_stub_ = has_construct_stub;
- expression_count_ = output_frame->GetExpressionCount();
- expression_stack_ = new Object*[expression_count_];
- // Get the source position using the unoptimized code.
- Address pc = reinterpret_cast<Address>(output_frame->GetPc());
- Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
- source_position_ = code->SourcePosition(pc);
+ DCHECK(value_info->IsMaterializedObject());
- for (int i = 0; i < expression_count_; i++) {
- SetExpression(i, output_frame->GetExpression(i));
- }
+ Handle<Object> value(value_info->GetRawValue(), isolate_);
- if (has_arguments_adaptor) {
- output_frame = deoptimizer->output_[frame_index - 1];
- CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR);
+ if (!value.is_identical_to(marker)) {
+ if (previously_materialized_objects->get(i) == *marker) {
+ previously_materialized_objects->set(i, *value);
+ value_changed = true;
+ } else {
+ DCHECK(previously_materialized_objects->get(i) == *value);
+ }
+ }
}
-
- parameters_count_ = output_frame->ComputeParametersCount();
- parameters_ = new Object*[parameters_count_];
- for (int i = 0; i < parameters_count_; i++) {
- SetParameter(i, output_frame->GetParameter(i));
+ if (new_store && value_changed) {
+ materialized_store->Set(stack_frame_pointer_,
+ previously_materialized_objects);
+ DCHECK_EQ(TranslatedFrame::kFunction, frames_[0].kind());
+ Object* const function = frames_[0].front().GetRawValue();
+ Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
}
}
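+
+// Persisting a fresh store also deoptimizes the optimized function
+// (frames_[0]), so that subsequent frame inspections observe the same
+// materialized objects instead of re-materializing fresh copies.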
-DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
- delete[] expression_stack_;
- delete[] parameters_;
-}
-
+void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
+ MaterializedObjectStore* materialized_store =
+ isolate_->materialized_object_store();
+ Handle<FixedArray> previously_materialized_objects =
+ materialized_store->Get(stack_frame_pointer_);
-void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
- v->VisitPointer(bit_cast<Object**>(&function_));
- v->VisitPointer(&context_);
- v->VisitPointers(parameters_, parameters_ + parameters_count_);
- v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
-}
+ // If we have no previously materialized objects, there is nothing to do.
+ if (previously_materialized_objects.is_null()) return;
+ Handle<Object> marker = isolate_->factory()->arguments_marker();
-const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
- DCHECK(deopt_reason < kLastDeoptReason);
-#define DEOPT_MESSAGES_TEXTS(C, T) T,
- static const char* deopt_messages_[] = {
- DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_TEXTS)};
-#undef DEOPT_MESSAGES_TEXTS
- return deopt_messages_[deopt_reason];
-}
+ int length = static_cast<int>(object_positions_.size());
+ DCHECK_EQ(length, previously_materialized_objects->length());
+ for (int i = 0; i < length; i++) {
+    // For previously materialized objects, inject their values into the
+ // translated values.
+ if (previously_materialized_objects->get(i) != *marker) {
+ TranslatedState::ObjectPosition pos = object_positions_[i];
+ TranslatedValue* value_info =
+ &(frames_[pos.frame_index_].values_[pos.value_index_]);
+ DCHECK(value_info->IsMaterializedObject());
-Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
- SourcePosition last_position = SourcePosition::Unknown();
- Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
- int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
- RelocInfo::ModeMask(RelocInfo::POSITION);
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() >= pc) return DeoptInfo(last_position, NULL, last_reason);
- if (info->rmode() == RelocInfo::POSITION) {
- int raw_position = static_cast<int>(info->data());
- last_position = raw_position ? SourcePosition::FromRaw(raw_position)
- : SourcePosition::Unknown();
- } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
- last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
+ value_info->value_ =
+ Handle<Object>(previously_materialized_objects->get(i), isolate_);
}
}
- return DeoptInfo(SourcePosition::Unknown(), NULL, Deoptimizer::kNoReason);
}
-} } // namespace v8::internal
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index d16e0558d9..ab76d41b6b 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -14,60 +14,284 @@
namespace v8 {
namespace internal {
-
-static inline double read_double_value(Address p) {
- double d;
- memcpy(&d, p, sizeof(d));
- return d;
-}
-
-
class FrameDescription;
class TranslationIterator;
class DeoptimizedFrameInfo;
+class TranslatedState;
+class RegisterValues;
-template<typename T>
-class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
+class TranslatedValue {
public:
- HeapNumberMaterializationDescriptor(T destination, double value)
- : destination_(destination), value_(value) { }
+ // Allocation-less getter of the value.
+ // Returns heap()->arguments_marker() if allocation would be
+ // necessary to get the value.
+ Object* GetRawValue() const;
+ Handle<Object> GetValue();
- T destination() const { return destination_; }
- double value() const { return value_; }
+ bool IsMaterializedObject() const;
private:
- T destination_;
- double value_;
+ friend class TranslatedState;
+ friend class TranslatedFrame;
+
+ enum Kind {
+ kInvalid,
+ kTagged,
+ kInt32,
+ kUInt32,
+ kBoolBit,
+ kDouble,
+    kCapturedObject,   // Object captured by the escape analysis.
+                       // The number of nested objects can be obtained
+                       // with the object_length() method (the values of
+                       // the nested objects follow this value in
+                       // depth-first order.)
+ kDuplicatedObject, // Duplicated object of a deferred object.
+    kArgumentsObject   // Arguments object - only used to keep indexing
+                       // in sync; it should not be materialized.
+ };
+
+ TranslatedValue(TranslatedState* container, Kind kind)
+ : kind_(kind), container_(container) {}
+ Kind kind() const { return kind_; }
+ void Handlify();
+ int GetChildrenCount() const;
+
+ static TranslatedValue NewArgumentsObject(TranslatedState* container,
+ int length, int object_index);
+ static TranslatedValue NewDeferredObject(TranslatedState* container,
+ int length, int object_index);
+ static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
+ static TranslatedValue NewDouble(TranslatedState* container, double value);
+ static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
+ static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
+ static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
+ static TranslatedValue NewTagged(TranslatedState* container, Object* literal);
+ static TranslatedValue NewInvalid(TranslatedState* container);
+
+ Isolate* isolate() const;
+ void MaterializeSimple();
+
+ Kind kind_;
+ TranslatedState* container_; // This is only needed for materialization of
+ // objects and constructing handles (to get
+ // to the isolate).
+
+  MaybeHandle<Object> value_;  // Before handlification, this is always null;
+                               // after materialization it is never null; in
+                               // between it is only null if the value still
+                               // needs to be materialized.
+
+ struct MaterializedObjectInfo {
+ int id_;
+ int length_; // Applies only to kArgumentsObject or kCapturedObject kinds.
+ };
+
+ union {
+ // kind is kTagged. After handlification it is always nullptr.
+ Object* raw_literal_;
+ // kind is kUInt32 or kBoolBit.
+ uint32_t uint32_value_;
+ // kind is kInt32.
+ int32_t int32_value_;
+ // kind is kDouble.
+ double double_value_;
+ // kind is kDuplicatedObject or kArgumentsObject or kCapturedObject.
+ MaterializedObjectInfo materialization_info_;
+ };
+
+ // Checked accessors for the union members.
+ Object* raw_literal() const;
+ int32_t int32_value() const;
+ uint32_t uint32_value() const;
+ double double_value() const;
+ int object_length() const;
+ int object_index() const;
};
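
The TranslatedValue declaration above is a classic tagged union: kind_ selects which union member is live, the static New* constructors are the only way to set it, and the checked accessors trap on a wrong-kind read. A minimal standalone sketch of that pattern (plain C++ with illustrative names; not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative sketch only: a tag plus union with checked accessors,
    // mirroring the TranslatedValue layout above.
    class SlotValue {
     public:
      static SlotValue NewInt32(int32_t v) {
        SlotValue r(kInt32);
        r.int32_value_ = v;
        return r;
      }
      static SlotValue NewDouble(double v) {
        SlotValue r(kDouble);
        r.double_value_ = v;
        return r;
      }
      int32_t int32_value() const {
        assert(kind_ == kInt32);  // Checked accessor: wrong-kind reads trap.
        return int32_value_;
      }
      double double_value() const {
        assert(kind_ == kDouble);
        return double_value_;
      }
     private:
      enum Kind { kInt32, kDouble };
      explicit SlotValue(Kind kind) : kind_(kind) {}
      Kind kind_;
      union {
        int32_t int32_value_;  // valid iff kind_ == kInt32
        double double_value_;  // valid iff kind_ == kDouble
      };
    };

    int main() {
      SlotValue v = SlotValue::NewDouble(0.5);
      std::printf("%f\n", v.double_value());  // 0.500000
    }

Keeping the raw value unboxed this way is what lets the first translation phase run without allocating.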
-class ObjectMaterializationDescriptor BASE_EMBEDDED {
+class TranslatedFrame {
public:
- ObjectMaterializationDescriptor(
- Address slot_address, int frame, int length, int duplicate, bool is_args)
- : slot_address_(slot_address),
- jsframe_index_(frame),
- object_length_(length),
- duplicate_object_(duplicate),
- is_arguments_(is_args) { }
-
- Address slot_address() const { return slot_address_; }
- int jsframe_index() const { return jsframe_index_; }
- int object_length() const { return object_length_; }
- int duplicate_object() const { return duplicate_object_; }
- bool is_arguments() const { return is_arguments_; }
-
- // Only used for allocated receivers in DoComputeConstructStubFrame.
- void patch_slot_address(intptr_t slot) {
- slot_address_ = reinterpret_cast<Address>(slot);
+ enum Kind {
+ kFunction,
+ kGetter,
+ kSetter,
+ kArgumentsAdaptor,
+ kConstructStub,
+ kCompiledStub,
+ kInvalid
+ };
+
+ int GetValueCount();
+
+ Kind kind() const { return kind_; }
+ BailoutId node_id() const { return node_id_; }
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ int height() const { return height_; }
+
+ class iterator {
+ public:
+ iterator& operator++() {
+ AdvanceIterator(&position_);
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator original(position_);
+ AdvanceIterator(&position_);
+ return original;
+ }
+
+ bool operator==(const iterator& other) const {
+ return position_ == other.position_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+
+ TranslatedValue& operator*() { return (*position_); }
+ TranslatedValue* operator->() { return &(*position_); }
+
+ private:
+ friend TranslatedFrame;
+
+ explicit iterator(std::deque<TranslatedValue>::iterator position)
+ : position_(position) {}
+
+ std::deque<TranslatedValue>::iterator position_;
+ };
+
+ typedef TranslatedValue& reference;
+ typedef TranslatedValue const& const_reference;
+
+ iterator begin() { return iterator(values_.begin()); }
+ iterator end() { return iterator(values_.end()); }
+
+ reference front() { return values_.front(); }
+ const_reference front() const { return values_.front(); }
+
+ private:
+ friend class TranslatedState;
+
+ // Constructor static methods.
+ static TranslatedFrame JSFrame(BailoutId node_id,
+ SharedFunctionInfo* shared_info, int height);
+ static TranslatedFrame AccessorFrame(Kind kind,
+ SharedFunctionInfo* shared_info);
+ static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
+ int height);
+ static TranslatedFrame ConstructStubFrame(SharedFunctionInfo* shared_info,
+ int height);
+ static TranslatedFrame CompiledStubFrame(int height, Isolate* isolate) {
+ return TranslatedFrame(kCompiledStub, isolate, nullptr, height);
}
+ static TranslatedFrame InvalidFrame() {
+ return TranslatedFrame(kInvalid, nullptr);
+ }
+
+ static void AdvanceIterator(std::deque<TranslatedValue>::iterator* iter);
+
+ TranslatedFrame(Kind kind, Isolate* isolate,
+ SharedFunctionInfo* shared_info = nullptr, int height = 0)
+ : kind_(kind),
+ node_id_(BailoutId::None()),
+ raw_shared_info_(shared_info),
+ height_(height),
+ isolate_(isolate) {}
+
+
+ void Add(const TranslatedValue& value) { values_.push_back(value); }
+ void Handlify();
+
+ Kind kind_;
+ BailoutId node_id_;
+ SharedFunctionInfo* raw_shared_info_;
+ Handle<SharedFunctionInfo> shared_info_;
+ int height_;
+ Isolate* isolate_;
+
+ typedef std::deque<TranslatedValue> ValuesContainer;
+
+ ValuesContainer values_;
+};
+
+
+// Auxiliary class for translating deoptimization values.
+// Typical usage sequence:
+//
+// 1. Construct the instance. This will involve reading out the translations
+// and resolving them to values using the supplied frame pointer and
+// machine state (registers). This phase is guaranteed not to allocate
+// and not to use any HandleScope. Any object pointers will be stored raw.
+//
+// 2. Handlify pointers. This will convert all the raw pointers to handles.
+//
+// 3. Read out the frame values.
+//
+// Note: After the instance is constructed, it is possible to iterate over
+// the values eagerly.
+
+class TranslatedState {
+ public:
+ TranslatedState();
+ explicit TranslatedState(JavaScriptFrame* frame);
+
+ void Prepare(bool has_adapted_arguments, Address stack_frame_pointer);
+
+ // Store newly materialized values into the isolate.
+ void StoreMaterializedValuesAndDeopt();
+
+ typedef std::vector<TranslatedFrame>::iterator iterator;
+ iterator begin() { return frames_.begin(); }
+ iterator end() { return frames_.end(); }
+
+ typedef std::vector<TranslatedFrame>::const_iterator const_iterator;
+ const_iterator begin() const { return frames_.begin(); }
+ const_iterator end() const { return frames_.end(); }
+
+ std::vector<TranslatedFrame>& frames() { return frames_; }
+
+ TranslatedFrame* GetArgumentsInfoFromJSFrameIndex(int jsframe_index,
+ int* arguments_count);
+
+ Isolate* isolate() { return isolate_; }
+
+ void Init(Address input_frame_pointer, JSFunction* input_frame_function,
+ TranslationIterator* iterator, FixedArray* literal_array,
+ RegisterValues* registers, FILE* trace_file);
private:
- Address slot_address_;
- int jsframe_index_;
- int object_length_;
- int duplicate_object_;
- bool is_arguments_;
+ friend TranslatedValue;
+
+ TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator,
+ FixedArray* literal_array,
+ Address fp,
+ JSFunction* frame_function,
+ FILE* trace_file);
+ TranslatedValue CreateNextTranslatedValue(int frame_index, int value_index,
+ TranslationIterator* iterator,
+ FixedArray* literal_array,
+ Address fp,
+ RegisterValues* registers,
+ FILE* trace_file);
+
+ void UpdateFromPreviouslyMaterializedObjects();
+ Handle<Object> MaterializeAt(int frame_index, int* value_index);
+ Handle<Object> MaterializeObjectAt(int object_index);
+ bool GetAdaptedArguments(Handle<JSObject>* result, int frame_index);
+
+ static int SlotOffsetFp(int slot_index);
+ static Address SlotAddress(Address fp, int slot_index);
+ static uint32_t GetUInt32Slot(Address fp, int slot_index);
+
+ std::vector<TranslatedFrame> frames_;
+ Isolate* isolate_;
+ Address stack_frame_pointer_;
+ bool has_adapted_arguments_;
+
+ struct ObjectPosition {
+ int frame_index_;
+ int value_index_;
+ };
+ std::deque<ObjectPosition> object_positions_;
};
@@ -260,9 +484,6 @@ class Deoptimizer : public Malloced {
// Deoptimize all code in the given isolate.
static void DeoptimizeAll(Isolate* isolate);
- // Deoptimize code associated with the given global object.
- static void DeoptimizeGlobalObject(JSObject* object);
-
// Deoptimizes all optimized code that has been previously marked
// (via code->set_marked_for_deoptimization) and unlinks all functions that
// refer to that code.
@@ -280,10 +501,7 @@ class Deoptimizer : public Malloced {
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
void MaterializeHeapNumbersForDebuggerInspectableFrame(
- Address parameters_top,
- uint32_t parameters_size,
- Address expressions_top,
- uint32_t expressions_size,
+ int frame_index, int parameter_count, int expression_count,
DeoptimizedFrameInfo* info);
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -382,20 +600,16 @@ class Deoptimizer : public Malloced {
void DoComputeCompiledStubFrame(TranslationIterator* iterator,
int frame_index);
- // Translate object, store the result into an auxiliary array
- // (deferred_objects_tagged_values_).
- void DoTranslateObject(TranslationIterator* iterator,
- int object_index,
- int field_index);
-
- // Translate value, store the result into the given frame slot.
- void DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset);
-
- // Translate object, do not store the result anywhere (but do update
- // the deferred materialization array).
- void DoTranslateObjectAndSkip(TranslationIterator* iterator);
+ void WriteTranslatedValueToOutput(
+ TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
+ unsigned output_offset, const char* debug_hint_string = nullptr,
+ Address output_address_for_materialization = nullptr);
+ void WriteValueToOutput(Object* value, int input_index, int frame_index,
+ unsigned output_offset,
+ const char* debug_hint_string);
+ void DebugPrintOutputSlot(intptr_t value, int frame_index,
+ unsigned output_offset,
+ const char* debug_hint_string);
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -405,28 +619,6 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
- void AddObjectStart(intptr_t slot_address, int argc, bool is_arguments);
- void AddObjectDuplication(intptr_t slot, int object_index);
- void AddObjectTaggedValue(intptr_t value);
- void AddObjectDoubleValue(double value);
- void AddDoubleValue(intptr_t slot_address, double value);
-
- bool ArgumentsObjectIsAdapted(int object_index) {
- ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
- int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
- return jsframe_has_adapted_arguments_[reverse_jsframe_index];
- }
-
- Handle<JSFunction> ArgumentsObjectFunction(int object_index) {
- ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
- int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
- return jsframe_functions_[reverse_jsframe_index];
- }
-
- // Helper function for heap object materialization.
- Handle<Object> MaterializeNextHeapObject();
- Handle<Object> MaterializeNextValue();
-
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -484,27 +676,15 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
- // Deferred values to be materialized.
- List<Object*> deferred_objects_tagged_values_;
- List<HeapNumberMaterializationDescriptor<int> >
- deferred_objects_double_values_;
- List<ObjectMaterializationDescriptor> deferred_objects_;
- List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
-
// Key for lookup of previously materialized objects
Address stack_fp_;
- Handle<FixedArray> previously_materialized_objects_;
- int prev_materialized_count_;
-
- // Output frame information. Only used during heap object materialization.
- List<Handle<JSFunction> > jsframe_functions_;
- List<bool> jsframe_has_adapted_arguments_;
- // Materialized objects. Only used during heap object materialization.
- List<Handle<Object> >* materialized_values_;
- List<Handle<Object> >* materialized_objects_;
- int materialization_value_index_;
- int materialization_object_index_;
+ TranslatedState translated_state_;
+ struct ValueToMaterialize {
+ Address output_slot_address_;
+ TranslatedFrame::iterator value_;
+ };
+ std::vector<ValueToMaterialize> values_to_materialize_;
#ifdef DEBUG
DisallowHeapAllocation* disallow_heap_allocation_;
@@ -519,6 +699,41 @@ class Deoptimizer : public Malloced {
};
+class RegisterValues {
+ public:
+ intptr_t GetRegister(unsigned n) const {
+#if DEBUG
+ // This convoluted DCHECK is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain DCHECK.
+ if (n >= arraysize(registers_)) {
+ DCHECK(false);
+ return 0;
+ }
+#endif
+ return registers_[n];
+ }
+
+ double GetDoubleRegister(unsigned n) const {
+ DCHECK(n < arraysize(double_registers_));
+ return double_registers_[n];
+ }
+
+ void SetRegister(unsigned n, intptr_t value) {
+ DCHECK(n < arraysize(registers_));
+ registers_[n] = value;
+ }
+
+ void SetDoubleRegister(unsigned n, double value) {
+ DCHECK(n < arraysize(double_registers_));
+ double_registers_[n] = value;
+ }
+
+ intptr_t registers_[Register::kNumRegisters];
+ double double_registers_[DoubleRegister::kMaxNumRegisters];
+};
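
RegisterValues is a plain register file with bounds-checked reads; as the comment explains, the branch-and-return-0 form exists only to sidestep a gcc false positive on array bounds in optimized debug builds. A generic sketch of the same defensive-read pattern (standard C++, assert standing in for DCHECK):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative register file with the same defensive read pattern as
    // RegisterValues above: out-of-range reads assert in debug builds and
    // return 0 rather than reading past the array.
    struct Registers {
      intptr_t regs[16] = {0};

      intptr_t Get(unsigned n) const {
        if (n >= sizeof(regs) / sizeof(regs[0])) {
          assert(false && "register index out of range");
          return 0;  // Keep optimized builds well-defined.
        }
        return regs[n];
      }

      void Set(unsigned n, intptr_t value) {
        assert(n < sizeof(regs) / sizeof(regs[0]));
        regs[n] = value;
      }
    };

    int main() {
      Registers r;
      r.Set(3, 99);
      std::printf("%ld\n", static_cast<long>(r.Get(3)));  // 99
    }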
+
+
class FrameDescription {
public:
FrameDescription(uint32_t frame_size,
@@ -554,11 +769,15 @@ class FrameDescription {
return *GetFrameSlotPointer(offset);
}
- double GetDoubleFrameSlot(unsigned offset) {
- intptr_t* ptr = GetFrameSlotPointer(offset);
- return read_double_value(reinterpret_cast<Address>(ptr));
+ Address GetFramePointerAddress() {
+ int fp_offset = GetFrameSize() -
+ (ComputeParametersCount() + 1) * kPointerSize -
+ StandardFrameConstants::kCallerSPOffset;
+ return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
}
+ RegisterValues* GetRegisterValues() { return &register_values_; }
+
void SetFrameSlot(unsigned offset, intptr_t value) {
*GetFrameSlotPointer(offset) = value;
}
@@ -570,31 +789,19 @@ class FrameDescription {
void SetCallerConstantPool(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
-#if DEBUG
- // This convoluted DCHECK is needed to work around a gcc problem that
- // improperly detects an array bounds overflow in optimized debug builds
- // when using a plain DCHECK.
- if (n >= arraysize(registers_)) {
- DCHECK(false);
- return 0;
- }
-#endif
- return registers_[n];
+ return register_values_.GetRegister(n);
}
double GetDoubleRegister(unsigned n) const {
- DCHECK(n < arraysize(double_registers_));
- return double_registers_[n];
+ return register_values_.GetDoubleRegister(n);
}
void SetRegister(unsigned n, intptr_t value) {
- DCHECK(n < arraysize(registers_));
- registers_[n] = value;
+ register_values_.SetRegister(n, value);
}
void SetDoubleRegister(unsigned n, double value) {
- DCHECK(n < arraysize(double_registers_));
- double_registers_[n] = value;
+ register_values_.SetDoubleRegister(n, value);
}
intptr_t GetTop() const { return top_; }
@@ -635,31 +842,27 @@ class FrameDescription {
Object* GetExpression(int index);
static int registers_offset() {
- return OFFSET_OF(FrameDescription, registers_);
+ return OFFSET_OF(FrameDescription, register_values_.registers_);
}
static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
+ return OFFSET_OF(FrameDescription, register_values_.double_registers_);
}
static int frame_size_offset() {
- return OFFSET_OF(FrameDescription, frame_size_);
+ return offsetof(FrameDescription, frame_size_);
}
- static int pc_offset() {
- return OFFSET_OF(FrameDescription, pc_);
- }
+ static int pc_offset() { return offsetof(FrameDescription, pc_); }
- static int state_offset() {
- return OFFSET_OF(FrameDescription, state_);
- }
+ static int state_offset() { return offsetof(FrameDescription, state_); }
static int continuation_offset() {
- return OFFSET_OF(FrameDescription, continuation_);
+ return offsetof(FrameDescription, continuation_);
}
static int frame_content_offset() {
- return OFFSET_OF(FrameDescription, frame_content_);
+ return offsetof(FrameDescription, frame_content_);
}
private:
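
The hunk above moves the simple offsets from V8's OFFSET_OF macro to standard offsetof, but keeps OFFSET_OF for registers_offset and double_registers_offset because those now reach through the nested register_values_ member. A sketch of how such a nested offset can be composed from two direct offsetof results (illustrative standard-layout types, not V8's):

    #include <cstddef>
    #include <cstdio>

    // Plain offsetof is only guaranteed for a direct member, but a nested
    // offset can be composed from two direct ones.
    struct RegisterFile { long regs[4]; double dregs[4]; };
    struct Frame {
      unsigned long frame_size;
      RegisterFile register_values;
      long top;
    };

    int main() {
      // Direct member: plain offsetof is fine.
      std::printf("frame_size at %zu\n", offsetof(Frame, frame_size));
      // Nested member: compose the two direct offsets.
      size_t regs_off =
          offsetof(Frame, register_values) + offsetof(RegisterFile, regs);
      std::printf("register_values.regs at %zu\n", regs_off);
    }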
@@ -670,8 +873,7 @@ class FrameDescription {
// the end of the structure aligned.
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
- intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
+ RegisterValues register_values_;
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
@@ -776,7 +978,8 @@ class TranslationIterator BASE_EMBEDDED {
V(UINT32_STACK_SLOT) \
V(BOOL_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
- V(LITERAL)
+ V(LITERAL) \
+ V(JS_FRAME_FUNCTION)
class Translation BASE_EMBEDDED {
@@ -802,7 +1005,7 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
- void BeginCompiledStubFrame();
+ void BeginCompiledStubFrame(int height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
@@ -822,6 +1025,7 @@ class Translation BASE_EMBEDDED {
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
+ void StoreJSFrameFunction();
Zone* zone() const { return zone_; }
@@ -831,9 +1035,6 @@ class Translation BASE_EMBEDDED {
static const char* StringFor(Opcode opcode);
#endif
- // A literal id which refers to the JSFunction itself.
- static const int kSelfLiteralId = -239;
-
private:
TranslationBuffer* buffer_;
int index_;
@@ -841,124 +1042,6 @@ class Translation BASE_EMBEDDED {
};
-class SlotRef BASE_EMBEDDED {
- public:
- enum SlotRepresentation {
- UNKNOWN,
- TAGGED,
- INT32,
- UINT32,
- BOOLBIT,
- DOUBLE,
- LITERAL,
- DEFERRED_OBJECT, // Object captured by the escape analysis.
- // The number of nested objects can be obtained
- // with the DeferredObjectLength() method
- // (the SlotRefs of the nested objects follow
- // this SlotRef in the depth-first order.)
- DUPLICATE_OBJECT, // Duplicated object of a deferred object.
- ARGUMENTS_OBJECT // Arguments object - only used to keep indexing
- // in sync, it should not be materialized.
- };
-
- SlotRef()
- : addr_(NULL), representation_(UNKNOWN) { }
-
- SlotRef(Address addr, SlotRepresentation representation)
- : addr_(addr), representation_(representation) { }
-
- SlotRef(Isolate* isolate, Object* literal)
- : literal_(literal, isolate), representation_(LITERAL) { }
-
- static SlotRef NewArgumentsObject(int length) {
- SlotRef slot;
- slot.representation_ = ARGUMENTS_OBJECT;
- slot.deferred_object_length_ = length;
- return slot;
- }
-
- static SlotRef NewDeferredObject(int length) {
- SlotRef slot;
- slot.representation_ = DEFERRED_OBJECT;
- slot.deferred_object_length_ = length;
- return slot;
- }
-
- SlotRepresentation Representation() { return representation_; }
-
- static SlotRef NewDuplicateObject(int id) {
- SlotRef slot;
- slot.representation_ = DUPLICATE_OBJECT;
- slot.duplicate_object_id_ = id;
- return slot;
- }
-
- int GetChildrenCount() {
- if (representation_ == DEFERRED_OBJECT ||
- representation_ == ARGUMENTS_OBJECT) {
- return deferred_object_length_;
- } else {
- return 0;
- }
- }
-
- int DuplicateObjectId() { return duplicate_object_id_; }
-
- Handle<Object> GetValue(Isolate* isolate);
-
- private:
- Address addr_;
- Handle<Object> literal_;
- SlotRepresentation representation_;
- int deferred_object_length_;
- int duplicate_object_id_;
-};
-
-class SlotRefValueBuilder BASE_EMBEDDED {
- public:
- SlotRefValueBuilder(
- JavaScriptFrame* frame,
- int inlined_frame_index,
- int formal_parameter_count);
-
- void Prepare(Isolate* isolate);
- Handle<Object> GetNext(Isolate* isolate, int level);
- void Finish(Isolate* isolate);
-
- int args_length() { return args_length_; }
-
- private:
- List<Handle<Object> > materialized_objects_;
- Handle<FixedArray> previously_materialized_objects_;
- int prev_materialized_count_;
- Address stack_frame_id_;
- List<SlotRef> slot_refs_;
- int current_slot_;
- int args_length_;
- int first_slot_index_;
- bool should_deoptimize_;
-
- static SlotRef ComputeSlotForNextArgument(
- Translation::Opcode opcode,
- TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
-
- Handle<Object> GetPreviouslyMaterialized(Isolate* isolate, int length);
-
- static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
- if (slot_index >= 0) {
- const int offset = JavaScriptFrameConstants::kLocal0Offset;
- return frame->fp() + offset - (slot_index * kPointerSize);
- } else {
- const int offset = JavaScriptFrameConstants::kLastParameterOffset;
- return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
- }
- }
-
- Handle<Object> GetDeferredObject(Isolate* isolate);
-};
-
class MaterializedObjectStore {
public:
explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {
@@ -1058,6 +1141,7 @@ class DeoptimizedFrameInfo : public Malloced {
friend class Deoptimizer;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DEOPTIMIZER_H_
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index fbdda54646..47e506d112 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -100,8 +100,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
int num_const = d.ConstantPoolSizeAt(pc);
if (num_const >= 0) {
SNPrintF(decode_buffer,
- "%08x constant pool begin",
- *reinterpret_cast<int32_t*>(pc));
+ "%08x constant pool begin (num_const = %d)",
+ *reinterpret_cast<int32_t*>(pc), num_const);
constants = num_const;
pc += 4;
} else if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
@@ -295,4 +295,5 @@ int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
#endif // ENABLE_DISASSEMBLER
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/diy-fp.cc b/deps/v8/src/diy-fp.cc
index b705df0e67..b64f3407f8 100644
--- a/deps/v8/src/diy-fp.cc
+++ b/deps/v8/src/diy-fp.cc
@@ -33,4 +33,5 @@ void DiyFp::Multiply(const DiyFp& other) {
f_ = result_f;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc
index fc02aca299..76993cf650 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/dtoa.cc
@@ -79,4 +79,5 @@ void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
buffer[*length] = '\0';
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 0ebc6dc246..758b80ddbd 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -44,7 +44,8 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
}
UNREACHABLE();
@@ -54,8 +55,14 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
- return IsExternalArrayElementsKind(elements_kind)
- ? 0 : (FixedArray::kHeaderSize - kHeapObjectTag);
+
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ return 0;
+ } else if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ return FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
+ } else {
+ return FixedArray::kHeaderSize - kHeapObjectTag;
+ }
}
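
The rewritten GetDefaultHeaderSizeForElementsKind distinguishes three cases: external stores keep their data off-heap (header size 0), fixed typed arrays start their payload at kDataOffset, and everything else uses the FixedArray header. A toy analogue of that dispatch (made-up sizes, not V8's real offsets):

    #include <cstdio>

    // Illustrative analogue of GetDefaultHeaderSizeForElementsKind above:
    // off-heap (external) stores have no on-heap header; on-heap stores
    // skip their header to reach the payload.
    enum class StoreKind { kExternal, kFixedTypedArray, kFixedArray };

    int HeaderSize(StoreKind kind) {
      switch (kind) {
        case StoreKind::kExternal:        return 0;   // data lives off-heap
        case StoreKind::kFixedTypedArray: return 16;  // payload after header
        case StoreKind::kFixedArray:      return 8;
      }
      return 0;
    }

    int main() {
      std::printf("%d %d\n", HeaderSize(StoreKind::kExternal),
                  HeaderSize(StoreKind::kFixedArray));  // 0 8
    }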
@@ -128,21 +135,6 @@ ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
}
-ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
- bool allow_only_packed) {
- DCHECK(IsFastElementsKind(elements_kind));
- DCHECK(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
- while (true) {
- elements_kind = GetNextTransitionElementsKind(elements_kind);
- if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
- return elements_kind;
- }
- }
- UNREACHABLE();
- return TERMINAL_FAST_ELEMENTS_KIND;
-}
-
-
static bool IsTypedArrayElementsKind(ElementsKind elements_kind) {
return IsFixedTypedArrayElementsKind(elements_kind) ||
IsExternalArrayElementsKind(elements_kind);
@@ -195,4 +187,5 @@ bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index fb973411ec..b7d169b82e 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -28,7 +28,10 @@ enum ElementsKind {
// The "slow" kind.
DICTIONARY_ELEMENTS,
- SLOPPY_ARGUMENTS_ELEMENTS,
+
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS,
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS,
+
// The "fast" kind for external arrays
EXTERNAL_INT8_ELEMENTS,
EXTERNAL_UINT8_ELEMENTS,
@@ -88,7 +91,8 @@ inline bool IsDictionaryElementsKind(ElementsKind kind) {
inline bool IsSloppyArgumentsElements(ElementsKind kind) {
- return kind == SLOPPY_ARGUMENTS_ELEMENTS;
+ return kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS ||
+ kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -117,7 +121,8 @@ inline bool IsFastElementsKind(ElementsKind kind) {
inline bool IsTransitionElementsKind(ElementsKind kind) {
- return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind);
+ return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind) ||
+ kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -239,19 +244,6 @@ inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
}
-ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
- bool allow_only_packed);
-
-
-inline bool CanTransitionToMoreGeneralFastElementsKind(
- ElementsKind elements_kind,
- bool allow_only_packed) {
- return IsFastElementsKind(elements_kind) &&
- (elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
- (!allow_only_packed || elements_kind != FAST_ELEMENTS));
-}
-
-
} } // namespace v8::internal
#endif // V8_ELEMENTS_KIND_H_
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index ce32cb2898..e830d7c465 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -47,12 +47,17 @@
// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
+// - FastSloppyArgumentsElementsAccessor
+// - SlowSloppyArgumentsElementsAccessor
namespace v8 {
namespace internal {
+namespace {
+
+
static const int kPackedSizeNotKnown = -1;
@@ -61,48 +66,45 @@ static const int kPackedSizeNotKnown = -1;
// fast element handler for smi-only arrays. The implementation is currently
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
-#define ELEMENTS_LIST(V) \
- V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
- V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
- FixedArray) \
- V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
- V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
- V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
- FixedDoubleArray) \
- V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
- FixedDoubleArray) \
- V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
- SeededNumberDictionary) \
- V(SloppyArgumentsElementsAccessor, SLOPPY_ARGUMENTS_ELEMENTS, \
- FixedArray) \
- V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, \
- ExternalInt8Array) \
- V(ExternalUint8ElementsAccessor, \
- EXTERNAL_UINT8_ELEMENTS, ExternalUint8Array) \
- V(ExternalInt16ElementsAccessor, EXTERNAL_INT16_ELEMENTS, \
- ExternalInt16Array) \
- V(ExternalUint16ElementsAccessor, \
- EXTERNAL_UINT16_ELEMENTS, ExternalUint16Array) \
- V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
- ExternalInt32Array) \
- V(ExternalUint32ElementsAccessor, \
- EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array) \
- V(ExternalFloat32ElementsAccessor, \
- EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) \
- V(ExternalFloat64ElementsAccessor, \
- EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) \
- V(ExternalUint8ClampedElementsAccessor, \
- EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
- ExternalUint8ClampedArray) \
- V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
- V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
- V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
- V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
- V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
- V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
- V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
- V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
- V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
+#define ELEMENTS_LIST(V) \
+ V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
+ V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, FixedArray) \
+ V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
+ V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
+ V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
+ V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
+ FixedDoubleArray) \
+ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, SeededNumberDictionary) \
+ V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \
+ FixedArray) \
+ V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \
+ FixedArray) \
+ V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, ExternalInt8Array) \
+ V(ExternalUint8ElementsAccessor, EXTERNAL_UINT8_ELEMENTS, \
+ ExternalUint8Array) \
+ V(ExternalInt16ElementsAccessor, EXTERNAL_INT16_ELEMENTS, \
+ ExternalInt16Array) \
+ V(ExternalUint16ElementsAccessor, EXTERNAL_UINT16_ELEMENTS, \
+ ExternalUint16Array) \
+ V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
+ ExternalInt32Array) \
+ V(ExternalUint32ElementsAccessor, EXTERNAL_UINT32_ELEMENTS, \
+ ExternalUint32Array) \
+ V(ExternalFloat32ElementsAccessor, EXTERNAL_FLOAT32_ELEMENTS, \
+ ExternalFloat32Array) \
+ V(ExternalFloat64ElementsAccessor, EXTERNAL_FLOAT64_ELEMENTS, \
+ ExternalFloat64Array) \
+ V(ExternalUint8ClampedElementsAccessor, EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
+ ExternalUint8ClampedArray) \
+ V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
+ V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
+ V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
+ V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
+ V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
+ V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
+ V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
+ V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
+ V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
FixedUint8ClampedArray)
@@ -121,20 +123,13 @@ ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
-ElementsAccessor** ElementsAccessor::elements_accessors_ = NULL;
-
-
-static bool HasKey(Handle<FixedArray> array, Handle<Object> key_handle) {
+static bool HasIndex(Handle<FixedArray> array, Handle<Object> index_handle) {
DisallowHeapAllocation no_gc;
- Object* key = *key_handle;
+ Object* index = *index_handle;
int len0 = array->length();
for (int i = 0; i < len0; i++) {
Object* element = array->get(i);
- if (element->IsSmi() && element == key) return true;
- if (element->IsString() &&
- key->IsString() && String::cast(element)->Equals(String::cast(key))) {
- return true;
- }
+ if (index->KeyEquals(element)) return true;
}
return false;
}
@@ -493,46 +488,6 @@ static void TraceTopFrame(Isolate* isolate) {
}
-void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key,
- bool allow_appending) {
- DisallowHeapAllocation no_allocation;
- Object* raw_length = NULL;
- const char* elements_type = "array";
- if (obj->IsJSArray()) {
- JSArray* array = JSArray::cast(*obj);
- raw_length = array->length();
- } else {
- raw_length = Smi::FromInt(obj->elements()->length());
- elements_type = "object";
- }
-
- if (raw_length->IsNumber()) {
- double n = raw_length->Number();
- if (FastI2D(FastD2UI(n)) == n) {
- int32_t int32_length = DoubleToInt32(n);
- uint32_t compare_length = static_cast<uint32_t>(int32_length);
- if (allow_appending) compare_length++;
- if (key >= compare_length) {
- PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
- elements_type, op, elements_type,
- static_cast<int>(int32_length),
- static_cast<int>(key));
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
- } else {
- PrintF("[%s elements length not integer value in ", elements_type);
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
- } else {
- PrintF("[%s elements length not a number in ", elements_type);
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
-}
-
-
// Base class for element handler implementations. Contains the
// common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@@ -553,14 +508,14 @@ void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key,
template <typename ElementsAccessorSubclass,
typename ElementsTraitsParam>
class ElementsAccessorBase : public ElementsAccessor {
- protected:
+ public:
explicit ElementsAccessorBase(const char* name)
: ElementsAccessor(name) { }
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
- ElementsKind kind() const final { return ElementsTraits::Kind; }
+ static ElementsKind kind() { return ElementsTraits::Kind; }
static void ValidateContents(Handle<JSObject> holder, int length) {
}
@@ -587,106 +542,147 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateImpl(holder);
}
- static bool HasElementImpl(Handle<JSObject> holder, uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- return ElementsAccessorSubclass::GetAttributesImpl(holder, key,
- backing_store) != ABSENT;
- }
-
- virtual bool HasElement(Handle<JSObject> holder, uint32_t key,
+ virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store) final {
- return ElementsAccessorSubclass::HasElementImpl(holder, key, backing_store);
+ return ElementsAccessorSubclass::GetEntryForIndexImpl(
+ *holder, *backing_store, index) != kMaxUInt32;
}
- MUST_USE_RESULT virtual MaybeHandle<Object> Get(
- Handle<Object> receiver, Handle<JSObject> holder, uint32_t key,
- Handle<FixedArrayBase> backing_store) final {
+ virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store) final {
if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
FLAG_trace_js_array_abuse) {
- CheckArrayAbuse(holder, "elements read", key);
+ CheckArrayAbuse(holder, "elements read", index);
}
if (IsExternalArrayElementsKind(ElementsTraits::Kind) &&
FLAG_trace_external_array_abuse) {
- CheckArrayAbuse(holder, "external elements read", key);
+ CheckArrayAbuse(holder, "external elements read", index);
}
- return ElementsAccessorSubclass::GetImpl(
- receiver, holder, key, backing_store);
+ return ElementsAccessorSubclass::GetImpl(holder, index, backing_store);
}
- MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- if (key < ElementsAccessorSubclass::GetCapacityImpl(obj, backing_store)) {
- return BackingStore::get(Handle<BackingStore>::cast(backing_store), key);
+ static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
+ Handle<FixedArrayBase> backing_store) {
+ if (index <
+ ElementsAccessorSubclass::GetCapacityImpl(*obj, *backing_store)) {
+ return BackingStore::get(Handle<BackingStore>::cast(backing_store),
+ index);
} else {
return backing_store->GetIsolate()->factory()->the_hole_value();
}
}
- MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Handle<JSObject> holder, uint32_t key,
- Handle<FixedArrayBase> backing_store) final {
- return ElementsAccessorSubclass::GetAttributesImpl(holder, key,
- backing_store);
+ virtual void Set(FixedArrayBase* backing_store, uint32_t index,
+ Object* value) final {
+ ElementsAccessorSubclass::SetImpl(backing_store, index, value);
}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- if (key >= ElementsAccessorSubclass::GetCapacityImpl(obj, backing_store)) {
- return ABSENT;
- }
- return
- Handle<BackingStore>::cast(backing_store)->is_the_hole(key)
- ? ABSENT : NONE;
+ static void SetImpl(FixedArrayBase* backing_store, uint32_t index,
+ Object* value) {
+ BackingStore::cast(backing_store)->SetValue(index, value);
}
- MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
- Handle<JSObject> holder, uint32_t key,
- Handle<FixedArrayBase> backing_store) final {
- return ElementsAccessorSubclass::GetAccessorPairImpl(holder, key,
- backing_store);
+ virtual void Reconfigure(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) final {
+ ElementsAccessorSubclass::ReconfigureImpl(object, store, entry, value,
+ attributes);
}
- MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- return MaybeHandle<AccessorPair>();
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ UNREACHABLE();
}
- MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
- Handle<JSArray> array, Handle<Object> length) final {
- return ElementsAccessorSubclass::SetLengthImpl(
- array, length, handle(array->elements()));
+ virtual void Add(Handle<JSObject> object, uint32_t entry,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) final {
+ ElementsAccessorSubclass::AddImpl(object, entry, value, attributes,
+ new_capacity);
}
- MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
- Handle<JSObject> obj,
- Handle<Object> length,
- Handle<FixedArrayBase> backing_store);
+ static void AddImpl(Handle<JSObject> object, uint32_t entry,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ UNREACHABLE();
+ }
+
+ virtual void SetLength(Handle<JSArray> array, uint32_t length) final {
+ ElementsAccessorSubclass::SetLengthImpl(array, length,
+ handle(array->elements()));
+ }
+
+ static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ Handle<FixedArrayBase> backing_store);
+
+ static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, uint32_t capacity) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArrayBase> elements;
+ if (IsFastDoubleElementsKind(kind())) {
+ elements = isolate->factory()->NewFixedDoubleArray(capacity);
+ } else {
+ elements = isolate->factory()->NewUninitializedFixedArray(capacity);
+ }
+
+ int packed = kPackedSizeNotKnown;
+ if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) {
+ packed = Smi::cast(JSArray::cast(*object)->length())->value();
+ }
- virtual void SetCapacityAndLength(Handle<JSArray> array, int capacity,
- int length) final {
- ElementsAccessorSubclass::
- SetFastElementsCapacityAndLength(array, capacity, length);
+ ElementsAccessorSubclass::CopyElementsImpl(
+ *old_elements, 0, *elements, from_kind, 0, packed,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
+ return elements;
+ }
+
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ ElementsKind from_kind = object->GetElementsKind();
+ if (IsFastSmiOrObjectElementsKind(from_kind)) {
+ // Array optimizations rely on the prototype lookups of Array objects
+ // always returning undefined. If there is a store to the initial
+ // prototype object, make sure all of these optimizations are invalidated.
+ object->GetIsolate()->UpdateArrayProtectorOnSetLength(object);
+ }
+ Handle<FixedArrayBase> old_elements(object->elements());
+ // This method should only be called if there's a reason to update the
+ // elements.
+ DCHECK(IsFastDoubleElementsKind(from_kind) !=
+ IsFastDoubleElementsKind(kind()) ||
+ IsDictionaryElementsKind(from_kind) ||
+ static_cast<uint32_t>(old_elements->length()) < capacity);
+ Handle<FixedArrayBase> elements =
+ ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
+
+ ElementsKind to_kind = kind();
+ if (IsHoleyElementsKind(from_kind)) to_kind = GetHoleyElementsKind(to_kind);
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
+ JSObject::SetMapAndElements(object, new_map, elements);
+
+ // Transition through the allocation site as well if present.
+ JSObject::UpdateAllocationSite(object, to_kind);
+
+ if (FLAG_trace_elements_transitions) {
+ JSObject::PrintElementsTransition(stdout, object, from_kind, old_elements,
+ to_kind, elements);
+ }
}
- static void SetFastElementsCapacityAndLength(
- Handle<JSObject> obj,
- int capacity,
- int length) {
- UNIMPLEMENTED();
+ virtual void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) final {
+ ElementsAccessorSubclass::GrowCapacityAndConvertImpl(object, capacity);
}
- MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key,
- LanguageMode language_mode) override = 0;
+ virtual void Delete(Handle<JSObject> obj, uint32_t entry) final {
+ ElementsAccessorSubclass::DeleteImpl(obj, entry);
+ }
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
@@ -737,9 +733,11 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, *to, from_kind, to_start, packed_size, copy_size);
}
- virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
- Handle<Object> receiver, Handle<JSObject> holder, Handle<FixedArray> to,
- Handle<FixedArrayBase> from, FixedArray::KeyFilter filter) final {
+ virtual Handle<FixedArray> AddElementsToFixedArray(
+ Handle<JSObject> receiver, Handle<FixedArray> to,
+ FixedArray::KeyFilter filter) final {
+ Handle<FixedArrayBase> from(receiver->elements());
+
int len0 = to->length();
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -751,7 +749,7 @@ class ElementsAccessorBase : public ElementsAccessor {
// Optimize if 'other' is empty.
// We cannot optimize if 'this' is empty, as other may have holes.
- uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(holder, from);
+ uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
if (len1 == 0) return to;
Isolate* isolate = from->GetIsolate();
@@ -759,19 +757,19 @@ class ElementsAccessorBase : public ElementsAccessor {
// Compute how many elements are not in other.
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
- uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
- if (ElementsAccessorSubclass::HasElementImpl(holder, key, from)) {
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from),
- FixedArray);
+ if (ElementsAccessorSubclass::HasEntryImpl(*from, y)) {
+ uint32_t index =
+ ElementsAccessorSubclass::GetIndexForEntryImpl(*from, y);
+ Handle<Object> value =
+ ElementsAccessorSubclass::GetImpl(receiver, index, from);
DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsAccessorPair());
+ DCHECK(!value->IsExecutableAccessorInfo());
if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
continue;
}
- if (!HasKey(to, value)) {
+ if (!HasIndex(to, value)) {
extra++;
}
}
@@ -793,48 +791,71 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
// Fill in the extra values.
- uint32_t index = 0;
+ uint32_t entry = 0;
for (uint32_t y = 0; y < len1; y++) {
- uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
- if (ElementsAccessorSubclass::HasElementImpl(holder, key, from)) {
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from),
- FixedArray);
+ if (ElementsAccessorSubclass::HasEntryImpl(*from, y)) {
+ uint32_t index =
+ ElementsAccessorSubclass::GetIndexForEntryImpl(*from, y);
+ Handle<Object> value =
+ ElementsAccessorSubclass::GetImpl(receiver, index, from);
+ DCHECK(!value->IsAccessorPair());
+ DCHECK(!value->IsExecutableAccessorInfo());
if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
continue;
}
- if (!value->IsTheHole() && !HasKey(to, value)) {
- result->set(len0 + index, *value);
- index++;
+ if (!value->IsTheHole() && !HasIndex(to, value)) {
+ result->set(len0 + entry, *value);
+ entry++;
}
}
}
- DCHECK(extra == index);
+ DCHECK(extra == entry);
return result;
}
- protected:
- static uint32_t GetCapacityImpl(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store) {
+ static uint32_t GetCapacityImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
return backing_store->length();
}
- uint32_t GetCapacity(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store) final {
+ uint32_t GetCapacity(JSObject* holder, FixedArrayBase* backing_store) final {
return ElementsAccessorSubclass::GetCapacityImpl(holder, backing_store);
}
- static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> backing_store,
- uint32_t index) {
- return index;
+ static bool HasEntryImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ return true;
}
- virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
- uint32_t index) final {
- return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return entry;
+ }
+
+ static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) {
+ return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
+ backing_store) &&
+ !BackingStore::cast(backing_store)->is_the_hole(index)
+ ? index
+ : kMaxUInt32;
+ }
+
+ virtual uint32_t GetEntryForIndex(JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) final {
+ return ElementsAccessorSubclass::GetEntryForIndexImpl(holder, backing_store,
+ index);
+ }
+
+ static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+ }
+
+ virtual PropertyDetails GetDetails(FixedArrayBase* backing_store,
+ uint32_t entry) final {
+ return ElementsAccessorSubclass::GetDetailsImpl(backing_store, entry);
}
private:
@@ -842,6 +863,168 @@ class ElementsAccessorBase : public ElementsAccessor {
};
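
ElementsAccessorBase shows the dispatch scheme used throughout this file: every virtual entry point is a final override that forwards to a static ...Impl on the CRTP subclass, so specializations shadow statics instead of paying for a second virtual call. A minimal sketch of that pattern (illustrative names, not V8 code):

    #include <cstdio>

    // Minimal CRTP sketch of the dispatch used by ElementsAccessorBase:
    // the virtual entry point is final and forwards to a static ...Impl
    // that subclasses shadow.
    class Accessor {
     public:
      virtual ~Accessor() = default;
      virtual int Get(int index) = 0;
    };

    template <typename Subclass>
    class AccessorBase : public Accessor {
     public:
      int Get(int index) final { return Subclass::GetImpl(index); }
      static int GetImpl(int index) { return index; }  // Default behavior.
    };

    class DoublingAccessor : public AccessorBase<DoublingAccessor> {
     public:
      // Shadows the base's static; picked up by the forwarding virtual.
      static int GetImpl(int index) { return 2 * index; }
    };

    int main() {
      DoublingAccessor accessor;
      Accessor* a = &accessor;
      std::printf("%d\n", a->Get(21));  // 42, via static dispatch
    }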
+class DictionaryElementsAccessor
+ : public ElementsAccessorBase<DictionaryElementsAccessor,
+ ElementsKindTraits<DICTIONARY_ELEMENTS> > {
+ public:
+ explicit DictionaryElementsAccessor(const char* name)
+ : ElementsAccessorBase<DictionaryElementsAccessor,
+ ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
+
+ static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
+ Handle<SeededNumberDictionary> dict =
+ Handle<SeededNumberDictionary>::cast(backing_store);
+ Isolate* isolate = array->GetIsolate();
+ int capacity = dict->Capacity();
+ uint32_t old_length = 0;
+ CHECK(array->length()->ToArrayLength(&old_length));
+ if (length < old_length) {
+ if (dict->requires_slow_elements()) {
+ // Find last non-deletable element in range of elements to be
+ // deleted and adjust range accordingly.
+ for (int entry = 0; entry < capacity; entry++) {
+ DisallowHeapAllocation no_gc;
+ Object* index = dict->KeyAt(entry);
+ if (index->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(index->Number());
+ if (length <= number && number < old_length) {
+ PropertyDetails details = dict->DetailsAt(entry);
+ if (!details.IsConfigurable()) length = number + 1;
+ }
+ }
+ }
+ }
+
+ if (length == 0) {
+ // Flush the backing store.
+ JSObject::ResetElements(array);
+ } else {
+ DisallowHeapAllocation no_gc;
+ // Remove elements that should be deleted.
+ int removed_entries = 0;
+ Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
+ for (int entry = 0; entry < capacity; entry++) {
+ Object* index = dict->KeyAt(entry);
+ if (index->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(index->Number());
+ if (length <= number && number < old_length) {
+ dict->SetEntry(entry, the_hole_value, the_hole_value);
+ removed_entries++;
+ }
+ }
+ }
+
+ // Update the number of elements.
+ dict->ElementsRemoved(removed_entries);
+ }
+ }
+
+ Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
+ array->set_length(*length_obj);
+ }
+
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
+ int copy_size) {
+ UNREACHABLE();
+ }
+
+
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ // TODO(verwaest): Remove reliance on index in Shrink.
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(obj->elements()));
+ uint32_t index = GetIndexForEntryImpl(*dict, entry);
+ Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
+ USE(result);
+ DCHECK(result->IsTrue());
+ Handle<FixedArray> new_elements =
+ SeededNumberDictionary::Shrink(dict, index);
+ obj->set_elements(*new_elements);
+ }
+
+ static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
+ Handle<FixedArrayBase> store) {
+ Handle<SeededNumberDictionary> backing_store =
+ Handle<SeededNumberDictionary>::cast(store);
+ Isolate* isolate = backing_store->GetIsolate();
+ int entry = backing_store->FindEntry(index);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ return handle(backing_store->ValueAt(entry), isolate);
+ }
+ return isolate->factory()->the_hole_value();
+ }
+
+ static void SetImpl(FixedArrayBase* store, uint32_t index, Object* value) {
+ SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
+ int entry = dictionary->FindEntry(index);
+ DCHECK_NE(SeededNumberDictionary::kNotFound, entry);
+ dictionary->ValueAtPut(entry, value);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(*store);
+ if (attributes != NONE) dictionary->set_requires_slow_elements();
+ dictionary->ValueAtPut(entry, *value);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ details = PropertyDetails(attributes, DATA, details.dictionary_index(),
+ PropertyCellType::kNoCell);
+ dictionary->DetailsAtPut(entry, details);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t entry,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ Handle<SeededNumberDictionary> dictionary =
+ object->HasFastElements()
+ ? JSObject::NormalizeElements(object)
+ : handle(SeededNumberDictionary::cast(object->elements()));
+ Handle<SeededNumberDictionary> new_dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, entry, value,
+ details);
+ if (attributes != NONE) new_dictionary->set_requires_slow_elements();
+ if (dictionary.is_identical_to(new_dictionary)) return;
+ object->set_elements(*new_dictionary);
+ }
+
+ static bool HasEntryImpl(FixedArrayBase* store, uint32_t entry) {
+ DisallowHeapAllocation no_gc;
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
+ Object* index = dict->KeyAt(entry);
+ return !index->IsTheHole();
+ }
+
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
+ DisallowHeapAllocation no_gc;
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
+ uint32_t result = 0;
+ CHECK(dict->KeyAt(entry)->ToArrayIndex(&result));
+ return result;
+ }
+
+ static uint32_t GetEntryForIndexImpl(JSObject* holder, FixedArrayBase* store,
+ uint32_t index) {
+ DisallowHeapAllocation no_gc;
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
+ int entry = dict->FindEntry(index);
+ return entry == SeededNumberDictionary::kNotFound
+ ? kMaxUInt32
+ : static_cast<uint32_t>(entry);
+ }
+
+ static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
+ }
+};
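
DictionaryElementsAccessor separates script-visible indices from dictionary entries, with kMaxUInt32 doubling as the not-found sentinel in GetEntryForIndexImpl. A standalone sketch of that index-to-entry lookup, using std::unordered_map in place of SeededNumberDictionary (illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    // Sketch of the index/entry split: script-visible indices map to
    // dictionary entries, and lookup returns a sentinel when absent.
    constexpr uint32_t kNotFound = UINT32_MAX;  // analogue of kMaxUInt32

    struct DictionaryStore {
      // index -> entry number in some backing table.
      std::unordered_map<uint32_t, uint32_t> entries;

      uint32_t GetEntryForIndex(uint32_t index) const {
        auto it = entries.find(index);
        return it == entries.end() ? kNotFound : it->second;
      }
    };

    int main() {
      DictionaryStore store;
      store.entries[1000000] = 0;  // Sparse element at index 1000000.
      std::printf("%u\n", store.GetEntryForIndex(1000000));             // 0
      std::printf("found=%d\n", store.GetEntryForIndex(7) != kNotFound);  // 0
    }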
+
+
// Super class for all fast element arrays.
template<typename FastElementsAccessorSubclass,
typename KindTraits>
@@ -851,134 +1034,131 @@ class FastElementsAccessor
explicit FastElementsAccessor(const char* name)
: ElementsAccessorBase<FastElementsAccessorSubclass,
KindTraits>(name) {}
- protected:
- friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
- friend class SloppyArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
- // Adjusts the length of the fast backing store.
- static Handle<Object> SetLengthWithoutNormalize(
- Handle<FixedArrayBase> backing_store,
- Handle<JSArray> array,
- Handle<Object> length_object,
- uint32_t length) {
- Isolate* isolate = array->GetIsolate();
- uint32_t old_capacity = backing_store->length();
- Handle<Object> old_length(array->length(), isolate);
- bool same_or_smaller_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Handle<Smi>::cast(old_length)->value()) >= length;
- ElementsKind kind = array->GetElementsKind();
-
- if (!same_or_smaller_size && IsFastElementsKind(kind) &&
- !IsFastHoleyElementsKind(kind)) {
- kind = GetHoleyElementsKind(kind);
- JSObject::TransitionElementsKind(array, kind);
+ static void DeleteAtEnd(Handle<JSObject> obj,
+ Handle<BackingStore> backing_store, uint32_t entry) {
+ uint32_t length = static_cast<uint32_t>(backing_store->length());
+ Heap* heap = obj->GetHeap();
+ for (; entry > 0; entry--) {
+ if (!backing_store->is_the_hole(entry - 1)) break;
}
-
- // Check whether the backing store should be shrunk.
- if (length <= old_capacity) {
- if (array->HasFastSmiOrObjectElements()) {
- backing_store = JSObject::EnsureWritableFastElements(array);
- }
- if (2 * length <= old_capacity) {
- // If more than half the elements won't be used, trim the array.
- if (length == 0) {
- array->initialize_elements();
- } else {
- isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *backing_store, old_capacity - length);
- }
+ if (entry == 0) {
+ FixedArray* empty = heap->empty_fixed_array();
+ if (obj->HasFastArgumentsElements()) {
+ FixedArray::cast(obj->elements())->set(1, empty);
} else {
- // Otherwise, fill the unused tail with holes.
- int old_length = FastD2IChecked(array->length()->Number());
- for (int i = length; i < old_length; i++) {
- Handle<BackingStore>::cast(backing_store)->set_the_hole(i);
- }
+ obj->set_elements(empty);
}
- return length_object;
+ return;
}
- // Check whether the backing store should be expanded.
- uint32_t min = JSObject::NewElementsCapacity(old_capacity);
- uint32_t new_capacity = length > min ? length : min;
- FastElementsAccessorSubclass::SetFastElementsCapacityAndLength(
- array, new_capacity, length);
- JSObject::ValidateElements(array);
- return length_object;
+ heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*backing_store,
+ length - entry);
}
- static Handle<Object> DeleteCommon(Handle<JSObject> obj, uint32_t key,
- LanguageMode language_mode) {
+ static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
+ Handle<FixedArrayBase> store) {
DCHECK(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
- Isolate* isolate = obj->GetIsolate();
- Heap* heap = obj->GetHeap();
- Handle<FixedArrayBase> elements(obj->elements());
- if (*elements == heap->empty_fixed_array()) {
- return isolate->factory()->true_value();
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
+ if (!obj->IsJSArray() &&
+ entry == static_cast<uint32_t>(store->length()) - 1) {
+ DeleteAtEnd(obj, backing_store, entry);
+ return;
}
- Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
- bool is_sloppy_arguments_elements_map =
- backing_store->map() == heap->sloppy_arguments_elements_map();
- if (is_sloppy_arguments_elements_map) {
- backing_store = handle(
- BackingStore::cast(Handle<FixedArray>::cast(backing_store)->get(1)),
- isolate);
+
+ backing_store->set_the_hole(entry);
+
+ // TODO(verwaest): Move this out of elements.cc.
+ // If an old space backing store is larger than a certain size and
+ // has too few used values, normalize it.
+ // To avoid doing the check on every delete we require at least
+ // one adjacent hole to the value being deleted.
+ const int kMinLengthForSparsenessCheck = 64;
+ if (backing_store->length() < kMinLengthForSparsenessCheck) return;
+ if (backing_store->GetHeap()->InNewSpace(*backing_store)) return;
+ uint32_t length = 0;
+ if (obj->IsJSArray()) {
+ JSArray::cast(*obj)->length()->ToArrayLength(&length);
+ } else {
+ length = static_cast<uint32_t>(store->length());
}
- uint32_t length = static_cast<uint32_t>(
- obj->IsJSArray()
- ? Smi::cast(Handle<JSArray>::cast(obj)->length())->value()
- : backing_store->length());
- if (key < length) {
- if (!is_sloppy_arguments_elements_map) {
- ElementsKind kind = KindTraits::Kind;
- if (IsFastPackedElementsKind(kind)) {
- JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
+ if ((entry > 0 && backing_store->is_the_hole(entry - 1)) ||
+ (entry + 1 < length && backing_store->is_the_hole(entry + 1))) {
+ if (!obj->IsJSArray()) {
+ uint32_t i;
+ for (i = entry + 1; i < length; i++) {
+ if (!backing_store->is_the_hole(i)) break;
}
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Handle<Object> writable = JSObject::EnsureWritableFastElements(obj);
- backing_store = Handle<BackingStore>::cast(writable);
+ if (i == length) {
+ DeleteAtEnd(obj, backing_store, entry);
+ return;
}
}
- backing_store->set_the_hole(key);
- // If an old space backing store is larger than a certain size and
- // has too few used values, normalize it.
- // To avoid doing the check on every delete we require at least
- // one adjacent hole to the value being deleted.
- const int kMinLengthForSparsenessCheck = 64;
- if (backing_store->length() >= kMinLengthForSparsenessCheck &&
- !heap->InNewSpace(*backing_store) &&
- ((key > 0 && backing_store->is_the_hole(key - 1)) ||
- (key + 1 < length && backing_store->is_the_hole(key + 1)))) {
- int num_used = 0;
- for (int i = 0; i < backing_store->length(); ++i) {
- if (!backing_store->is_the_hole(i)) ++num_used;
- // Bail out early if more than 1/4 is used.
- if (4 * num_used > backing_store->length()) break;
- }
- if (4 * num_used <= backing_store->length()) {
- JSObject::NormalizeElements(obj);
- }
+ int num_used = 0;
+ for (int i = 0; i < backing_store->length(); ++i) {
+ if (!backing_store->is_the_hole(i)) ++num_used;
+ // Bail out early if more than 1/4 is used.
+ if (4 * num_used > backing_store->length()) break;
+ }
+ if (4 * num_used <= backing_store->length()) {
+ JSObject::NormalizeElements(obj);
}
}
- return isolate->factory()->true_value();
}
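// The normalization heuristic above, as a standalone sketch (illustrative
// model, not the V8 types; the real code additionally skips new-space stores
// and, for non-arrays, prefers trimming when everything above the entry is
// already holes): stores shorter than 64 slots are never normalized, the
// scan only runs when a hole sits next to the deleted entry, and the store
// is normalized when at most a quarter of it is in use.
#include <optional>
#include <vector>

template <typename T>
bool ShouldNormalizeSketch(const std::vector<std::optional<T>>& store,
                           size_t entry) {
  const size_t kMinLengthForSparsenessCheck = 64;
  if (store.size() < kMinLengthForSparsenessCheck) return false;
  const bool adjacent_hole =
      (entry > 0 && !store[entry - 1].has_value()) ||
      (entry + 1 < store.size() && !store[entry + 1].has_value());
  if (!adjacent_hole) return false;
  size_t num_used = 0;
  for (const auto& slot : store) {
    if (slot.has_value()) ++num_used;
    // Bail out early once more than 1/4 is known to be used.
    if (4 * num_used > store.size()) return false;
  }
  return true;
}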
- virtual MaybeHandle<Object> Delete(Handle<JSObject> obj, uint32_t key,
- LanguageMode language_mode) final {
- return DeleteCommon(obj, key, language_mode);
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(object);
+ entry = dictionary->FindEntry(entry);
+ DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry,
+ value, attributes);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t entry,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ DCHECK_EQ(NONE, attributes);
+ ElementsKind from_kind = object->GetElementsKind();
+ ElementsKind to_kind = FastElementsAccessorSubclass::kind();
+ if (IsDictionaryElementsKind(from_kind) ||
+ IsFastDoubleElementsKind(from_kind) !=
+ IsFastDoubleElementsKind(to_kind) ||
+ FastElementsAccessorSubclass::GetCapacityImpl(
+ *object, object->elements()) != new_capacity) {
+ FastElementsAccessorSubclass::GrowCapacityAndConvertImpl(object,
+ new_capacity);
+ } else {
+ if (from_kind != to_kind) {
+ JSObject::TransitionElementsKind(object, to_kind);
+ }
+ if (IsFastSmiOrObjectElementsKind(from_kind)) {
+ DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
+ JSObject::EnsureWritableFastElements(object);
+ }
+ }
+ FastElementsAccessorSubclass::SetImpl(object->elements(), entry, *value);
}
- static bool HasElementImpl(
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- if (key >= static_cast<uint32_t>(backing_store->length())) {
- return false;
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ ElementsKind kind = KindTraits::Kind;
+ if (IsFastPackedElementsKind(kind)) {
+ JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
}
- return !Handle<BackingStore>::cast(backing_store)->is_the_hole(key);
+ if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ JSObject::EnsureWritableFastElements(obj);
+ }
+ DeleteCommon(obj, entry, handle(obj->elements()));
+ }
+
+ static bool HasEntryImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ return !BackingStore::cast(backing_store)->is_the_hole(entry);
}
static void ValidateContents(Handle<JSObject> holder, int length) {
@@ -996,44 +1176,18 @@ class FastElementsAccessor
if (length == 0) return; // nothing to do!
DisallowHeapAllocation no_gc;
Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
- for (int i = 0; i < length; i++) {
- DCHECK((!IsFastSmiElementsKind(KindTraits::Kind) ||
- BackingStore::get(backing_store, i)->IsSmi()) ||
- (IsFastHoleyElementsKind(KindTraits::Kind) ==
- backing_store->is_the_hole(i)));
+ if (IsFastSmiElementsKind(KindTraits::Kind)) {
+ for (int i = 0; i < length; i++) {
+ DCHECK(BackingStore::get(backing_store, i)->IsSmi() ||
+ (IsFastHoleyElementsKind(KindTraits::Kind) &&
+ backing_store->is_the_hole(i)));
+ }
}
#endif
}
};
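// The tightened invariant above, standalone: in a FAST_SMI_ELEMENTS store
// every slot holds a small integer, and only the holey variant may also
// contain holes. Sketched with optionals (illustrative only):
#include <cassert>
#include <optional>
#include <vector>

void ValidateSmiContentsSketch(const std::vector<std::optional<int>>& store,
                               bool is_holey_kind) {
  for (const auto& slot : store) {
    // A hole (nullopt) is only legal when the elements kind is holey.
    assert(slot.has_value() || is_holey_kind);
  }
}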
-static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
- switch (array->map()->instance_type()) {
- case FIXED_ARRAY_TYPE:
- if (array->IsDictionary()) {
- return DICTIONARY_ELEMENTS;
- } else {
- return FAST_HOLEY_ELEMENTS;
- }
- case FIXED_DOUBLE_ARRAY_TYPE:
- return FAST_HOLEY_DOUBLE_ELEMENTS;
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- return EXTERNAL_##TYPE##_ELEMENTS; \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- return TYPE##_ELEMENTS;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- UNREACHABLE();
- }
- return FAST_HOLEY_ELEMENTS;
-}
-
-
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
@@ -1073,17 +1227,9 @@ class FastSmiOrObjectElementsAccessor
CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
copy_size);
break;
- case SLOPPY_ARGUMENTS_ELEMENTS: {
- // TODO(verwaest): This is a temporary hack to support extending
- // SLOPPY_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
- // This case should be UNREACHABLE().
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
- ElementsKind from_kind = ElementsKindForArray(arguments);
- CopyElementsImpl(arguments, from_start, to, from_kind,
- to_start, packed_size, copy_size);
- break;
- }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
@@ -1092,19 +1238,6 @@ class FastSmiOrObjectElementsAccessor
#undef TYPED_ARRAY_CASE
}
}
-
-
- static void SetFastElementsCapacityAndLength(
- Handle<JSObject> obj,
- uint32_t capacity,
- uint32_t length) {
- JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
- obj->HasFastSmiElements()
- ? JSObject::kAllowSmiElements
- : JSObject::kDontAllowSmiElements;
- JSObject::SetFastElementsCapacityAndLength(
- obj, capacity, length, set_capacity_mode);
- }
};
@@ -1165,13 +1298,6 @@ class FastDoubleElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
- static void SetFastElementsCapacityAndLength(Handle<JSObject> obj,
- uint32_t capacity,
- uint32_t length) {
- JSObject::SetFastDoubleElementsCapacityAndLength(obj, capacity, length);
- }
-
- protected:
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
@@ -1197,7 +1323,8 @@ class FastDoubleElementsAccessor
CopyDictionaryToDoubleElements(from, from_start, to, to_start,
copy_size);
break;
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -1216,8 +1343,6 @@ class FastPackedDoubleElementsAccessor
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
public:
- friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
explicit FastPackedDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
@@ -1230,9 +1355,6 @@ class FastHoleyDoubleElementsAccessor
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
public:
- friend class ElementsAccessorBase<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
explicit FastHoleyDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
@@ -1250,57 +1372,45 @@ class TypedElementsAccessor
: ElementsAccessorBase<AccessorClass,
ElementsKindTraits<Kind> >(name) {}
- protected:
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
typedef TypedElementsAccessor<Kind> AccessorClass;
- friend class ElementsAccessorBase<AccessorClass,
- ElementsKindTraits<Kind> >;
-
- MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- if (key < AccessorClass::GetCapacityImpl(obj, backing_store)) {
- return BackingStore::get(Handle<BackingStore>::cast(backing_store), key);
+ static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
+ Handle<FixedArrayBase> backing_store) {
+ if (index < AccessorClass::GetCapacityImpl(*obj, *backing_store)) {
+ return BackingStore::get(Handle<BackingStore>::cast(backing_store),
+ index);
} else {
return backing_store->GetIsolate()->factory()->undefined_value();
}
}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- return key < AccessorClass::GetCapacityImpl(obj, backing_store) ? NONE
- : ABSENT;
+ static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
}
- MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
- Handle<JSObject> obj,
- Handle<Object> length,
- Handle<FixedArrayBase> backing_store) {
+ static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
- return obj;
}
- MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) final {
- // External arrays always ignore deletes.
- return obj->GetIsolate()->factory()->true_value();
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ UNREACHABLE();
}
- static bool HasElementImpl(Handle<JSObject> holder, uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- uint32_t capacity = AccessorClass::GetCapacityImpl(holder, backing_store);
- return key < capacity;
+ static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) {
+ return index < AccessorClass::GetCapacityImpl(holder, backing_store)
+ ? index
+ : kMaxUInt32;
}
- static uint32_t GetCapacityImpl(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store) {
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
+ static uint32_t GetCapacityImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ JSArrayBufferView* view = JSArrayBufferView::cast(holder);
if (view->WasNeutered()) return 0;
return backing_store->length();
}
@@ -1323,480 +1433,428 @@ TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
#undef FIXED_ELEMENTS_ACCESSOR
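// Entry lookup for the typed-array accessors above, sketched standalone:
// entries and indices coincide, kMaxUInt32 doubles as the "no entry"
// sentinel, and a view over a neutered (detached) buffer reports zero
// capacity. Names here are illustrative:
#include <cstdint>

constexpr uint32_t kNoEntrySentinel = UINT32_MAX;

uint32_t TypedEntryForIndexSketch(uint32_t capacity, bool was_neutered,
                                  uint32_t index) {
  if (was_neutered) capacity = 0;
  return index < capacity ? index : kNoEntrySentinel;
}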
-
-class DictionaryElementsAccessor
- : public ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> > {
+template <typename SloppyArgumentsElementsAccessorSubclass,
+ typename ArgumentsAccessor, typename KindTraits>
+class SloppyArgumentsElementsAccessor
+ : public ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
+ KindTraits> {
public:
- explicit DictionaryElementsAccessor(const char* name)
- : ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
-
- // Adjusts the length of the dictionary backing store and returns the new
- // length according to ES5 section 15.4.5.2 behavior.
- static Handle<Object> SetLengthWithoutNormalize(
- Handle<FixedArrayBase> store,
- Handle<JSArray> array,
- Handle<Object> length_object,
- uint32_t length) {
- Handle<SeededNumberDictionary> dict =
- Handle<SeededNumberDictionary>::cast(store);
- Isolate* isolate = array->GetIsolate();
- int capacity = dict->Capacity();
- uint32_t new_length = length;
- uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
- if (new_length < old_length) {
- // Find last non-deletable element in range of elements to be
- // deleted and adjust range accordingly.
- for (int i = 0; i < capacity; i++) {
- DisallowHeapAllocation no_gc;
- Object* key = dict->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (new_length <= number && number < old_length) {
- PropertyDetails details = dict->DetailsAt(i);
- if (!details.IsConfigurable()) new_length = number + 1;
- }
- }
- }
- if (new_length != length) {
- length_object = isolate->factory()->NewNumberFromUint(new_length);
- }
- }
+ explicit SloppyArgumentsElementsAccessor(const char* name)
+ : ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
+ KindTraits>(name) {}
- if (new_length == 0) {
- // Flush the backing store.
- JSObject::ResetElements(array);
- } else {
+ static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
+ Handle<FixedArrayBase> parameters) {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+ Handle<Object> probe(GetParameterMapArg(*parameter_map, index), isolate);
+ if (!probe->IsTheHole()) {
DisallowHeapAllocation no_gc;
- // Remove elements that should be deleted.
- int removed_entries = 0;
- Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
- for (int i = 0; i < capacity; i++) {
- Object* key = dict->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (new_length <= number && number < old_length) {
- dict->SetEntry(i, the_hole_value, the_hole_value);
- removed_entries++;
- }
- }
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_entry = Handle<Smi>::cast(probe)->value();
+ DCHECK(!context->get(context_entry)->IsTheHole());
+ return handle(context->get(context_entry), isolate);
+ } else {
+ // Object is not mapped, defer to the arguments.
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)),
+ isolate);
+ Handle<Object> result = ArgumentsAccessor::GetImpl(obj, index, arguments);
+ // Elements of the arguments object in slow mode might be slow aliases.
+ if (result->IsAliasedArgumentsEntry()) {
+ DisallowHeapAllocation no_gc;
+ AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*result);
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_entry = entry->aliased_context_slot();
+ DCHECK(!context->get(context_entry)->IsTheHole());
+ return handle(context->get(context_entry), isolate);
+ } else {
+ return result;
}
-
- // Update the number of elements.
- dict->ElementsRemoved(removed_entries);
}
- return length_object;
}
- MUST_USE_RESULT static MaybeHandle<Object> DeleteCommon(
- Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) {
- Isolate* isolate = obj->GetIsolate();
- Handle<FixedArray> backing_store(FixedArray::cast(obj->elements()),
- isolate);
- bool is_arguments =
- (obj->GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS);
- if (is_arguments) {
- backing_store = handle(FixedArray::cast(backing_store->get(1)), isolate);
- }
- Handle<SeededNumberDictionary> dictionary =
- Handle<SeededNumberDictionary>::cast(backing_store);
- int entry = dictionary->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- Handle<Object> result =
- SeededNumberDictionary::DeleteProperty(dictionary, entry);
- if (*result == *isolate->factory()->false_value()) {
- if (is_strict(language_mode)) {
- // Deleting a non-configurable property in strict mode.
- Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
- Handle<Object> args[2] = { name, obj };
- THROW_NEW_ERROR(isolate, NewTypeError("strict_delete_property",
- HandleVector(args, 2)),
- Object);
- }
- return isolate->factory()->false_value();
- }
- Handle<FixedArray> new_elements =
- SeededNumberDictionary::Shrink(dictionary, key);
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ UNREACHABLE();
+ }
- if (is_arguments) {
- FixedArray::cast(obj->elements())->set(1, *new_elements);
- } else {
- obj->set_elements(*new_elements);
- }
+ static void SetImpl(FixedArrayBase* store, uint32_t index, Object* value) {
+ FixedArray* parameter_map = FixedArray::cast(store);
+ Object* probe = GetParameterMapArg(parameter_map, index);
+ if (!probe->IsTheHole()) {
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_entry = Smi::cast(probe)->value();
+ DCHECK(!context->get(context_entry)->IsTheHole());
+ context->set(context_entry, value);
+ } else {
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ ArgumentsAccessor::SetImpl(arguments, index, value);
}
- return isolate->factory()->true_value();
}
- static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
- FixedArrayBase* to, ElementsKind from_kind,
- uint32_t to_start, int packed_size,
- int copy_size) {
+ static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ Handle<FixedArrayBase> parameter_map) {
+ // Sloppy arguments objects are not arrays.
UNREACHABLE();
}
+ static uint32_t GetCapacityImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ return parameter_map->length() - 2 +
+ ArgumentsAccessor::GetCapacityImpl(holder, arguments);
+ }
- protected:
- friend class ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> >;
+ static bool HasEntryImpl(FixedArrayBase* parameters, uint32_t entry) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ uint32_t length = parameter_map->length() - 2;
+ if (entry < length) {
+ return !GetParameterMapArg(parameter_map, entry)->IsTheHole();
+ }
- MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) final {
- return DeleteCommon(obj, key, language_mode);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ return ArgumentsAccessor::HasEntryImpl(arguments, entry - length);
}
- MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> store) {
- Handle<SeededNumberDictionary> backing_store =
- Handle<SeededNumberDictionary>::cast(store);
- Isolate* isolate = backing_store->GetIsolate();
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- Handle<Object> element(backing_store->ValueAt(entry), isolate);
- PropertyDetails details = backing_store->DetailsAt(entry);
- if (details.type() == ACCESSOR_CONSTANT) {
- return JSObject::GetElementWithCallback(
- obj, receiver, element, key, obj);
- } else {
- return element;
- }
- }
- return isolate->factory()->the_hole_value();
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase* parameters,
+ uint32_t entry) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ uint32_t length = parameter_map->length() - 2;
+ if (entry < length) return entry;
+
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- Handle<SeededNumberDictionary> dictionary =
- Handle<SeededNumberDictionary>::cast(backing_store);
- int entry = dictionary->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- return dictionary->DetailsAt(entry).attributes();
- }
- return ABSENT;
+ static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ FixedArrayBase* parameters,
+ uint32_t index) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ Object* probe = GetParameterMapArg(parameter_map, index);
+ if (!probe->IsTheHole()) return index;
+
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ uint32_t entry =
+ ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments, index);
+ if (entry == kMaxUInt32) return entry;
+ return (parameter_map->length() - 2) + entry;
}
- MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> store) {
- Handle<SeededNumberDictionary> backing_store =
- Handle<SeededNumberDictionary>::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound &&
- backing_store->DetailsAt(entry).type() == ACCESSOR_CONSTANT &&
- backing_store->ValueAt(entry)->IsAccessorPair()) {
- return handle(AccessorPair::cast(backing_store->ValueAt(entry)));
+ static PropertyDetails GetDetailsImpl(FixedArrayBase* parameters,
+ uint32_t entry) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ uint32_t length = parameter_map->length() - 2;
+ if (entry < length) {
+ return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
}
- return MaybeHandle<AccessorPair>();
+ entry -= length;
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ArgumentsAccessor::GetDetailsImpl(arguments, entry);
}
- static bool HasElementImpl(Handle<JSObject> holder, uint32_t key,
- Handle<FixedArrayBase> store) {
- Handle<SeededNumberDictionary> backing_store =
- Handle<SeededNumberDictionary>::cast(store);
- return backing_store->FindEntry(key) != SeededNumberDictionary::kNotFound;
+ static Object* GetParameterMapArg(FixedArray* parameter_map, uint32_t index) {
+ uint32_t length = parameter_map->length() - 2;
+ return index < length
+ ? parameter_map->get(index + 2)
+ : Object::cast(parameter_map->GetHeap()->the_hole_value());
}
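// The parameter map probed above has a fixed layout: slot 0 holds the
// context, slot 1 the arguments backing store, and slots 2..n+1 one entry
// per mapped parameter, each either a Smi context-slot index (the argument
// aliases a context variable) or the hole (the argument lives in the
// backing store). A standalone model with illustrative types:
#include <cstdint>
#include <optional>
#include <vector>

struct ParameterMapModel {
  // nullopt models the hole; a value is a context slot index.
  std::vector<std::optional<uint32_t>> mapped;  // slots [2..] in V8

  // Mirrors GetParameterMapArg: out-of-range probes read as the hole.
  std::optional<uint32_t> Probe(uint32_t index) const {
    return index < mapped.size() ? mapped[index] : std::nullopt;
  }
};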
- static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> store,
- uint32_t index) {
- DisallowHeapAllocation no_gc;
- Handle<SeededNumberDictionary> dict =
- Handle<SeededNumberDictionary>::cast(store);
- Object* key = dict->KeyAt(index);
- return Smi::cast(key)->value();
+ static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
+ FixedArray* parameter_map = FixedArray::cast(obj->elements());
+ uint32_t length = static_cast<uint32_t>(parameter_map->length()) - 2;
+ if (entry < length) {
+ // TODO(kmillikin): We could check if this was the last aliased
+ // parameter, and revert to normal elements in that case. That
+ // would enable GC of the context.
+ parameter_map->set_the_hole(entry + 2);
+ } else {
+ SloppyArgumentsElementsAccessorSubclass::DeleteFromArguments(
+ obj, entry - length);
+ }
}
};
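// Entry numbering in this accessor is two-part, as the Has/Get/Delete
// methods above show: entries [0, length) address the mapped parameters
// directly, while entries at or above length address the arguments store,
// shifted down by length. A standalone sketch of the split (illustrative):
#include <cstdint>

struct ResolvedEntry {
  bool in_parameter_map;  // true: mapped parameter; false: arguments store
  uint32_t local_entry;   // entry within the chosen store
};

ResolvedEntry ResolveSloppyEntrySketch(uint32_t mapped_length,
                                       uint32_t entry) {
  if (entry < mapped_length) return {true, entry};
  return {false, entry - mapped_length};
}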
-class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
- SloppyArgumentsElementsAccessor,
- ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> > {
+class SlowSloppyArgumentsElementsAccessor
+ : public SloppyArgumentsElementsAccessor<
+ SlowSloppyArgumentsElementsAccessor, DictionaryElementsAccessor,
+ ElementsKindTraits<SLOW_SLOPPY_ARGUMENTS_ELEMENTS> > {
public:
- explicit SloppyArgumentsElementsAccessor(const char* name)
- : ElementsAccessorBase<
- SloppyArgumentsElementsAccessor,
- ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
- protected:
- friend class ElementsAccessorBase<
- SloppyArgumentsElementsAccessor,
- ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >;
-
- MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
- Handle<Object> receiver,
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> parameters) {
- Isolate* isolate = obj->GetIsolate();
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
- Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- DisallowHeapAllocation no_gc;
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = Handle<Smi>::cast(probe)->value();
- DCHECK(!context->get(context_index)->IsTheHole());
- return handle(context->get(context_index), isolate);
- } else {
- // Object is not mapped, defer to the arguments.
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)),
- isolate);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- ElementsAccessor::ForArray(arguments)->Get(
- receiver, obj, key, arguments),
- Object);
- // Elements of the arguments object in slow mode might be slow aliases.
- if (result->IsAliasedArgumentsEntry()) {
- DisallowHeapAllocation no_gc;
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*result);
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = entry->aliased_context_slot();
- DCHECK(!context->get(context_index)->IsTheHole());
- return handle(context->get(context_index), isolate);
- } else {
- return result;
- }
- }
- }
+ explicit SlowSloppyArgumentsElementsAccessor(const char* name)
+ : SloppyArgumentsElementsAccessor<
+ SlowSloppyArgumentsElementsAccessor, DictionaryElementsAccessor,
+ ElementsKindTraits<SLOW_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) {
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(backing_store);
- Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return NONE;
- } else {
- // If not aliased, check the arguments.
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- return ElementsAccessor::ForArray(arguments)
- ->GetAttributes(obj, key, arguments);
+ static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
+ Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(parameter_map->get(1)));
+ // TODO(verwaest): Remove reliance on index in Shrink.
+ uint32_t index = GetIndexForEntryImpl(*dict, entry);
+ Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
+ USE(result);
+ DCHECK(result->IsTrue());
+ Handle<FixedArray> new_elements =
+ SeededNumberDictionary::Shrink(dict, index);
+ parameter_map->set(1, *new_elements);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
+ Handle<FixedArrayBase> old_elements(
+ FixedArrayBase::cast(parameter_map->get(1)));
+ Handle<SeededNumberDictionary> dictionary =
+ old_elements->IsSeededNumberDictionary()
+ ? Handle<SeededNumberDictionary>::cast(old_elements)
+ : JSObject::NormalizeElements(object);
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ Handle<SeededNumberDictionary> new_dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+ details);
+ if (attributes != NONE) new_dictionary->set_requires_slow_elements();
+ if (*dictionary != *new_dictionary) {
+ FixedArray::cast(object->elements())->set(1, *new_dictionary);
}
}
- MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
- Handle<JSObject> obj,
- uint32_t key,
- Handle<FixedArrayBase> parameters) {
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
- Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return MaybeHandle<AccessorPair>();
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(store);
+ uint32_t length = parameter_map->length() - 2;
+ if (entry < length) {
+ Object* probe = parameter_map->get(entry + 2);
+ DCHECK(!probe->IsTheHole());
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_entry = Smi::cast(probe)->value();
+ DCHECK(!context->get(context_entry)->IsTheHole());
+ context->set(context_entry, *value);
+
+ // Redefining attributes of an aliased element destroys fast aliasing.
+ parameter_map->set_the_hole(entry + 2);
+      // For elements that are still writable, we re-establish slow aliasing.
+ if ((attributes & READ_ONLY) == 0) {
+ Isolate* isolate = store->GetIsolate();
+ value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
+ }
+
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ Handle<SeededNumberDictionary> arguments(
+ SeededNumberDictionary::cast(parameter_map->get(1)));
+ arguments = SeededNumberDictionary::AddNumberEntry(arguments, entry,
+ value, details);
+ parameter_map->set(1, *arguments);
} else {
- // If not aliased, check the arguments.
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- return ElementsAccessor::ForArray(arguments)
- ->GetAccessorPair(obj, key, arguments);
+ Handle<FixedArrayBase> arguments(
+ FixedArrayBase::cast(parameter_map->get(1)));
+ DictionaryElementsAccessor::ReconfigureImpl(
+ object, arguments, entry - length, value, attributes);
}
}
+};
+
- MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
- Handle<JSObject> obj,
- Handle<Object> length,
- Handle<FixedArrayBase> parameter_map) {
- // TODO(mstarzinger): This was never implemented but will be used once we
- // correctly implement [[DefineOwnProperty]] on arrays.
- UNIMPLEMENTED();
- return obj;
+class FastSloppyArgumentsElementsAccessor
+ : public SloppyArgumentsElementsAccessor<
+ FastSloppyArgumentsElementsAccessor, FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> > {
+ public:
+ explicit FastSloppyArgumentsElementsAccessor(const char* name)
+ : SloppyArgumentsElementsAccessor<
+ FastSloppyArgumentsElementsAccessor,
+ FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
+
+ static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
+ FixedArray* parameter_map = FixedArray::cast(obj->elements());
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+ FastHoleyObjectElementsAccessor::DeleteCommon(obj, entry, arguments);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ DCHECK_EQ(NONE, attributes);
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
+ Handle<FixedArrayBase> old_elements(
+ FixedArrayBase::cast(parameter_map->get(1)));
+ if (old_elements->IsSeededNumberDictionary() ||
+ static_cast<uint32_t>(old_elements->length()) < new_capacity) {
+ GrowCapacityAndConvertImpl(object, new_capacity);
+ }
+ SetImpl(object->elements(), index, *value);
}
- MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) final {
- Isolate* isolate = obj->GetIsolate();
- Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
- Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- // TODO(kmillikin): We could check if this was the last aliased
- // parameter, and revert to normal elements in that case. That
- // would enable GC of the context.
- parameter_map->set_the_hole(key + 2);
- } else {
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- if (arguments->IsDictionary()) {
- return DictionaryElementsAccessor::DeleteCommon(obj, key,
- language_mode);
- } else {
- // It's difficult to access the version of DeleteCommon that is declared
- // in the templatized super class, call the concrete implementation in
- // the class for the most generalized ElementsKind subclass.
- return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key,
- language_mode);
- }
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(object);
+ FixedArray::cast(*store)->set(1, *dictionary);
+ uint32_t length = static_cast<uint32_t>(store->length()) - 2;
+ if (entry >= length) {
+ entry = dictionary->FindEntry(entry - length) + length;
}
- return isolate->factory()->true_value();
+ SlowSloppyArgumentsElementsAccessor::ReconfigureImpl(object, store, entry,
+ value, attributes);
}
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
int copy_size) {
- UNREACHABLE();
- }
-
- static uint32_t GetCapacityImpl(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store) {
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(backing_store);
- Handle<FixedArrayBase> arguments(
- FixedArrayBase::cast(parameter_map->get(1)));
- return Max(static_cast<uint32_t>(parameter_map->length() - 2),
- ForArray(arguments)->GetCapacity(holder, arguments));
- }
-
- static uint32_t GetKeyForIndexImpl(Handle<FixedArrayBase> dict,
- uint32_t index) {
- return index;
+ DCHECK(!to->IsDictionary());
+ if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
+ CopyDictionaryToObjectElements(from, from_start, to, FAST_HOLEY_ELEMENTS,
+ to_start, copy_size);
+ } else {
+ DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, from_kind);
+ CopyObjectToObjectElements(from, FAST_HOLEY_ELEMENTS, from_start, to,
+ FAST_HOLEY_ELEMENTS, to_start, copy_size);
+ }
}
- private:
- static Handle<Object> GetParameterMapArg(Handle<JSObject> holder,
- Handle<FixedArray> parameter_map,
- uint32_t key) {
- Isolate* isolate = holder->GetIsolate();
- uint32_t length = holder->IsJSArray()
- ? Smi::cast(Handle<JSArray>::cast(holder)->length())->value()
- : parameter_map->length();
- return key < (length - 2)
- ? handle(parameter_map->get(key + 2), isolate)
- : Handle<Object>::cast(isolate->factory()->the_hole_value());
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
+ Handle<FixedArray> old_elements(FixedArray::cast(parameter_map->get(1)));
+ ElementsKind from_kind = object->GetElementsKind();
+ // This method should only be called if there's a reason to update the
+ // elements.
+ DCHECK(from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS ||
+ static_cast<uint32_t>(old_elements->length()) < capacity);
+ Handle<FixedArrayBase> elements =
+ ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(
+ object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
+ JSObject::MigrateToMap(object, new_map);
+ parameter_map->set(1, *elements);
+ JSObject::ValidateElements(object);
}
};
-ElementsAccessor* ElementsAccessor::ForArray(Handle<FixedArrayBase> array) {
- return elements_accessors_[ElementsKindForArray(*array)];
-}
-
-
-void ElementsAccessor::InitializeOncePerProcess() {
- static ElementsAccessor* accessor_array[] = {
-#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
- ELEMENTS_LIST(ACCESSOR_ARRAY)
-#undef ACCESSOR_ARRAY
- };
-
- STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
- kElementsKindCount);
-
- elements_accessors_ = accessor_array;
-}
-
-
-void ElementsAccessor::TearDown() {
- if (elements_accessors_ == NULL) return;
-#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
- ELEMENTS_LIST(ACCESSOR_DELETE)
-#undef ACCESSOR_DELETE
- elements_accessors_ = NULL;
-}
-
-
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT
-MaybeHandle<Object> ElementsAccessorBase<ElementsAccessorSubclass,
- ElementsKindTraits>::
- SetLengthImpl(Handle<JSObject> obj,
- Handle<Object> length,
+void ElementsAccessorBase<ElementsAccessorSubclass, ElementsKindTraits>::
+ SetLengthImpl(Handle<JSArray> array, uint32_t length,
Handle<FixedArrayBase> backing_store) {
- Isolate* isolate = obj->GetIsolate();
- Handle<JSArray> array = Handle<JSArray>::cast(obj);
-
- // Fast case: The new length fits into a Smi.
- Handle<Object> smi_length;
-
- if (Object::ToSmi(isolate, length).ToHandle(&smi_length) &&
- smi_length->IsSmi()) {
- const int value = Handle<Smi>::cast(smi_length)->value();
- if (value >= 0) {
- Handle<Object> new_length = ElementsAccessorSubclass::
- SetLengthWithoutNormalize(backing_store, array, smi_length, value);
- DCHECK(!new_length.is_null());
-
- // even though the proposed length was a smi, new_length could
- // still be a heap number because SetLengthWithoutNormalize doesn't
- // allow the array length property to drop below the index of
- // non-deletable elements.
- DCHECK(new_length->IsSmi() || new_length->IsHeapNumber() ||
- new_length->IsUndefined());
- if (new_length->IsSmi()) {
- array->set_length(*Handle<Smi>::cast(new_length));
- return array;
- } else if (new_length->IsHeapNumber()) {
- array->set_length(*new_length);
- return array;
- }
+ DCHECK(!array->SetLengthWouldNormalize(length));
+ DCHECK(IsFastElementsKind(array->GetElementsKind()));
+ uint32_t old_length = 0;
+ CHECK(array->length()->ToArrayIndex(&old_length));
+
+ if (old_length < length) {
+ ElementsKind kind = array->GetElementsKind();
+ if (!IsFastHoleyElementsKind(kind)) {
+ kind = GetHoleyElementsKind(kind);
+ JSObject::TransitionElementsKind(array, kind);
+ }
+ }
+
+ // Check whether the backing store should be shrunk.
+ uint32_t capacity = backing_store->length();
+ if (length == 0) {
+ array->initialize_elements();
+ } else if (length <= capacity) {
+ if (array->HasFastSmiOrObjectElements()) {
+ backing_store = JSObject::EnsureWritableFastElements(array);
+ }
+ if (2 * length <= capacity) {
+ // If more than half the elements won't be used, trim the array.
+ array->GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+ *backing_store, capacity - length);
} else {
- return ThrowArrayLengthRangeError(isolate);
+ // Otherwise, fill the unused tail with holes.
+ for (uint32_t i = length; i < old_length; i++) {
+ BackingStore::cast(*backing_store)->set_the_hole(i);
+ }
}
+ } else {
+ // Check whether the backing store should be expanded.
+ capacity = Max(length, JSObject::NewElementsCapacity(capacity));
+ ElementsAccessorSubclass::GrowCapacityAndConvertImpl(array, capacity);
}
- // Slow case: The new length does not fit into a Smi or conversion
- // to slow elements is needed for other reasons.
- if (length->IsNumber()) {
- uint32_t value;
- if (length->ToArrayIndex(&value)) {
- Handle<SeededNumberDictionary> dictionary =
- JSObject::NormalizeElements(array);
- DCHECK(!dictionary.is_null());
+ array->set_length(Smi::FromInt(length));
+ JSObject::ValidateElements(array);
+}
+} // namespace
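// The fast-path SetLength policy above, sketched over a vector-of-optionals
// model (illustrative; the real code also transitions packed kinds to holey
// when growing, and computes the grown capacity via
// JSObject::NewElementsCapacity rather than the plain resize used here):
#include <optional>
#include <vector>

template <typename T>
void SetLengthSketch(std::vector<std::optional<T>>* store, size_t old_length,
                     size_t new_length) {
  const size_t capacity = store->size();
  if (new_length == 0) {
    store->clear();  // Flush the backing store entirely.
  } else if (new_length <= capacity) {
    if (2 * new_length <= capacity) {
      store->resize(new_length);  // Trim: most of the store would be unused.
    } else {
      // Keep the capacity; fill the now-unused tail with holes.
      for (size_t i = new_length; i < old_length; ++i)
        (*store)[i] = std::nullopt;
    }
  } else {
    store->resize(new_length);  // Grow (capacity policy elided).
  }
}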
- Handle<Object> new_length = DictionaryElementsAccessor::
- SetLengthWithoutNormalize(dictionary, array, length, value);
- DCHECK(!new_length.is_null());
- DCHECK(new_length->IsNumber());
- array->set_length(*new_length);
- return array;
+void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
+ bool allow_appending) {
+ DisallowHeapAllocation no_allocation;
+ Object* raw_length = NULL;
+ const char* elements_type = "array";
+ if (obj->IsJSArray()) {
+ JSArray* array = JSArray::cast(*obj);
+ raw_length = array->length();
+ } else {
+ raw_length = Smi::FromInt(obj->elements()->length());
+ elements_type = "object";
+ }
+
+ if (raw_length->IsNumber()) {
+ double n = raw_length->Number();
+ if (FastI2D(FastD2UI(n)) == n) {
+ int32_t int32_length = DoubleToInt32(n);
+ uint32_t compare_length = static_cast<uint32_t>(int32_length);
+ if (allow_appending) compare_length++;
+ if (index >= compare_length) {
+ PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
+ elements_type, op, elements_type, static_cast<int>(int32_length),
+ static_cast<int>(index));
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
} else {
- return ThrowArrayLengthRangeError(isolate);
+ PrintF("[%s elements length not integer value in ", elements_type);
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
}
+ } else {
+ PrintF("[%s elements length not a number in ", elements_type);
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
}
-
- // Fall-back case: The new length is not a number so make the array
- // size one and set only element to length.
- Handle<FixedArray> new_backing_store = isolate->factory()->NewFixedArray(1);
- new_backing_store->set(0, *length);
- JSArray::SetContent(array, new_backing_store);
- return array;
}
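// For an out-of-bounds access, CheckArrayAbuse above prints a diagnostic of
// this shape (the values here are made up for illustration):
//
//   [OOB array write (array length = 4, element accessed = 9) in <frame>]
//
// where <frame> is filled in by TraceTopFrame. With allow_appending set,
// index == length is tolerated, since writing one slot past the end is an
// append rather than an out-of-bounds access.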
MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
Arguments* args) {
- // Optimize the case where there is one argument and the argument is a
- // small smi.
- if (args->length() == 1) {
- Handle<Object> obj = args->at<Object>(0);
- if (obj->IsSmi()) {
- int len = Handle<Smi>::cast(obj)->value();
- if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
- ElementsKind elements_kind = array->GetElementsKind();
- JSArray::Initialize(array, len, len);
-
- if (!IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- JSObject::TransitionElementsKind(array, elements_kind);
- }
- return array;
- } else if (len == 0) {
- JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
- return array;
- }
- }
+ if (args->length() == 0) {
+ // Optimize the case where there are no parameters passed.
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
- // Take the argument as the length.
- JSArray::Initialize(array, 0);
+ } else if (args->length() == 1 && args->at<Object>(0)->IsNumber()) {
+ uint32_t length;
+ if (!args->at<Object>(0)->ToArrayLength(&length)) {
+ return ThrowArrayLengthRangeError(array->GetIsolate());
+ }
- return JSArray::SetElementsLength(array, obj);
- }
+ // Optimize the case where there is one argument and the argument is a small
+ // smi.
+ if (length > 0 && length < JSObject::kInitialMaxFastElementArray) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ JSArray::Initialize(array, length, length);
- // Optimize the case where there are no parameters passed.
- if (args->length() == 0) {
- JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ if (!IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ JSObject::TransitionElementsKind(array, elements_kind);
+ }
+ } else if (length == 0) {
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ } else {
+ // Take the argument as the length.
+ JSArray::Initialize(array, 0);
+ JSArray::SetLength(array, length);
+ }
return array;
}
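// In JS terms, the branches above implement the Array constructor's
// length-argument semantics (sketch):
//
//   new Array()      // empty, small preallocated store
//   new Array(5)     // length 5, holey fast elements (small lengths)
//   new Array(1e9)   // length set, but no fast store is preallocated
//   new Array(-1)    // throws RangeError: invalid array length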
@@ -1823,8 +1881,8 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
+ for (int entry = 0; entry < number_of_elements; entry++) {
+ smi_elms->set(entry, (*args)[entry], SKIP_WRITE_BARRIER);
}
break;
}
@@ -1833,8 +1891,8 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
Handle<FixedArray> object_elms = Handle<FixedArray>::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- object_elms->set(index, (*args)[index], mode);
+ for (int entry = 0; entry < number_of_elements; entry++) {
+ object_elms->set(entry, (*args)[entry], mode);
}
break;
}
@@ -1842,8 +1900,8 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
case FAST_DOUBLE_ELEMENTS: {
Handle<FixedDoubleArray> double_elms =
Handle<FixedDoubleArray>::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- double_elms->set(index, (*args)[index]->Number());
+ for (int entry = 0; entry < number_of_elements; entry++) {
+ double_elms->set(entry, (*args)[entry]->Number());
}
break;
}
@@ -1857,4 +1915,30 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
return array;
}
-} } // namespace v8::internal
+
+void ElementsAccessor::InitializeOncePerProcess() {
+ static ElementsAccessor* accessor_array[] = {
+#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
+ ELEMENTS_LIST(ACCESSOR_ARRAY)
+#undef ACCESSOR_ARRAY
+ };
+
+ STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
+ kElementsKindCount);
+
+ elements_accessors_ = accessor_array;
+}
+
+
+void ElementsAccessor::TearDown() {
+ if (elements_accessors_ == NULL) return;
+#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
+ ELEMENTS_LIST(ACCESSOR_DELETE)
+#undef ACCESSOR_DELETE
+ elements_accessors_ = NULL;
+}
+
+
+ElementsAccessor** ElementsAccessor::elements_accessors_ = NULL;
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index e743849ff7..9005096a1e 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -20,77 +20,34 @@ class ElementsAccessor {
explicit ElementsAccessor(const char* name) : name_(name) { }
virtual ~ElementsAccessor() { }
- virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(Handle<JSObject> obj) = 0;
- // Returns true if a holder contains an element with the specified key
+ // Returns true if a holder contains an element with the specified index
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
// the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
// holder->elements() is used as the backing store.
- virtual bool HasElement(
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) = 0;
-
- inline bool HasElement(
- Handle<JSObject> holder,
- uint32_t key) {
- return HasElement(holder, key, handle(holder->elements()));
- }
+ virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store) = 0;
- // Returns the element with the specified key or undefined if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual MaybeHandle<Object> Get(
- Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) = 0;
-
- MUST_USE_RESULT inline MaybeHandle<Object> Get(
- Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key) {
- return Get(receiver, holder, key, handle(holder->elements()));
+ inline bool HasElement(Handle<JSObject> holder, uint32_t index) {
+ return HasElement(holder, index, handle(holder->elements()));
}
- // Returns an element's attributes, or ABSENT if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
+ // Returns the element with the specified index or undefined if there is no
+ // such element. This method doesn't iterate up the prototype chain. The
+ // caller can optionally pass in the backing store to use for the check, which
+ // must be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) = 0;
-
- MUST_USE_RESULT inline PropertyAttributes GetAttributes(
- Handle<JSObject> holder,
- uint32_t key) {
- return GetAttributes(holder, key, handle(holder->elements()));
- }
+ virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store) = 0;
- // Returns an element's accessors, or NULL if the element does not exist or
- // is plain. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> backing_store) = 0;
-
- MUST_USE_RESULT inline MaybeHandle<AccessorPair> GetAccessorPair(
- Handle<JSObject> holder,
- uint32_t key) {
- return GetAccessorPair(holder, key, handle(holder->elements()));
+ inline Handle<Object> Get(Handle<JSObject> holder, uint32_t index) {
+ return Get(holder, index, handle(holder->elements()));
}
// Modifies the length data property as specified for JSArrays and resizes the
@@ -98,24 +55,10 @@ class ElementsAccessor {
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
// have non-deletable elements can only be shrunk to the size of highest
// element that is non-deletable.
- MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
- Handle<JSArray> holder,
- Handle<Object> new_length) = 0;
-
- // Modifies both the length and capacity of a JSArray, resizing the underlying
- // backing store as necessary. This method does NOT honor the semantics of
- // EcmaScript 5.1 15.4.5.2, arrays can be shrunk beyond non-deletable
- // elements. This method should only be called for array expansion OR by
- // runtime JavaScript code that use InternalArrays and don't care about
- // EcmaScript 5.1 semantics.
- virtual void SetCapacityAndLength(
- Handle<JSArray> array,
- int capacity,
- int length) = 0;
-
- // Deletes an element in an object, returning a new elements backing store.
- MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> holder, uint32_t key, LanguageMode language_mode) = 0;
+ virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
+
+ // Deletes an element in an object.
+ virtual void Delete(Handle<JSObject> holder, uint32_t entry) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
@@ -158,16 +101,12 @@ class ElementsAccessor {
*from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole);
}
- MUST_USE_RESULT virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
- Handle<Object> receiver, Handle<JSObject> holder, Handle<FixedArray> to,
- Handle<FixedArrayBase> from, FixedArray::KeyFilter filter) = 0;
+ virtual void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) = 0;
- MUST_USE_RESULT inline MaybeHandle<FixedArray> AddElementsToFixedArray(
- Handle<Object> receiver, Handle<JSObject> holder, Handle<FixedArray> to,
- FixedArray::KeyFilter filter) {
- return AddElementsToFixedArray(receiver, holder, to,
- handle(holder->elements()), filter);
- }
+ virtual Handle<FixedArray> AddElementsToFixedArray(
+ Handle<JSObject> receiver, Handle<FixedArray> to,
+ FixedArray::KeyFilter filter) = 0;
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -180,22 +119,37 @@ class ElementsAccessor {
static void InitializeOncePerProcess();
static void TearDown();
+ virtual void Set(FixedArrayBase* backing_store, uint32_t index,
+ Object* value) = 0;
+ virtual void Reconfigure(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) = 0;
+ virtual void Add(Handle<JSObject> object, uint32_t entry,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) = 0;
+
protected:
- friend class SloppyArgumentsElementsAccessor;
-
- virtual uint32_t GetCapacity(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store) = 0;
-
- // Element handlers distinguish between indexes and keys when they manipulate
- // elements. Indexes refer to elements in terms of their location in the
- // underlying storage's backing store representation, and are between 0 and
- // GetCapacity. Keys refer to elements in terms of the value that would be
- // specified in JavaScript to access the element. In most implementations,
- // keys are equivalent to indexes, and GetKeyForIndex returns the same value
- // it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps
- // the index to a key using the KeyAt method on the NumberDictionary.
- virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
- uint32_t index) = 0;
+ friend class LookupIterator;
+
+ static ElementsAccessor* ForArray(FixedArrayBase* array);
+
+ virtual uint32_t GetCapacity(JSObject* holder,
+ FixedArrayBase* backing_store) = 0;
+
+ // Element handlers distinguish between entries and indices when they
+ // manipulate elements. Entries refer to elements in terms of their location
+ // in the underlying storage's backing store representation, and are between 0
+ // and GetCapacity. Indices refer to elements in terms of the value that would
+ // be specified in JavaScript to access the element. In most implementations,
+ // indices are equivalent to entries. In the NumberDictionary
+ // ElementsAccessor, entries are mapped to an index using the KeyAt method on
+ // the NumberDictionary.
+ virtual uint32_t GetEntryForIndex(JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index) = 0;
+ virtual PropertyDetails GetDetails(FixedArrayBase* backing_store,
+ uint32_t entry) = 0;
private:
static ElementsAccessor** elements_accessors_;
@@ -204,7 +158,7 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
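// The entry/index distinction documented above, made concrete with a plain
// model (illustrative only): in a dictionary-backed store the entry is a
// slot in the hash table and the index is the JS-visible key held at that
// slot, so the two differ; in fast stores they coincide.
#include <cstdint>
#include <vector>

struct DictionaryStoreModel {
  std::vector<uint32_t> key_at;  // KeyAt: hash-table slot (entry) -> index
  uint32_t IndexForEntry(uint32_t entry) const { return key_at[entry]; }
};

struct FastStoreModel {
  uint32_t IndexForEntry(uint32_t entry) const { return entry; }
};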
-void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t key,
+void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
bool allow_appending = false);
MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 97c660e10f..fc0e50e553 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -200,7 +200,7 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
// creating message objects during stack overflow we shouldn't
// capture messages.
{
- v8::TryCatch catcher;
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
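// The TryCatch construction above reflects the API change to an explicit
// isolate parameter; embedder call sites follow the same pattern (sketch,
// with `isolate` being the v8::Isolate* in scope):
//
//   v8::TryCatch try_catch(isolate);
//   try_catch.SetVerbose(false);
//   try_catch.SetCaptureMessage(false);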
@@ -602,35 +602,6 @@ MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
}
-Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
- Isolate* isolate = string->GetIsolate();
- Factory* factory = isolate->factory();
-
- int int_index = static_cast<int>(index);
- if (int_index < 0 || int_index >= string->length()) {
- return factory->undefined_value();
- }
-
- Handle<Object> char_at = Object::GetProperty(
- isolate->js_builtins_object(),
- factory->char_at_string()).ToHandleChecked();
- if (!char_at->IsJSFunction()) {
- return factory->undefined_value();
- }
-
- Handle<Object> index_object = factory->NewNumberFromInt(int_index);
- Handle<Object> index_arg[] = { index_object };
- Handle<Object> result;
- if (!TryCall(Handle<JSFunction>::cast(char_at),
- string,
- arraysize(index_arg),
- index_arg).ToHandle(&result)) {
- return factory->undefined_value();
- }
- return result;
-}
-
-
Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
@@ -692,4 +663,5 @@ Object* StackGuard::HandleInterrupts() {
return isolate_->heap()->undefined_value();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 643d84f085..fd7636db96 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -94,9 +94,6 @@ class Execution final : public AllStatic {
MUST_USE_RESULT static MaybeHandle<JSRegExp> NewJSRegExp(
Handle<String> pattern, Handle<String> flags);
- // Used to implement [] notation on strings (calls JS code)
- static Handle<Object> CharAt(Handle<String> str, uint32_t index);
-
static Handle<Object> GetFunctionFor();
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
diff --git a/deps/v8/src/expression-classifier.h b/deps/v8/src/expression-classifier.h
new file mode 100644
index 0000000000..6edb99e838
--- /dev/null
+++ b/deps/v8/src/expression-classifier.h
@@ -0,0 +1,255 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXPRESSION_CLASSIFIER_H
+#define V8_EXPRESSION_CLASSIFIER_H
+
+#include "src/v8.h"
+
+#include "src/messages.h"
+#include "src/scanner.h"
+#include "src/token.h"
+
+namespace v8 {
+namespace internal {
+
+
+class ExpressionClassifier {
+ public:
+ struct Error {
+ Error()
+ : location(Scanner::Location::invalid()),
+ message(MessageTemplate::kNone),
+ arg(nullptr) {}
+
+ Scanner::Location location;
+ MessageTemplate::Template message;
+ const char* arg;
+ };
+
+ enum TargetProduction {
+ ExpressionProduction = 1 << 0,
+ BindingPatternProduction = 1 << 1,
+ AssignmentPatternProduction = 1 << 2,
+ DistinctFormalParametersProduction = 1 << 3,
+ StrictModeFormalParametersProduction = 1 << 4,
+ StrongModeFormalParametersProduction = 1 << 5,
+ ArrowFormalParametersProduction = 1 << 6,
+
+ PatternProductions =
+ (BindingPatternProduction | AssignmentPatternProduction),
+ FormalParametersProductions = (DistinctFormalParametersProduction |
+ StrictModeFormalParametersProduction |
+ StrongModeFormalParametersProduction),
+ StandardProductions = ExpressionProduction | PatternProductions,
+ AllProductions = (StandardProductions | FormalParametersProductions |
+ ArrowFormalParametersProduction)
+ };
+
+ ExpressionClassifier()
+ : invalid_productions_(0), duplicate_finder_(nullptr) {}
+
+ explicit ExpressionClassifier(DuplicateFinder* duplicate_finder)
+ : invalid_productions_(0), duplicate_finder_(duplicate_finder) {}
+
+ bool is_valid(unsigned productions) const {
+ return (invalid_productions_ & productions) == 0;
+ }
+
+ DuplicateFinder* duplicate_finder() const { return duplicate_finder_; }
+
+ bool is_valid_expression() const { return is_valid(ExpressionProduction); }
+
+ bool is_valid_binding_pattern() const {
+ return is_valid(BindingPatternProduction);
+ }
+
+ bool is_valid_assignment_pattern() const {
+ return is_valid(AssignmentPatternProduction);
+ }
+
+ bool is_valid_arrow_formal_parameters() const {
+ return is_valid(ArrowFormalParametersProduction);
+ }
+
+ bool is_valid_formal_parameter_list_without_duplicates() const {
+ return is_valid(DistinctFormalParametersProduction);
+ }
+
+ // Note: callers should also check
+ // is_valid_formal_parameter_list_without_duplicates().
+ bool is_valid_strict_mode_formal_parameters() const {
+ return is_valid(StrictModeFormalParametersProduction);
+ }
+
+ // Note: callers should also check is_valid_strict_mode_formal_parameters()
+ // and is_valid_formal_parameter_list_without_duplicates().
+ bool is_valid_strong_mode_formal_parameters() const {
+ return is_valid(StrongModeFormalParametersProduction);
+ }
+
+ const Error& expression_error() const { return expression_error_; }
+
+ const Error& binding_pattern_error() const { return binding_pattern_error_; }
+
+ const Error& assignment_pattern_error() const {
+ return assignment_pattern_error_;
+ }
+
+ const Error& arrow_formal_parameters_error() const {
+ return arrow_formal_parameters_error_;
+ }
+
+ const Error& duplicate_formal_parameter_error() const {
+ return duplicate_formal_parameter_error_;
+ }
+
+ const Error& strict_mode_formal_parameter_error() const {
+ return strict_mode_formal_parameter_error_;
+ }
+
+ const Error& strong_mode_formal_parameter_error() const {
+ return strong_mode_formal_parameter_error_;
+ }
+
+ void RecordExpressionError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_expression()) return;
+ invalid_productions_ |= ExpressionProduction;
+ expression_error_.location = loc;
+ expression_error_.message = message;
+ expression_error_.arg = arg;
+ }
+
+ void RecordBindingPatternError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_binding_pattern()) return;
+ invalid_productions_ |= BindingPatternProduction;
+ binding_pattern_error_.location = loc;
+ binding_pattern_error_.message = message;
+ binding_pattern_error_.arg = arg;
+ }
+
+ void RecordAssignmentPatternError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_assignment_pattern()) return;
+ invalid_productions_ |= AssignmentPatternProduction;
+ assignment_pattern_error_.location = loc;
+ assignment_pattern_error_.message = message;
+ assignment_pattern_error_.arg = arg;
+ }
+
+ void RecordArrowFormalParametersError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_arrow_formal_parameters()) return;
+ invalid_productions_ |= ArrowFormalParametersProduction;
+ arrow_formal_parameters_error_.location = loc;
+ arrow_formal_parameters_error_.message = message;
+ arrow_formal_parameters_error_.arg = arg;
+ }
+
+ void RecordDuplicateFormalParameterError(const Scanner::Location& loc) {
+ if (!is_valid_formal_parameter_list_without_duplicates()) return;
+ invalid_productions_ |= DistinctFormalParametersProduction;
+ duplicate_formal_parameter_error_.location = loc;
+ duplicate_formal_parameter_error_.message =
+ MessageTemplate::kStrictParamDupe;
+ duplicate_formal_parameter_error_.arg = nullptr;
+ }
+
+ // Record a binding that would be invalid in strict mode. Confusingly this
+ // is not the same as StrictFormalParameterList, which simply forbids
+ // duplicate bindings.
+ void RecordStrictModeFormalParameterError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_strict_mode_formal_parameters()) return;
+ invalid_productions_ |= StrictModeFormalParametersProduction;
+ strict_mode_formal_parameter_error_.location = loc;
+ strict_mode_formal_parameter_error_.message = message;
+ strict_mode_formal_parameter_error_.arg = arg;
+ }
+
+ void RecordStrongModeFormalParameterError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_strong_mode_formal_parameters()) return;
+ invalid_productions_ |= StrongModeFormalParametersProduction;
+ strong_mode_formal_parameter_error_.location = loc;
+ strong_mode_formal_parameter_error_.message = message;
+ strong_mode_formal_parameter_error_.arg = arg;
+ }
+
+ void Accumulate(const ExpressionClassifier& inner,
+ unsigned productions = StandardProductions) {
+ // Propagate errors from inner, but don't overwrite already recorded
+ // errors.
+ unsigned non_arrow_inner_invalid_productions =
+ inner.invalid_productions_ & ~ArrowFormalParametersProduction;
+ if (non_arrow_inner_invalid_productions == 0) return;
+ unsigned non_arrow_productions =
+ productions & ~ArrowFormalParametersProduction;
+ unsigned errors =
+ non_arrow_productions & non_arrow_inner_invalid_productions;
+ errors &= ~invalid_productions_;
+ if (errors != 0) {
+ invalid_productions_ |= errors;
+ if (errors & ExpressionProduction)
+ expression_error_ = inner.expression_error_;
+ if (errors & BindingPatternProduction)
+ binding_pattern_error_ = inner.binding_pattern_error_;
+ if (errors & AssignmentPatternProduction)
+ assignment_pattern_error_ = inner.assignment_pattern_error_;
+ if (errors & DistinctFormalParametersProduction)
+ duplicate_formal_parameter_error_ =
+ inner.duplicate_formal_parameter_error_;
+ if (errors & StrictModeFormalParametersProduction)
+ strict_mode_formal_parameter_error_ =
+ inner.strict_mode_formal_parameter_error_;
+ if (errors & StrongModeFormalParametersProduction)
+ strong_mode_formal_parameter_error_ =
+ inner.strong_mode_formal_parameter_error_;
+ }
+
+ // As an exception to the above, the result continues to be valid arrow
+ // formal parameters if the inner expression is a valid binding pattern.
+ if (productions & ArrowFormalParametersProduction &&
+ is_valid_arrow_formal_parameters() &&
+ !inner.is_valid_binding_pattern()) {
+ invalid_productions_ |= ArrowFormalParametersProduction;
+ arrow_formal_parameters_error_ = inner.binding_pattern_error_;
+ }
+ }
+
+ void AccumulateReclassifyingAsPattern(const ExpressionClassifier& inner) {
+ Accumulate(inner, AllProductions & ~PatternProductions);
+ if (!inner.is_valid_expression()) {
+ if (is_valid_binding_pattern()) {
+ binding_pattern_error_ = inner.expression_error();
+ }
+ if (is_valid_assignment_pattern()) {
+ assignment_pattern_error_ = inner.expression_error();
+ }
+ }
+ }
+
+ private:
+ unsigned invalid_productions_;
+ Error expression_error_;
+ Error binding_pattern_error_;
+ Error assignment_pattern_error_;
+ Error arrow_formal_parameters_error_;
+ Error duplicate_formal_parameter_error_;
+ Error strict_mode_formal_parameter_error_;
+ Error strong_mode_formal_parameter_error_;
+ DuplicateFinder* duplicate_finder_;
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXPRESSION_CLASSIFIER_H
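A rough sketch of how parser code drives this new class; the location argument and the error reporter below are stand-ins, not part of this header:

    ExpressionClassifier classifier;
    // While parsing an ambiguous construct, record violations without
    // committing to any particular production:
    classifier.RecordBindingPatternError(loc, MessageTemplate::kUnexpectedToken);
    // Later, the caller that knows which production it needed checks validity:
    if (!classifier.is_valid_binding_pattern()) {
      const ExpressionClassifier::Error& e = classifier.binding_pattern_error();
      ReportMessageAt(e.location, e.message, e.arg);  // hypothetical reporter
    }

Nested classifiers are merged with Accumulate(), which copies an inner error into the outer classifier only for productions the outer one still considers valid.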
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 8d38dfa0f0..aedfb5e93f 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -37,9 +37,9 @@ const char* const ExternalizeStringExtension::kSource =
"native function externalizeString();"
"native function isOneByteString();";
-v8::Handle<v8::FunctionTemplate>
+v8::Local<v8::FunctionTemplate>
ExternalizeStringExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Handle<v8::String> str) {
+ v8::Isolate* isolate, v8::Local<v8::String> str) {
if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
return v8::FunctionTemplate::New(isolate,
ExternalizeStringExtension::Externalize);
@@ -54,28 +54,36 @@ ExternalizeStringExtension::GetNativeFunctionTemplate(
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
- args.GetIsolate(),
- "First parameter to externalizeString() must be a string."));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(
+ args.GetIsolate(),
+ "First parameter to externalizeString() must be a string.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
bool force_two_byte = false;
if (args.Length() >= 2) {
if (args[1]->IsBoolean()) {
- force_two_byte = args[1]->BooleanValue();
+ force_two_byte =
+ args[1]
+ ->BooleanValue(args.GetIsolate()->GetCurrentContext())
+ .FromJust();
} else {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
- args.GetIsolate(),
- "Second parameter to externalizeString() must be a boolean."));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(
+ args.GetIsolate(),
+ "Second parameter to externalizeString() must be a boolean.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
- args.GetIsolate(),
- "externalizeString() can't externalize twice."));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(),
+ "externalizeString() can't externalize twice.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
@@ -102,8 +110,10 @@ void ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
- args.GetIsolate(), "externalizeString() failed."));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(),
+ "externalizeString() failed.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
}
@@ -112,9 +122,11 @@ void ExternalizeStringExtension::Externalize(
void ExternalizeStringExtension::IsOneByte(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
- args.GetIsolate(),
- "isOneByteString() requires a single string argument."));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(
+ args.GetIsolate(),
+ "isOneByteString() requires a single string argument.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
bool is_one_byte =
@@ -122,4 +134,5 @@ void ExternalizeStringExtension::IsOneByte(
args.GetReturnValue().Set(is_one_byte);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
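All of these call sites follow one migration pattern: string creation is now fallible and returns a MaybeLocal that must be unwrapped. Condensed, with the old form for contrast:

    // Old API (infallible signature):
    //   v8::Local<v8::String> s = v8::String::NewFromUtf8(isolate, "msg");
    // New API:
    v8::Local<v8::String> s =
        v8::String::NewFromUtf8(isolate, "msg", v8::NewStringType::kNormal)
            .ToLocalChecked();  // aborts on failure; ToLocal() recovers instead
    isolate->ThrowException(s);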
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index f8c54f8f12..dc23ffd2d3 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -13,9 +13,8 @@ namespace internal {
class ExternalizeStringExtension : public v8::Extension {
public:
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void IsOneByte(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/src/extensions/free-buffer-extension.cc b/deps/v8/src/extensions/free-buffer-extension.cc
index c880d75d0b..b642b3df30 100644
--- a/deps/v8/src/extensions/free-buffer-extension.cc
+++ b/deps/v8/src/extensions/free-buffer-extension.cc
@@ -11,20 +11,20 @@ namespace v8 {
namespace internal {
-v8::Handle<v8::FunctionTemplate> FreeBufferExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> str) {
+v8::Local<v8::FunctionTemplate> FreeBufferExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> str) {
return v8::FunctionTemplate::New(isolate, FreeBufferExtension::FreeBuffer);
}
void FreeBufferExtension::FreeBuffer(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Handle<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
+ v8::Local<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
isolate->array_buffer_allocator()->Free(contents.Data(),
contents.ByteLength());
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/extensions/free-buffer-extension.h b/deps/v8/src/extensions/free-buffer-extension.h
index bccf760cc2..d62ed02b86 100644
--- a/deps/v8/src/extensions/free-buffer-extension.h
+++ b/deps/v8/src/extensions/free-buffer-extension.h
@@ -14,9 +14,8 @@ class FreeBufferExtension : public v8::Extension {
public:
FreeBufferExtension()
: v8::Extension("v8/free-buffer", "native function freeBuffer();") {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
};
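For context on the FreeBuffer change above: Externalize() transfers ownership of the backing store to the embedder, which must then release it through the same allocator that produced it. The contract, in sketch form:

    v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
    void* data = contents.Data();           // now owned by the embedder
    size_t length = contents.ByteLength();
    // ... use the memory ...
    allocator->Free(data, length);  // allocator: the ArrayBuffer::Allocator in use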
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 74b74811c3..9eb453b986 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -10,17 +10,20 @@ namespace v8 {
namespace internal {
-v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> str) {
+v8::Local<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> str) {
return v8::FunctionTemplate::New(isolate, GCExtension::GC);
}
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate()->RequestGarbageCollectionForTesting(
- args[0]->BooleanValue() ? v8::Isolate::kMinorGarbageCollection
- : v8::Isolate::kFullGarbageCollection);
+ args[0]
+ ->BooleanValue(args.GetIsolate()->GetCurrentContext())
+ .FromMaybe(false)
+ ? v8::Isolate::kMinorGarbageCollection
+ : v8::Isolate::kFullGarbageCollection);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
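BooleanValue is maybe-fied the same way: it now takes a context and returns Maybe<bool>. A sketch of the pattern used above:

    // Old: bool b = value->BooleanValue();
    v8::Local<v8::Context> context = isolate->GetCurrentContext();
    bool b = value->BooleanValue(context).FromMaybe(false);  // default on failure
    // FromJust() is the assert-style alternative when failure cannot happen.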
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 789354597e..2462bd9604 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -15,9 +15,8 @@ class GCExtension : public v8::Extension {
explicit GCExtension(const char* fun_name)
: v8::Extension("v8/gc",
BuildSource(buffer_, sizeof(buffer_), fun_name)) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
private:
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index d1e110e267..eb0fed80d5 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -11,9 +11,8 @@ const char* const StatisticsExtension::kSource =
"native function getV8Statistics();";
-v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> str) {
+v8::Local<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> str) {
DCHECK(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
return v8::FunctionTemplate::New(isolate, StatisticsExtension::GetCounters);
}
@@ -24,8 +23,11 @@ static void AddCounter(v8::Isolate* isolate,
StatsCounter* counter,
const char* name) {
if (counter->Enabled()) {
- object->Set(v8::String::NewFromUtf8(isolate, name),
- v8::Number::New(isolate, *counter->GetInternalPointer()));
+ object->Set(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, *counter->GetInternalPointer()))
+ .FromJust();
}
}
@@ -33,8 +35,10 @@ static void AddNumber(v8::Isolate* isolate,
v8::Local<v8::Object> object,
intptr_t value,
const char* name) {
- object->Set(v8::String::NewFromUtf8(isolate, name),
- v8::Number::New(isolate, static_cast<double>(value)));
+ object->Set(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, static_cast<double>(value))).FromJust();
}
@@ -42,8 +46,10 @@ static void AddNumber64(v8::Isolate* isolate,
v8::Local<v8::Object> object,
int64_t value,
const char* name) {
- object->Set(v8::String::NewFromUtf8(isolate, name),
- v8::Number::New(isolate, static_cast<double>(value)));
+ object->Set(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, static_cast<double>(value))).FromJust();
}
@@ -54,7 +60,9 @@ void StatisticsExtension::GetCounters(
if (args.Length() > 0) { // GC if first argument evaluates to true.
if (args[0]->IsBoolean() &&
- args[0]->ToBoolean(args.GetIsolate())->Value()) {
+ args[0]
+ ->BooleanValue(args.GetIsolate()->GetCurrentContext())
+ .FromMaybe(false)) {
heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
}
}
@@ -129,4 +137,5 @@ void StatisticsExtension::GetCounters(
args.GetReturnValue().Set(result);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
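Object::Set follows suit: it takes a context and returns Maybe<bool>, which these helpers discharge with FromJust(). In sketch form:

    // Old: object->Set(key, value);
    object->Set(isolate->GetCurrentContext(), key, value).FromJust();
    // Property stores can fail (interceptors, proxies), so the Maybe result
    // must be consumed even when the caller ignores the value.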
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 0915e61de0..8149e44afe 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -13,9 +13,8 @@ namespace internal {
class StatisticsExtension : public v8::Extension {
public:
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
private:
diff --git a/deps/v8/src/extensions/trigger-failure-extension.cc b/deps/v8/src/extensions/trigger-failure-extension.cc
index b0aacb42c6..672c1a7064 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.cc
+++ b/deps/v8/src/extensions/trigger-failure-extension.cc
@@ -15,10 +15,9 @@ const char* const TriggerFailureExtension::kSource =
"native function triggerSlowAssertFalse();";
-v8::Handle<v8::FunctionTemplate>
-TriggerFailureExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> str) {
+v8::Local<v8::FunctionTemplate>
+TriggerFailureExtension::GetNativeFunctionTemplate(v8::Isolate* isolate,
+ v8::Local<v8::String> str) {
if (strcmp(*v8::String::Utf8Value(str), "triggerCheckFalse") == 0) {
return v8::FunctionTemplate::New(
isolate,
@@ -53,4 +52,5 @@ void TriggerFailureExtension::TriggerSlowAssertFalse(
SLOW_DCHECK(false);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
index 6974da5e31..c01b37d3e9 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.h
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -13,9 +13,8 @@ namespace internal {
class TriggerFailureExtension : public v8::Extension {
public:
TriggerFailureExtension() : v8::Extension("v8/trigger-failure", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void TriggerCheckFalse(
const v8::FunctionCallbackInfo<v8::Value>& args);
static void TriggerAssertFalse(
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 0e66ec6268..f0967c7200 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -55,6 +55,7 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
result->set_prototype_users(WeakFixedArray::Empty());
+ result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_validity_cell(Smi::FromInt(0));
result->set_constructor_name(Smi::FromInt(0));
return result;
@@ -126,28 +127,6 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
}
-Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small) {
- DCHECK(small.total_count() > 0);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateConstantPoolArray(small),
- ConstantPoolArray);
-}
-
-
-Handle<ConstantPoolArray> Factory::NewExtendedConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small,
- const ConstantPoolArray::NumberOfEntries& extended) {
- DCHECK(small.total_count() > 0);
- DCHECK(extended.total_count() > 0);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExtendedConstantPoolArray(small, extended),
- ConstantPoolArray);
-}
-
-
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
}
@@ -698,23 +677,21 @@ Handle<Symbol> Factory::NewSymbol() {
}
-Handle<Symbol> Factory::NewPrivateSymbol() {
+Handle<Symbol> Factory::NewPrivateSymbol(Handle<Object> name) {
Handle<Symbol> symbol = NewSymbol();
symbol->set_is_private(true);
- return symbol;
-}
-
-
-Handle<Symbol> Factory::NewPrivateOwnSymbol() {
- Handle<Symbol> symbol = NewSymbol();
- symbol->set_is_private(true);
- symbol->set_is_own(true);
+ if (name->IsString()) {
+ symbol->set_name(*name);
+ } else {
+ DCHECK(name->IsUndefined());
+ }
return symbol;
}
Handle<Context> Factory::NewNativeContext() {
- Handle<FixedArray> array = NewFixedArray(Context::NATIVE_CONTEXT_SLOTS);
+ Handle<FixedArray> array =
+ NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
array->set_map_no_write_barrier(*native_context_map());
Handle<Context> context = Handle<Context>::cast(array);
context->set_js_array_maps(*undefined_value());
@@ -869,6 +846,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));
+ script->set_shared_function_infos(Smi::FromInt(0));
script->set_flags(Smi::FromInt(0));
return script;
@@ -912,16 +890,12 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
- int length,
- ExternalArrayType array_type,
+ int length, ExternalArrayType array_type, bool initialize,
PretenureFlag pretenure) {
DCHECK(0 <= length && length <= Smi::kMaxValue);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedTypedArray(length,
- array_type,
- pretenure),
- FixedTypedArrayBase);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateFixedTypedArray(
+ length, array_type, initialize, pretenure),
+ FixedTypedArrayBase);
}
@@ -1023,14 +997,6 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
}
-Handle<ConstantPoolArray> Factory::CopyConstantPoolArray(
- Handle<ConstantPoolArray> array) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyConstantPoolArray(*array),
- ConstantPoolArray);
-}
-
-
Handle<Object> Factory::NewNumber(double value,
PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
@@ -1076,61 +1042,11 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
}
-Handle<Object> Factory::NewTypeError(const char* message,
- Vector<Handle<Object> > args) {
- return NewError("MakeTypeError", message, args);
-}
-
-
-Handle<Object> Factory::NewTypeError(Handle<String> message) {
- return NewError("$TypeError", message);
-}
-
-
-Handle<Object> Factory::NewRangeError(const char* message,
- Vector<Handle<Object> > args) {
- return NewError("MakeRangeError", message, args);
-}
-
-
-Handle<Object> Factory::NewRangeError(Handle<String> message) {
- return NewError("$RangeError", message);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(const char* message,
- Handle<JSArray> args) {
- return NewError("MakeSyntaxError", message, args);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
- return NewError("$SyntaxError", message);
-}
-
-
-Handle<Object> Factory::NewReferenceError(const char* message,
- Handle<JSArray> args) {
- return NewError("MakeReferenceError", message, args);
-}
-
-
-Handle<Object> Factory::NewReferenceError(Handle<String> message) {
- return NewError("$ReferenceError", message);
-}
-
-
-Handle<Object> Factory::NewError(const char* maker, const char* message,
- Vector<Handle<Object> > args) {
- // Instantiate a closeable HandleScope for EscapeFrom.
- v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
- Handle<FixedArray> array = NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- array->set(i, *args[i]);
- }
- Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result = NewError(maker, message, object);
- return result.EscapeFrom(&scope);
+Handle<Float32x4> Factory::NewFloat32x4(float w, float x, float y, float z,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(), isolate()->heap()->AllocateFloat32x4(w, x, y, z, pretenure),
+ Float32x4);
}
@@ -1140,6 +1056,13 @@ Handle<Object> Factory::NewError(const char* maker,
Handle<Object> arg2) {
HandleScope scope(isolate());
Handle<String> error_maker = InternalizeUtf8String(maker);
+ if (isolate()->bootstrapper()->IsActive()) {
+ // If this exception is being thrown during bootstrapping,
+ // js_builtins_object is unavailable. We return the error maker
+ // name's string as the exception since we have nothing better
+ // to do.
+ return scope.CloseAndEscape(error_maker);
+ }
Handle<Object> fun_obj = Object::GetProperty(isolate()->js_builtins_object(),
error_maker).ToHandleChecked();
@@ -1181,6 +1104,13 @@ Handle<Object> Factory::NewTypeError(MessageTemplate::Template template_index,
}
+Handle<Object> Factory::NewSyntaxError(MessageTemplate::Template template_index,
+ Handle<Object> arg0, Handle<Object> arg1,
+ Handle<Object> arg2) {
+ return NewError("MakeSyntaxError", template_index, arg0, arg1, arg2);
+}
+
+
Handle<Object> Factory::NewReferenceError(
MessageTemplate::Template template_index, Handle<Object> arg0,
Handle<Object> arg1, Handle<Object> arg2) {
@@ -1269,11 +1199,6 @@ Handle<Object> Factory::NewError(const char* maker, const char* message,
}
-Handle<Object> Factory::NewError(Handle<String> message) {
- return NewError("$Error", message);
-}
-
-
Handle<Object> Factory::NewError(const char* constructor,
Handle<String> message) {
Handle<String> constr = InternalizeUtf8String(constructor);
@@ -1308,7 +1233,7 @@ void Factory::InitializeFunction(Handle<JSFunction> function,
function->set_context(*context);
function->set_prototype_or_initial_map(*the_hole_value());
function->set_literals_or_bindings(*empty_fixed_array());
- function->set_next_function_link(*undefined_value());
+ function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
}
@@ -1455,23 +1380,23 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
result->MarkForOptimization();
}
- int index = info->SearchOptimizedCodeMap(context->native_context(),
- BailoutId::None());
- if (!info->bound() && index < 0) {
+ CodeAndLiterals cached = info->SearchOptimizedCodeMap(
+ context->native_context(), BailoutId::None());
+ if (cached.code != nullptr) {
+ // Caching of optimized code enabled and optimized code found.
+ if (cached.literals != nullptr) result->set_literals(cached.literals);
+ DCHECK(!cached.code->marked_for_deoptimization());
+ DCHECK(result->shared()->is_compiled());
+ result->ReplaceCode(cached.code);
+ }
+
+ if (cached.literals == nullptr && !info->bound()) {
int number_of_literals = info->num_literals();
+ // TODO(mstarzinger): Consider sharing the newly created literals array.
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
result->set_literals(*literals);
}
- if (index > 0) {
- // Caching of optimized code enabled and optimized code found.
- FixedArray* literals = info->GetLiteralsFromOptimizedCodeMap(index);
- if (literals != NULL) result->set_literals(literals);
- Code* code = info->GetCodeFromOptimizedCodeMap(index);
- DCHECK(!code->marked_for_deoptimization());
- result->ReplaceCode(code);
- }
-
return result;
}
@@ -1507,17 +1432,15 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
int prologue_offset,
bool is_debug) {
Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
- Handle<ConstantPoolArray> constant_pool =
- desc.origin->NewConstantPool(isolate());
// Compute size.
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
int obj_size = Code::SizeFor(body_size);
Handle<Code> code = NewCodeRaw(obj_size, immovable);
- DCHECK(isolate()->code_range() == NULL ||
- !isolate()->code_range()->valid() ||
- isolate()->code_range()->contains(code->address()));
+ DCHECK(isolate()->code_range() == NULL || !isolate()->code_range()->valid() ||
+ isolate()->code_range()->contains(code->address()) ||
+ obj_size <= isolate()->heap()->code_space()->AreaSize());
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
@@ -1535,6 +1458,9 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_next_code_link(*undefined_value());
code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_prologue_offset(prologue_offset);
+ if (FLAG_enable_embedded_constant_pool) {
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ }
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
@@ -1544,9 +1470,6 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_has_debug_break_slots(true);
}
- desc.origin->PopulateConstantPool(*constant_pool);
- code->set_constant_pool(*constant_pool);
-
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_ref.is_null()) *(self_ref.location()) = *code;
@@ -1634,8 +1557,8 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
// Allocate a dictionary object for backing storage.
int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate(), at_least_space_for);
+ Handle<GlobalDictionary> dictionary =
+ GlobalDictionary::New(isolate(), at_least_space_for);
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
@@ -1650,7 +1573,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
Handle<PropertyCell> cell = NewPropertyCell();
cell->set_value(descs->GetCallbacksObject(i));
// |dictionary| already contains enough space for all properties.
- USE(NameDictionary::Add(dictionary, name, cell, d));
+ USE(GlobalDictionary::Add(dictionary, name, cell, d));
}
// Allocate the global object and initialize it with the backing store.
@@ -1688,22 +1611,24 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
+ Strength strength,
PretenureFlag pretenure) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
- if (transition_map != NULL) map = transition_map;
+ Map* map = isolate()->get_initial_js_array_map(elements_kind, strength);
+ if (map == nullptr) {
+ DCHECK(strength == Strength::WEAK);
+ Context* native_context = isolate()->context()->native_context();
+ JSFunction* array_function = native_context->array_function();
+ map = array_function->initial_map();
+ }
return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure));
}
-Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
- int length,
- int capacity,
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
+ int capacity, Strength strength,
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
- Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+ Handle<JSArray> array = NewJSArray(elements_kind, strength, pretenure);
NewJSArrayStorage(array, length, capacity, mode);
return array;
}
@@ -1711,10 +1636,10 @@ Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
- int length,
+ int length, Strength strength,
PretenureFlag pretenure) {
DCHECK(length <= elements->length());
- Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+ Handle<JSArray> array = NewJSArray(elements_kind, strength, pretenure);
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
@@ -1773,9 +1698,11 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
}
-Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
+Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared) {
Handle<JSFunction> array_buffer_fun(
- isolate()->native_context()->array_buffer_fun());
+ shared == SharedFlag::kShared
+ ? isolate()->native_context()->shared_array_buffer_fun()
+ : isolate()->native_context()->array_buffer_fun());
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObject(*array_buffer_fun),
@@ -1793,6 +1720,22 @@ Handle<JSDataView> Factory::NewJSDataView() {
}
+Handle<JSMap> Factory::NewJSMap() {
+ Handle<Map> map(isolate()->native_context()->js_map_map());
+ Handle<JSMap> js_map = Handle<JSMap>::cast(NewJSObjectFromMap(map));
+ Runtime::JSMapInitialize(isolate(), js_map);
+ return js_map;
+}
+
+
+Handle<JSSet> Factory::NewJSSet() {
+ Handle<Map> map(isolate()->native_context()->js_set_map());
+ Handle<JSSet> js_set = Handle<JSSet>::cast(NewJSObjectFromMap(map));
+ Runtime::JSSetInitialize(isolate(), js_set);
+ return js_set;
+}
+
+
Handle<JSMapIterator> Factory::NewJSMapIterator() {
Handle<Map> map(isolate()->native_context()->map_iterator_map());
CALL_HEAP_FUNCTION(isolate(),
@@ -1990,11 +1933,12 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
obj->set_length(*length_object);
Handle<JSArrayBuffer> buffer = isolate()->factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBuffer(isolate(), buffer, true, NULL, byte_length);
+ Runtime::SetupArrayBuffer(isolate(), buffer, true, NULL, byte_length,
+ SharedFlag::kNotShared);
obj->set_buffer(*buffer);
Handle<FixedTypedArrayBase> elements =
isolate()->factory()->NewFixedTypedArray(
- static_cast<int>(number_of_elements), array_type);
+ static_cast<int>(number_of_elements), array_type, true);
obj->set_elements(*elements);
return obj;
}
@@ -2051,17 +1995,13 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
int size) {
DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
- // Allocate fresh map.
- // TODO(rossberg): Once we optimize proxies, cache these maps.
- Handle<Map> map = NewMap(type, size);
+ Handle<Map> proxy_map(proxy->map());
+ Handle<Map> map = Map::FixProxy(proxy_map, type, size);
// Check that the receiver has at least the size of the fresh object.
- int size_difference = proxy->map()->instance_size() - map->instance_size();
+ int size_difference = proxy_map->instance_size() - map->instance_size();
DCHECK(size_difference >= 0);
- Handle<Object> prototype(proxy->map()->prototype(), isolate());
- Map::SetPrototype(map, prototype);
-
// Allocate the backing storage for the properties.
int prop_size = map->InitialPropertiesLength();
Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
@@ -2196,24 +2136,21 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<JSMessageObject> Factory::NewJSMessageObject(
- Handle<String> type,
- Handle<JSArray> arguments,
- int start_position,
- int end_position,
- Handle<Object> script,
+ MessageTemplate::Template message, Handle<Object> argument,
+ int start_position, int end_position, Handle<Object> script,
Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
- Handle<JSMessageObject> message = New<JSMessageObject>(map, NEW_SPACE);
- message->set_properties(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->initialize_elements();
- message->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- message->set_type(*type);
- message->set_arguments(*arguments);
- message->set_start_position(start_position);
- message->set_end_position(end_position);
- message->set_script(*script);
- message->set_stack_frames(*stack_frames);
- return message;
+ Handle<JSMessageObject> message_obj = New<JSMessageObject>(map, NEW_SPACE);
+ message_obj->set_properties(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message_obj->initialize_elements();
+ message_obj->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message_obj->set_type(message);
+ message_obj->set_argument(*argument);
+ message_obj->set_start_position(start_position);
+ message_obj->set_end_position(end_position);
+ message_obj->set_script(*script);
+ message_obj->set_stack_frames(*stack_frames);
+ return message_obj;
}
@@ -2398,6 +2335,7 @@ Handle<JSWeakMap> Factory::NewJSWeakMap() {
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
int number_of_properties,
+ bool is_strong,
bool* is_result_from_cache) {
const int kMapCacheSize = 128;
@@ -2405,23 +2343,33 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
if (number_of_properties > kMapCacheSize ||
isolate()->bootstrapper()->IsActive()) {
*is_result_from_cache = false;
- return Map::Create(isolate(), number_of_properties);
+ Handle<Map> map = Map::Create(isolate(), number_of_properties);
+ if (is_strong) map->set_is_strong();
+ return map;
}
*is_result_from_cache = true;
if (number_of_properties == 0) {
// Reuse the initial map of the Object function if the literal has no
- // predeclared properties.
- return handle(context->object_function()->initial_map(), isolate());
+ // predeclared properties, or the strong map if strong.
+ return handle(is_strong
+ ? context->js_object_strong_map()
+ : context->object_function()->initial_map(), isolate());
}
+
int cache_index = number_of_properties - 1;
- if (context->map_cache()->IsUndefined()) {
+ Handle<Object> maybe_cache(is_strong ? context->strong_map_cache()
+ : context->map_cache(), isolate());
+ if (maybe_cache->IsUndefined()) {
// Allocate the new map cache for the native context.
- Handle<FixedArray> new_cache = NewFixedArray(kMapCacheSize, TENURED);
- context->set_map_cache(*new_cache);
- }
- // Check to see whether there is a matching element in the cache.
- Handle<FixedArray> cache(FixedArray::cast(context->map_cache()));
- {
+ maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
+ if (is_strong) {
+ context->set_strong_map_cache(*maybe_cache);
+ } else {
+ context->set_map_cache(*maybe_cache);
+ }
+ } else {
+ // Check to see whether there is a matching element in the cache.
+ Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
Object* result = cache->get(cache_index);
if (result->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(result);
@@ -2431,7 +2379,9 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
}
}
// Create a new map and add it to the cache.
+ Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
Handle<Map> map = Map::Create(isolate(), number_of_properties);
+ if (is_strong) map->set_is_strong();
Handle<WeakCell> cell = NewWeakCell(map);
cache->set(cache_index, *cell);
return map;
@@ -2487,4 +2437,5 @@ Handle<Object> Factory::ToBoolean(bool value) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
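The ObjectLiteralMapFromCache rework above caches maps only weakly, via WeakCell, so a cached map can still be collected when nothing else holds it. The lookup idiom, condensed from the hunk (a sketch, not the verbatim code):

    Object* slot = cache->get(cache_index);
    if (slot->IsWeakCell()) {
      WeakCell* cell = WeakCell::cast(slot);
      if (!cell->cleared()) {
        // Hit: the cached map is still alive.
        return handle(Map::cast(cell->value()), isolate());
      }
    }
    // Miss, or the map was collected: create a new one and refill the slot.
    Handle<Map> map = Map::Create(isolate(), number_of_properties);
    cache->set(cache_index, *NewWeakCell(map));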
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 44aec28b5f..2de768bf13 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -46,13 +46,6 @@ class Factory final {
int size,
PretenureFlag pretenure = NOT_TENURED);
- Handle<ConstantPoolArray> NewConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small);
-
- Handle<ConstantPoolArray> NewExtendedConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small,
- const ConstantPoolArray::NumberOfEntries& extended);
-
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
@@ -232,8 +225,7 @@ class Factory final {
// Create a symbol.
Handle<Symbol> NewSymbol();
- Handle<Symbol> NewPrivateSymbol();
- Handle<Symbol> NewPrivateOwnSymbol();
+ Handle<Symbol> NewPrivateSymbol(Handle<Object> name);
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -298,8 +290,7 @@ class Factory final {
PretenureFlag pretenure = NOT_TENURED);
Handle<FixedTypedArrayBase> NewFixedTypedArray(
- int length,
- ExternalArrayType array_type,
+ int length, ExternalArrayType array_type, bool initialize,
PretenureFlag pretenure = NOT_TENURED);
Handle<Cell> NewCell(Handle<Object> value);
@@ -339,9 +330,6 @@ class Factory final {
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
- Handle<ConstantPoolArray> CopyConstantPoolArray(
- Handle<ConstantPoolArray> array);
-
// Numbers (e.g. literals) are pretenured by the parser.
// The return value may be a smi or a heap number.
Handle<Object> NewNumber(double value,
@@ -362,6 +350,8 @@ class Factory final {
Handle<HeapNumber> NewHeapNumber(double value,
MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<Float32x4> NewFloat32x4(float w, float x, float y, float z,
+ PretenureFlag pretenure = NOT_TENURED);
// These objects are used by the api to create env-independent data
// structures in the heap.
@@ -399,41 +389,42 @@ class Factory final {
// JS arrays are pretenured when allocated by the parser.
// Create a JSArray with no elements.
- Handle<JSArray> NewJSArray(
- ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(ElementsKind elements_kind,
+ Strength strength = Strength::WEAK,
+ PretenureFlag pretenure = NOT_TENURED);
// Create a JSArray with a specified length and elements initialized
// according to the specified mode.
Handle<JSArray> NewJSArray(
ElementsKind elements_kind, int length, int capacity,
+ Strength strength = Strength::WEAK,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArray(
- int capacity,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ int capacity, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ Strength strength = Strength::WEAK,
PretenureFlag pretenure = NOT_TENURED) {
if (capacity != 0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
- return NewJSArray(elements_kind, 0, capacity,
+ return NewJSArray(elements_kind, 0, capacity, strength,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
}
// Create a JSArray with the given elements.
- Handle<JSArray> NewJSArrayWithElements(
- Handle<FixedArrayBase> elements,
- ElementsKind elements_kind,
- int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind, int length,
+ Strength strength = Strength::WEAK,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ Strength strength = Strength::WEAK,
PretenureFlag pretenure = NOT_TENURED) {
- return NewJSArrayWithElements(
- elements, elements_kind, elements->length(), pretenure);
+ return NewJSArrayWithElements(elements, elements_kind, elements->length(),
+ strength, pretenure);
}
void NewJSArrayStorage(
@@ -444,7 +435,8 @@ class Factory final {
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
- Handle<JSArrayBuffer> NewJSArrayBuffer();
+ Handle<JSArrayBuffer> NewJSArrayBuffer(
+ SharedFlag shared = SharedFlag::kNotShared);
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
@@ -463,6 +455,9 @@ class Factory final {
Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t byte_length);
+ Handle<JSMap> NewJSMap();
+ Handle<JSSet> NewJSSet();
+
// TODO(aandrey): Maybe these should take table, index and kind arguments.
Handle<JSMapIterator> NewJSMapIterator();
Handle<JSSetIterator> NewJSSetIterator();
@@ -540,35 +535,18 @@ class Factory final {
Handle<Object> NewError(const char* maker, const char* message,
Handle<JSArray> args);
Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
- Handle<Object> NewError(const char* maker, const char* message,
- Vector<Handle<Object> > args);
- Handle<Object> NewError(const char* message, Vector<Handle<Object> > args);
- Handle<Object> NewError(Handle<String> message);
Handle<Object> NewError(const char* constructor, Handle<String> message);
- Handle<Object> NewTypeError(const char* message,
- Vector<Handle<Object> > args);
- Handle<Object> NewTypeError(Handle<String> message);
-
- Handle<Object> NewRangeError(const char* message,
- Vector<Handle<Object> > args);
- Handle<Object> NewRangeError(Handle<String> message);
-
Handle<Object> NewInvalidStringLengthError() {
return NewRangeError(MessageTemplate::kInvalidStringLength);
}
- Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
- Handle<Object> NewSyntaxError(Handle<String> message);
-
- Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
- Handle<Object> NewReferenceError(Handle<String> message);
-
Handle<Object> NewError(const char* maker,
MessageTemplate::Template template_index,
- Handle<Object> arg0, Handle<Object> arg1,
- Handle<Object> arg2);
+ Handle<Object> arg0 = Handle<Object>(),
+ Handle<Object> arg1 = Handle<Object>(),
+ Handle<Object> arg2 = Handle<Object>());
Handle<Object> NewError(MessageTemplate::Template template_index,
Handle<Object> arg0 = Handle<Object>(),
@@ -580,6 +558,11 @@ class Factory final {
Handle<Object> arg1 = Handle<Object>(),
Handle<Object> arg2 = Handle<Object>());
+ Handle<Object> NewSyntaxError(MessageTemplate::Template template_index,
+ Handle<Object> arg0 = Handle<Object>(),
+ Handle<Object> arg1 = Handle<Object>(),
+ Handle<Object> arg2 = Handle<Object>());
+
Handle<Object> NewReferenceError(MessageTemplate::Template template_index,
Handle<Object> arg0 = Handle<Object>(),
Handle<Object> arg1 = Handle<Object>(),
@@ -669,13 +652,12 @@ class Factory final {
Handle<TypeFeedbackVector> NewTypeFeedbackVector(const Spec* spec);
// Allocates a new JSMessageObject object.
- Handle<JSMessageObject> NewJSMessageObject(
- Handle<String> type,
- Handle<JSArray> arguments,
- int start_position,
- int end_position,
- Handle<Object> script,
- Handle<Object> stack_frames);
+ Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
+ Handle<Object> argument,
+ int start_position,
+ int end_position,
+ Handle<Object> script,
+ Handle<Object> stack_frames);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -683,6 +665,7 @@ class Factory final {
// native context.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
int number_of_properties,
+ bool is_strong,
bool* is_result_from_cache);
// Creates a new FixedArray that holds the data associated with the
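Because Strength defaults to Strength::WEAK in every overload above, existing callers compile unchanged; only strong-mode allocation sites pass it explicitly. Illustrative calls (factory is a Factory*; Strength::STRONG is assumed as the counterpart of the Strength::WEAK default seen above):

    // As before this patch: a normal array.
    Handle<JSArray> a = factory->NewJSArray(FAST_ELEMENTS);
    // Strong-mode array with capacity 16 and hole-initialized storage.
    Handle<JSArray> b = factory->NewJSArray(
        FAST_HOLEY_ELEMENTS, 0, 16, Strength::STRONG,
        INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);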
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index d6edddce1d..ed7223ee44 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -712,4 +712,5 @@ bool FastDtoa(double v,
return result;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc
index 7856b13108..a33ba1744f 100644
--- a/deps/v8/src/fixed-dtoa.cc
+++ b/deps/v8/src/fixed-dtoa.cc
@@ -382,4 +382,5 @@ bool FastFixedDtoa(double v,
return true;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 2b905e31d4..79611270dd 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -174,6 +174,7 @@ DEFINE_IMPLICATION(use_strong, use_strict)
DEFINE_BOOL(strong_mode, false, "experimental strong language mode")
DEFINE_IMPLICATION(use_strong, strong_mode)
+DEFINE_BOOL(strong_this, true, "don't allow 'this' to escape from constructors")
DEFINE_BOOL(es_staging, false, "enable all completed harmony features")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
@@ -181,32 +182,36 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(harmony, es_staging)
DEFINE_IMPLICATION(es_staging, harmony)
+DEFINE_BOOL(legacy_const, true, "legacy semantics for const in sloppy mode")
+
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
V(harmony_modules, "harmony modules") \
- V(harmony_arrays, "harmony array methods") \
V(harmony_array_includes, "harmony Array.prototype.includes") \
V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_arrow_functions, "harmony arrow functions") \
V(harmony_proxies, "harmony proxies") \
V(harmony_sloppy, "harmony features in sloppy mode") \
V(harmony_unicode_regexps, "harmony unicode regexps") \
V(harmony_reflect, "harmony Reflect API") \
V(harmony_destructuring, "harmony destructuring") \
- V(harmony_object, "harmony Object methods")
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_atomics, "harmony atomics") \
+ V(harmony_new_target, "harmony new.target")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_rest_parameters, "harmony rest parameters") \
- V(harmony_spreadcalls, "harmony spread-calls") \
- V(harmony_tostring, "harmony toString")
+#define HARMONY_STAGED(V) \
+ V(harmony_tostring, "harmony toString") \
+ V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
+ V(harmony_rest_parameters, "harmony rest parameters") \
+ V(harmony_spreadcalls, "harmony spread-calls") \
+ V(harmony_spread_arrays, "harmony spread in array literals")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_classes, "harmony classes (implies object literal extension)") \
- V(harmony_computed_property_names, "harmony computed property names") \
- V(harmony_object_literals, "harmony object literal extensions") \
- V(harmony_unicode, "harmony unicode escapes") \
+#define HARMONY_SHIPPING(V) \
+ V(harmony_arrow_functions, "harmony arrow functions") \
+ V(harmony_computed_property_names, "harmony computed property names") \
+ V(harmony_unicode, "harmony unicode escapes") \
+ V(harmony_object, "harmony Object methods")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -233,7 +238,6 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
// Feature dependencies.
-DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)
DEFINE_IMPLICATION(harmony_unicode_regexps, harmony_unicode)
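For readers unfamiliar with these macros: DEFINE_IMPLICATION(a, b) makes enabling --a also enable --b, which is how each HARMONY_* bucket above cascades down from --harmony and --es-staging. Roughly, every implication contributes a check of the form below when flags are finalized:

    // Approximate expansion of DEFINE_IMPLICATION(use_strong, strong_mode):
    if (FLAG_use_strong) FLAG_strong_mode = true;

so, for example, d8 --use_strong runs with strong mode (and, via the use_strict implication, strict mode) enabled.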
@@ -260,7 +264,6 @@ DEFINE_BOOL(track_field_types, true, "track field types")
DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
-DEFINE_BOOL(vector_ics, true, "support vector-based ics")
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
@@ -383,9 +386,9 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
// Flags for TurboFan.
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
+DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
-DEFINE_IMPLICATION(turbo, turbo_deoptimization)
-DEFINE_IMPLICATION(turbo, turbo_type_feedback)
+DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
@@ -398,6 +401,8 @@ DEFINE_BOOL(trace_turbo_reduction, false, "trace TurboFan's various reducers")
DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
+DEFINE_BOOL(turbo_asm_deoptimization, false,
+ "enable deoptimization in TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
@@ -409,9 +414,7 @@ DEFINE_BOOL(turbo_source_positions, false,
DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
DEFINE_BOOL(context_specialization, false,
"enable context specialization in TurboFan")
-DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan")
DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
-DEFINE_BOOL(turbo_builtin_inlining, true, "enable builtin inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
@@ -420,11 +423,13 @@ DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
-DEFINE_BOOL(turbo_exceptions, false, "enable exception handling in TurboFan")
+DEFINE_BOOL(turbo_try_catch, true, "enable try-catch support in TurboFan")
+DEFINE_BOOL(turbo_try_finally, false, "enable try-finally support in TurboFan")
DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
+DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -608,16 +613,13 @@ DEFINE_INT(trace_allocation_stack_interval, -1,
DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
DEFINE_BOOL(trace_fragmentation_verbose, false,
"report fragmentation for old space (detailed)")
-DEFINE_BOOL(collect_maps, true,
- "garbage collect maps from which no objects can be reached")
+DEFINE_BOOL(trace_mutator_utilization, false,
+ "print mutator utilization, allocation speed, gc speed")
DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
"make maps embedded in optimized code weak")
DEFINE_BOOL(weak_embedded_objects_in_optimized_code, true,
"make objects embedded in optimized code weak")
-DEFINE_BOOL(flush_code, true,
- "flush code that we expect not to use again (during full gc)")
-DEFINE_BOOL(flush_code_incrementally, true,
- "flush code that we expect not to use again (incrementally)")
+DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again")
DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
@@ -664,6 +666,7 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
+DEFINE_BOOL(vector_stores, false, "use vectors for store ics")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -673,17 +676,14 @@ DEFINE_BOOL(native_code_counters, false,
DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
-DEFINE_BOOL(compact_code_space, true,
- "Compact code space on full non-incremental collections")
-DEFINE_BOOL(incremental_code_compaction, true,
- "Compact code space on full incremental collections")
+DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
DEFINE_BOOL(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
-DEFINE_BOOL(zap_code_space, true,
+DEFINE_BOOL(zap_code_space, DEBUG_BOOL,
"Zap free memory in code space with 0xCC while sweeping.")
DEFINE_INT(random_seed, 0,
"Default seed for initializing random generator "
@@ -723,7 +723,7 @@ DEFINE_INT(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_INT(sim_stack_size, 2 * MB / KB,
- "Stack size of the ARM64 and MIPS64 simulator "
+ "Stack size of the ARM64, MIPS64 and PPC64 simulator "
"in kBytes (default is 2 MB)")
DEFINE_BOOL(log_regs_modified, true,
"When logging register values, only print modified registers.")
@@ -929,9 +929,6 @@ DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_BOOL(perf_basic_prof, false,
"Enable perf linux profiler (basic support).")
DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space)
-DEFINE_BOOL(perf_jit_prof, false,
- "Enable perf linux profiler (experimental annotate support).")
-DEFINE_NEG_IMPLICATION(perf_jit_prof, compact_code_space)
DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
@@ -1037,8 +1034,8 @@ DEFINE_INT(dump_allocations_digest_at_alloc, 0,
#define FLAG FLAG_READONLY
// assembler.h
-DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
- "enable use of out-of-line constant pools (ARM only)")
+DEFINE_BOOL(enable_embedded_constant_pool, V8_EMBEDDED_CONSTANT_POOL,
+ "enable use of embedded constant pools (ARM/PPC only)")
DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 88f06a301c..6f28ebb037 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -574,4 +574,5 @@ void FlagList::EnforceFlagImplications() {
uint32_t FlagList::Hash() { return flag_hash; }
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index f52b3ce4e4..723db4ae13 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -321,9 +321,6 @@ bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
if (!IsValidStackAddress(sp)) return false;
StackFrame::State state;
ExitFrame::FillState(fp, sp, &state);
- if (!IsValidStackAddress(reinterpret_cast<Address>(state.pc_address))) {
- return false;
- }
return *state.pc_address != NULL;
}
@@ -385,9 +382,8 @@ static bool GcSafeCodeContains(HeapObject* object, Address addr);
#endif
-void StackFrame::IteratePc(ObjectVisitor* v,
- Address* pc_address,
- Code* holder) {
+void StackFrame::IteratePc(ObjectVisitor* v, Address* pc_address,
+ Address* constant_pool_address, Code* holder) {
Address pc = *pc_address;
DCHECK(GcSafeCodeContains(holder, pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
@@ -397,6 +393,9 @@ void StackFrame::IteratePc(ObjectVisitor* v,
holder = reinterpret_cast<Code*>(code);
pc = holder->instruction_start() + pc_offset;
*pc_address = pc;
+ if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
+ *constant_pool_address = holder->constant_pool();
+ }
}
}
@@ -425,10 +424,27 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// into the heap to determine the state. This is safe as long
// as nobody tries to GC...
if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT;
- Code::Kind kind = GetContainingCode(iterator->isolate(),
- *(state->pc_address))->kind();
- DCHECK(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
- return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
+ Code* code_obj =
+ GetContainingCode(iterator->isolate(), *(state->pc_address));
+ switch (code_obj->kind()) {
+ case Code::FUNCTION:
+ return JAVA_SCRIPT;
+
+ case Code::HANDLER:
+#ifdef DEBUG
+ if (!code_obj->is_hydrogen_stub()) {
+ // There's currently no support for non-hydrogen stub handlers. If
+ // you need this, you'll have to implement it yourself.
+ UNREACHABLE();
+ }
+#endif
+ case Code::OPTIMIZED_FUNCTION:
+ return OPTIMIZED;
+
+ default:
+ UNREACHABLE();
+ return JAVA_SCRIPT;
+ }
}
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
@@ -506,7 +522,7 @@ void ExitFrame::ComputeCallerState(State* state) const {
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
state->constant_pool_address = reinterpret_cast<Address*>(
fp() + ExitFrameConstants::kConstantPoolOffset);
}
@@ -521,11 +537,8 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
void ExitFrame::Iterate(ObjectVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
- IteratePc(v, pc_address(), LookupCode());
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
v->VisitPointer(&code_slot());
- if (FLAG_enable_ool_constant_pool) {
- v->VisitPointer(&constant_pool_slot());
- }
}
@@ -553,8 +566,11 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
- state->constant_pool_address =
- reinterpret_cast<Address*>(fp + ExitFrameConstants::kConstantPoolOffset);
+ // The constant pool recorded in the exit frame is not associated
+ // with the pc in this state (the return address into a C entry
+ // stub). ComputeCallerState will retrieve the constant pool
+ // together with the associated caller pc.
+ state->constant_pool_address = NULL;
}
@@ -663,7 +679,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
}
// Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
+ IteratePc(v, pc_address(), constant_pool_address(), code);
// Visit the context in stub frame and JavaScript frame.
// Visit the function in JavaScript frame.
@@ -714,9 +730,24 @@ bool JavaScriptFrame::IsConstructor() const {
}
+Object* JavaScriptFrame::GetOriginalConstructor() const {
+ Address fp = caller_fp();
+ if (has_adapted_arguments()) {
+ // Skip the arguments adaptor frame and look at the real caller.
+ fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+ }
+ DCHECK(IsConstructFrame(fp));
+ STATIC_ASSERT(ConstructFrameConstants::kOriginalConstructorOffset ==
+ StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize);
+ return GetExpression(fp, 2);
+}
+
+
int JavaScriptFrame::GetArgumentsLength() const {
// If there is an arguments adaptor frame get the arguments length from it.
if (has_adapted_arguments()) {
+ STATIC_ASSERT(ArgumentsAdaptorFrameConstants::kLengthOffset ==
+ StandardFrameConstants::kExpressionsOffset);
return Smi::cast(GetExpression(caller_fp(), 0))->value();
} else {
return GetNumberOfIncomingArguments();
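Both STATIC_ASSERTs in the hunk above lean on the same addressing convention: expression slot n of a frame lives at kExpressionsOffset - n * kPointerSize from the frame pointer. A sketch of the arithmetic, with a hypothetical helper standing in for the frame's real accessor:

    // Illustrative only; mirrors what GetExpression(fp, n) computes.
    Address ExpressionSlot(Address fp, int n) {
      return fp + StandardFrameConstants::kExpressionsOffset -
             n * kPointerSize;
    }
    // kOriginalConstructorOffset == kExpressionsOffset - 2 * kPointerSize,
    // so GetExpression(fp, 2) yields the original constructor; likewise
    // kLengthOffset == kExpressionsOffset, so GetExpression(caller_fp(), 0)
    // yields the adaptor frame's argument count.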
@@ -761,12 +792,13 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
}
-int JavaScriptFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+int JavaScriptFrame::LookupExceptionHandlerInTable(
+ int* stack_slots, HandlerTable::CatchPrediction* prediction) {
Code* code = LookupCode();
DCHECK(!code->is_optimized_code());
HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
- return table->LookupRange(pc_offset, stack_slots);
+ return table->LookupRange(pc_offset, stack_slots, prediction);
}
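The new out-parameter threads the handler table's catch prediction (whether a matching handler is expected to locally catch the exception) back to callers such as the isolate's exception propagation code. A hedged usage sketch; the variable names are illustrative:

    int stack_slots = 0;
    HandlerTable::CatchPrediction prediction;
    int handler_offset =
        frame->LookupExceptionHandlerInTable(&stack_slots, &prediction);
    if (handler_offset >= 0 && prediction == HandlerTable::CAUGHT) {
      // The frame has a handler and it is predicted to catch locally.
    }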
@@ -863,85 +895,72 @@ void FrameSummary::Print() {
}
-JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
- int literal_id) {
- if (literal_id == Translation::kSelfLiteralId) {
- return function();
- }
-
- return JSFunction::cast(literal_array->get(literal_id));
-}
-
-
void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
DCHECK(frames->length() == 0);
DCHECK(is_optimized());
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization) {
return JavaScriptFrame::Summarize(frames);
}
+ DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
- FixedArray* literal_array = data->LiteralArray();
-
- // BUG(3243555): Since we don't have a lazy-deopt registered at
- // throw-statements, we can't use the translation at the call-site of
- // throw. An entry with no deoptimization index indicates a call-site
- // without a lazy-deopt. As a consequence we are not allowed to inline
- // functions containing throw.
- DCHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
+ DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+ FixedArray* const literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK(opcode == Translation::BEGIN);
+ DCHECK_EQ(Translation::BEGIN, opcode);
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
// We create the summary in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
bool is_constructor = IsConstructor();
- int i = jsframe_count;
- while (i > 0) {
+ while (jsframe_count != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::JS_FRAME) {
- i--;
- BailoutId ast_id = BailoutId(it.Next());
- JSFunction* function = LiteralAt(literal_array, it.Next());
+ jsframe_count--;
+ BailoutId const ast_id = BailoutId(it.Next());
+ SharedFunctionInfo* const shared_info =
+ SharedFunctionInfo::cast(literal_array->get(it.Next()));
it.Next(); // Skip height.
- // The translation commands are ordered and the receiver is always
- // at the first position.
+ // The translation commands are ordered: the function is always
+ // at the first position, and the receiver is next.
+ opcode = static_cast<Translation::Opcode>(it.Next());
+
+ // Get the correct function in the optimized frame.
+ JSFunction* function;
+ if (opcode == Translation::LITERAL) {
+ function = JSFunction::cast(literal_array->get(it.Next()));
+ } else if (opcode == Translation::STACK_SLOT) {
+ function = JSFunction::cast(StackSlotAt(it.Next()));
+ } else {
+ CHECK_EQ(Translation::JS_FRAME_FUNCTION, opcode);
+ function = this->function();
+ }
+ DCHECK_EQ(shared_info, function->shared());
+
// If we are at a call, the receiver is always in a stack slot.
// Otherwise we are not guaranteed to get the receiver value.
opcode = static_cast<Translation::Opcode>(it.Next());
- int index = it.Next();
// Get the correct receiver in the optimized frame.
- Object* receiver = NULL;
+ Object* receiver;
if (opcode == Translation::LITERAL) {
- receiver = data->LiteralArray()->get(index);
+ receiver = literal_array->get(it.Next());
} else if (opcode == Translation::STACK_SLOT) {
- // Positive index means the value is spilled to the locals
- // area. Negative means it is stored in the incoming parameter
- // area.
- if (index >= 0) {
- receiver = GetExpression(index);
- } else {
- // Index -1 overlaps with last parameter, -n with the first parameter,
- // (-n - 1) with the receiver with n being the number of parameters
- // of the outermost, optimized frame.
- int parameter_count = ComputeParametersCount();
- int parameter_index = index + parameter_count;
- receiver = (parameter_index == -1)
- ? this->receiver()
- : this->GetParameter(parameter_index);
- }
+ receiver = StackSlotAt(it.Next());
+ } else if (opcode == Translation::JS_FRAME_FUNCTION) {
+ receiver = this->function();
} else {
// The receiver is not in a stack slot nor in a literal. We give up.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
// TODO(3029): Materializing a captured object (or duplicated
// object) is hard, we return undefined for now. This breaks the
// produced stack trace, as constructor frames aren't marked as
@@ -949,15 +968,14 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
receiver = isolate()->heap()->undefined_value();
}
- Code* code = function->shared()->code();
- DeoptimizationOutputData* output_data =
+ Code* const code = shared_info->code();
+ DeoptimizationOutputData* const output_data =
DeoptimizationOutputData::cast(code->deoptimization_data());
- unsigned entry = Deoptimizer::GetOutputInfo(output_data,
- ast_id,
- function->shared());
- unsigned pc_offset =
+ unsigned const entry =
+ Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
+ unsigned const pc_offset =
FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
- DCHECK(pc_offset > 0);
+ DCHECK_NE(0U, pc_offset);
FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
frames->Add(summary);
@@ -976,13 +994,14 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
}
-int OptimizedFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+int OptimizedFrame::LookupExceptionHandlerInTable(
+ int* stack_slots, HandlerTable::CatchPrediction* prediction) {
Code* code = LookupCode();
DCHECK(code->is_optimized_code());
HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
*stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset);
+ return table->LookupReturn(pc_offset, prediction);
}
@@ -1011,68 +1030,73 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
}
-int OptimizedFrame::GetInlineCount() {
- DCHECK(is_optimized());
-
- // Delegate to JS frame in absence of turbofan deoptimization.
- // TODO(turbofan): Revisit once we support deoptimization across the board.
- if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
- return JavaScriptFrame::GetInlineCount();
- }
-
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
-
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK(opcode == Translation::BEGIN);
- USE(opcode);
- it.Next(); // Drop frame count.
- int jsframe_count = it.Next();
- return jsframe_count;
-}
-
-
void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
DCHECK(functions->length() == 0);
DCHECK(is_optimized());
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
- if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization) {
return JavaScriptFrame::GetFunctions(functions);
}
+ DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
- DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
- FixedArray* literal_array = data->LiteralArray();
+ DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+ FixedArray* const literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK(opcode == Translation::BEGIN);
- it.Next(); // Drop frame count.
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Next(); // Skip frame count.
int jsframe_count = it.Next();
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
- while (jsframe_count > 0) {
+ while (jsframe_count != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
if (opcode == Translation::JS_FRAME) {
jsframe_count--;
- it.Next(); // Skip ast id.
- JSFunction* function = LiteralAt(literal_array, it.Next());
- it.Next(); // Skip height.
- functions->Add(function);
- } else {
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+
+ // The translation commands are ordered: the function is always at the
+ // first position.
+ opcode = static_cast<Translation::Opcode>(it.Next());
+
+ // Get the correct function in the optimized frame.
+ Object* function;
+ if (opcode == Translation::LITERAL) {
+ function = literal_array->get(it.Next());
+ } else if (opcode == Translation::STACK_SLOT) {
+ function = StackSlotAt(it.Next());
+ } else {
+ CHECK_EQ(Translation::JS_FRAME_FUNCTION, opcode);
+ function = this->function();
+ }
+ functions->Add(JSFunction::cast(function));
}
}
}
+Object* OptimizedFrame::StackSlotAt(int index) const {
+ // Positive index means the value is spilled to the locals
+ // area. Negative means it is stored in the incoming parameter
+ // area.
+ if (index >= 0) return GetExpression(index);
+
+ // Index -1 overlaps with last parameter, -n with the first parameter,
+ // (-n - 1) with the receiver with n being the number of parameters
+ // of the outermost, optimized frame.
+ int const parameter_count = ComputeParametersCount();
+ int const parameter_index = index + parameter_count;
+ return (parameter_index == -1) ? receiver() : GetParameter(parameter_index);
+}
+
+
int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
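The index convention StackSlotAt decodes is easiest to check with a worked example. For a hypothetical optimized frame with parameter_count == 2:

    //   index  0  ->  GetExpression(0)       spilled local slot 0
    //   index -1  ->  parameter_index  1     the last parameter
    //   index -2  ->  parameter_index  0     the first parameter
    //   index -3  ->  parameter_index -1     the receiver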
@@ -1111,6 +1135,24 @@ void StackFrame::PrintIndex(StringStream* accumulator,
}
+namespace {
+
+
+void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo* shared,
+ Code* code) {
+ if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
+ std::ostringstream os;
+ os << "--------- s o u r c e c o d e ---------\n"
+ << SourceCodeOf(shared, FLAG_max_stack_trace_source_length)
+ << "\n-----------------------------------------\n";
+ accumulator->Add(os.str().c_str());
+ }
+}
+
+
+} // namespace
+
+
void JavaScriptFrame::Print(StringStream* accumulator,
PrintMode mode,
int index) const {
@@ -1148,7 +1190,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(":~%d", line);
}
- accumulator->Add("] ");
+ accumulator->Add("] [pc=%p] ", pc);
}
accumulator->Add("(this=%o", receiver);
@@ -1173,7 +1215,9 @@ void JavaScriptFrame::Print(StringStream* accumulator,
return;
}
if (is_optimized()) {
- accumulator->Add(" {\n// optimized frame\n}\n");
+ accumulator->Add(" {\n// optimized frame\n");
+ PrintFunctionSource(accumulator, shared, code);
+ accumulator->Add("}\n");
return;
}
accumulator->Add(" {\n");
@@ -1240,15 +1284,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" [%02d] : %o\n", i, GetExpression(i));
}
- // Print details about the function.
- if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
- std::ostringstream os;
- SharedFunctionInfo* shared = function->shared();
- os << "--------- s o u r c e c o d e ---------\n"
- << SourceCodeOf(shared, FLAG_max_stack_trace_source_length)
- << "\n-----------------------------------------\n";
- accumulator->Add(os.str().c_str());
- }
+ PrintFunctionSource(accumulator, shared, code);
accumulator->Add("}\n\n");
}
@@ -1285,7 +1321,7 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
void EntryFrame::Iterate(ObjectVisitor* v) const {
- IteratePc(v, pc_address(), LookupCode());
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
@@ -1299,7 +1335,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
- IteratePc(v, pc_address(), LookupCode());
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
@@ -1307,7 +1343,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
- IteratePc(v, pc_address(), LookupCode());
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
@@ -1320,7 +1356,7 @@ void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
limit = &Memory::Object_at(fp() + offset) + 1;
v->VisitPointers(base, limit);
- IteratePc(v, pc_address(), LookupCode());
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
@@ -1458,7 +1494,7 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
// -------------------------------------------------------------------------
-int NumRegs(RegList reglist) { return base::bits::CountPopulation32(reglist); }
+int NumRegs(RegList reglist) { return base::bits::CountPopulation(reglist); }
struct JSCallerSavedCodeData {
@@ -1520,4 +1556,5 @@ Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 397c7b5db9..910dc18cfb 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -114,25 +114,57 @@ class StackHandler BASE_EMBEDDED {
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
- // constant pool (if FLAG_enable_ool_constant_pool), context, and function.
- // StandardFrame::IterateExpressions assumes that kLastObjectOffset is the
- // last object pointer.
+ // constant pool (if FLAG_enable_embedded_constant_pool), context, and
+ // function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
+ // is the last object pointer.
static const int kCPSlotSize =
- FLAG_enable_ool_constant_pool ? kPointerSize : 0;
+ FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
- static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
- kFixedFrameSizeFromFp;
- static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
- static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
- static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
- static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
- -1 * kPointerSize : 0;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kFPOnStackSize;
- static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
-
- static const int kLastObjectOffset = FLAG_enable_ool_constant_pool ?
- kConstantPoolOffset : kContextOffset;
+ static const int kFixedFrameSize =
+ kPCOnStackSize + kFPOnStackSize + kFixedFrameSizeFromFp;
+ static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
+ static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
+ static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
+ static const int kConstantPoolOffset = kCPSlotSize ? -1 * kPointerSize : 0;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kFPOnStackSize;
+ static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+
+ static const int kLastObjectOffset = kContextOffset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset =
+ StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
+ static const int kOriginalConstructorOffset =
+ StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
+ static const int kLengthOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ static const int kCodeOffset =
+ StandardFrameConstants::kExpressionsOffset - 0 * kPointerSize;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
};
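Plugging concrete numbers into the rewritten constants makes them easier to audit. Assuming a 64-bit target with kPointerSize == kFPOnStackSize == 8 and embedded constant pools disabled (kCPSlotSize == 0):

    //   kCallerPCOffset    = +8    return address
    //   kCallerFPOffset    =  0    saved frame pointer
    //   kContextOffset     = -8    context
    //   kMarkerOffset      = -16   frame-type marker
    //   kExpressionsOffset = -24   first expression slot
    // With embedded constant pools enabled, kCPSlotSize == kPointerSize and
    // the three negative offsets each shift down by one slot.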
@@ -215,8 +247,8 @@ class StackFrame BASE_EMBEDDED {
void set_pc(Address pc) { *pc_address() = pc; }
Address constant_pool() const { return *constant_pool_address(); }
- void set_constant_pool(ConstantPoolArray* constant_pool) {
- *constant_pool_address() = reinterpret_cast<Address>(constant_pool);
+ void set_constant_pool(Address constant_pool) {
+ *constant_pool_address() = constant_pool;
}
virtual void SetCallerFp(Address caller_fp) = 0;
@@ -258,7 +290,8 @@ class StackFrame BASE_EMBEDDED {
unsigned* stack_slots);
virtual void Iterate(ObjectVisitor* v) const = 0;
- static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
+ static void IteratePc(ObjectVisitor* v, Address* pc_address,
+ Address* constant_pool_address, Code* holder);
// Sets a callback function for return-address rewriting profilers
// to resolve the location of a return address to the location of the
@@ -380,7 +413,6 @@ class ExitFrame: public StackFrame {
virtual Code* unchecked_code() const;
Object*& code_slot() const;
- Object*& constant_pool_slot() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -535,6 +567,10 @@ class JavaScriptFrame: public StandardFrame {
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
+ // Returns the original constructor function that was used in the constructor
+ // call to this frame. Note that this is only valid on constructor frames.
+ Object* GetOriginalConstructor() const;
+
// Check if this frame has "adapted" arguments in the sense that the
// actual passed arguments are available in an arguments adaptor
// frame below it on the stack.
@@ -552,9 +588,6 @@ class JavaScriptFrame: public StandardFrame {
// Determine the code for the frame.
virtual Code* unchecked_code() const;
- // Returns the levels of inlining for this frame.
- virtual int GetInlineCount() { return 1; }
-
// Return a list with JSFunctions of this frame.
virtual void GetFunctions(List<JSFunction*>* functions);
@@ -563,7 +596,8 @@ class JavaScriptFrame: public StandardFrame {
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns the expected number of stack slots at the handler site.
- virtual int LookupExceptionHandlerInTable(int* stack_slots);
+ virtual int LookupExceptionHandlerInTable(
+ int* stack_slots, HandlerTable::CatchPrediction* prediction);
// Architecture-specific register description.
static Register fp_register();
@@ -628,8 +662,6 @@ class OptimizedFrame : public JavaScriptFrame {
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
- virtual int GetInlineCount();
-
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
@@ -639,7 +671,8 @@ class OptimizedFrame : public JavaScriptFrame {
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns the expected number of stack slots at the handler site.
- virtual int LookupExceptionHandlerInTable(int* stack_slots);
+ virtual int LookupExceptionHandlerInTable(
+ int* stack_slots, HandlerTable::CatchPrediction* prediction);
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
@@ -647,9 +680,9 @@ class OptimizedFrame : public JavaScriptFrame {
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
private:
- JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
-
friend class StackFrameIteratorBase;
+
+ Object* StackSlotAt(int index) const;
};
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 0dac7d2e0b..8ca40ccacd 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -21,262 +21,6 @@
namespace v8 {
namespace internal {
-void BreakableStatementChecker::Check(Statement* stmt) {
- Visit(stmt);
-}
-
-
-void BreakableStatementChecker::Check(Expression* expr) {
- Visit(expr);
-}
-
-
-void BreakableStatementChecker::VisitVariableDeclaration(
- VariableDeclaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitFunctionDeclaration(
- FunctionDeclaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitImportDeclaration(
- ImportDeclaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitExportDeclaration(
- ExportDeclaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitBlock(Block* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- // Check if expression is breakable.
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitEmptyStatement(EmptyStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitIfStatement(IfStatement* stmt) {
- // If the condition is breakable the if statement is breakable.
- Visit(stmt->condition());
-}
-
-
-void BreakableStatementChecker::VisitContinueStatement(
- ContinueStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitBreakStatement(BreakStatement* stmt) {
-}
-
-
-void BreakableStatementChecker::VisitReturnStatement(ReturnStatement* stmt) {
- // Return is breakable if the expression is.
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitWithStatement(WithStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void BreakableStatementChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- // Switch statements breakable if the tag expression is.
- Visit(stmt->tag());
-}
-
-
-void BreakableStatementChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- // Mark do while as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitWhileStatement(WhileStatement* stmt) {
- // Mark while statements breakable if the condition expression is.
- Visit(stmt->cond());
-}
-
-
-void BreakableStatementChecker::VisitForStatement(ForStatement* stmt) {
- // We set positions for both init and condition, if they exist.
- if (stmt->cond() != NULL || stmt->init() != NULL) is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
- // For-in is breakable because we set the position for the enumerable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitForOfStatement(ForOfStatement* stmt) {
- // For-of is breakable because we set the position for the next() call.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitTryCatchStatement(
- TryCatchStatement* stmt) {
- // Mark try catch as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- // Mark try finally as breakable to avoid adding a break slot in front of it.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // The debugger statement is breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCaseClause(CaseClause* clause) {
-}
-
-
-void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitClassLiteral(ClassLiteral* expr) {
- if (expr->extends() != NULL) {
- Visit(expr->extends());
- }
-}
-
-
-void BreakableStatementChecker::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitConditional(Conditional* expr) {
-}
-
-
-void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
-}
-
-
-void BreakableStatementChecker::VisitLiteral(Literal* expr) {
-}
-
-
-void BreakableStatementChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitObjectLiteral(ObjectLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitArrayLiteral(ArrayLiteral* expr) {
-}
-
-
-void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
- // If assigning to a property (including a global property) the assignment is
- // breakable.
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL || (proxy != NULL && proxy->var()->IsUnallocated())) {
- is_breakable_ = true;
- return;
- }
-
- // Otherwise the assignment is breakable if the assigned value is.
- Visit(expr->value());
-}
-
-
-void BreakableStatementChecker::VisitYield(Yield* expr) {
- // Yield is breakable if the expression is.
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitThrow(Throw* expr) {
- // Throw is breakable if the expression is.
- Visit(expr->exception());
-}
-
-
-void BreakableStatementChecker::VisitProperty(Property* expr) {
- // Property load is breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCall(Call* expr) {
- // Function calls both through IC and call stub are breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCallNew(CallNew* expr) {
- // Function calls through new are breakable.
- is_breakable_ = true;
-}
-
-
-void BreakableStatementChecker::VisitCallRuntime(CallRuntime* expr) {
-}
-
-
-void BreakableStatementChecker::VisitUnaryOperation(UnaryOperation* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitCountOperation(CountOperation* expr) {
- Visit(expr->expression());
-}
-
-
-void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- if (expr->op() != Token::AND &&
- expr->op() != Token::OR) {
- Visit(expr->right());
- }
-}
-
-
-void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
-}
-
-
-void BreakableStatementChecker::VisitSpread(Spread* expr) { UNREACHABLE(); }
-
-
-void BreakableStatementChecker::VisitThisFunction(ThisFunction* expr) {
-}
-
-
-void BreakableStatementChecker::VisitSuperReference(SuperReference* expr) {}
-
-
#define __ ACCESS_MASM(masm())
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
@@ -310,14 +54,11 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- code->set_optimizable(info->IsOptimizable() &&
- !info->function()->dont_optimize() &&
- info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
+ cgen.PopulateHandlerTable(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_has_reloc_info_for_serialization(info->will_serialize());
- code->set_handler_table(*cgen.handler_table());
code->set_compiled_optimizable(info->IsOptimizable());
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
@@ -397,6 +138,32 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
}
+void FullCodeGenerator::PopulateHandlerTable(Handle<Code> code) {
+ int handler_table_size = static_cast<int>(handler_table_.size());
+ Handle<HandlerTable> table =
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(handler_table_size), TENURED));
+ for (int i = 0; i < handler_table_size; ++i) {
+ HandlerTable::CatchPrediction prediction =
+ handler_table_[i].try_catch_depth > 0 ? HandlerTable::CAUGHT
+ : HandlerTable::UNCAUGHT;
+ table->SetRangeStart(i, handler_table_[i].range_start);
+ table->SetRangeEnd(i, handler_table_[i].range_end);
+ table->SetRangeHandler(i, handler_table_[i].handler_offset, prediction);
+ table->SetRangeDepth(i, handler_table_[i].stack_depth);
+ }
+ code->set_handler_table(*table);
+}
+
+
+int FullCodeGenerator::NewHandlerTableEntry() {
+ int index = static_cast<int>(handler_table_.size());
+ HandlerTableEntry entry = {0, 0, 0, 0, 0};
+ handler_table_.push_back(entry);
+ return index;
+}
+
+
bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
ObjectLiteral* expr) const {
int literal_flags = expr->ComputeFlags();
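PopulateHandlerTable and NewHandlerTableEntry above accumulate plain aggregates in handler_table_ during codegen. The entry type is declared in full-codegen.h rather than in this hunk; given the five-element initializer and the field accesses, it is presumably something like:

    struct HandlerTableEntry {
      int range_start;      // pc offset where the protected range begins
      int range_end;        // pc offset where the protected range ends
      int handler_offset;   // pc offset of the handler entry point
      int stack_depth;      // operand stack depth at the try site
      int try_catch_depth;  // > 0 inside a try-catch => CAUGHT prediction
    };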
@@ -413,7 +180,8 @@ bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
ArrayLiteral* expr) const {
- return expr->depth() > 1 ||
+ // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
+ return expr->depth() > 1 || expr->is_strong() ||
expr->values()->length() > JSObject::kInitialMaxFastElementArray;
}
@@ -438,23 +206,14 @@ void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
+ LanguageMode language_mode,
TypeFeedbackId id) {
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), contextual_mode).code();
+ Handle<Code> ic =
+ CodeFactory::LoadIC(isolate(), contextual_mode, language_mode).code();
CallIC(ic, id);
}
-void FullCodeGenerator::CallGlobalLoadIC(Handle<String> name) {
- if (masm()->serializer_enabled() || FLAG_vector_ics) {
- // Vector-ICs don't work with LoadGlobalIC.
- return CallLoadIC(CONTEXTUAL);
- }
- Handle<Code> ic = CodeFactory::LoadGlobalIC(
- isolate(), isolate()->global_object(), name).code();
- CallIC(ic, TypeFeedbackId::None());
-}
-
-
void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
CallIC(ic, id);
@@ -643,73 +402,67 @@ int FullCodeGenerator::DeclareGlobalsFlags() {
}
+bool RecordStatementPosition(MacroAssembler* masm, int pos) {
+ if (pos == RelocInfo::kNoPosition) return false;
+ masm->positions_recorder()->RecordStatementPosition(pos);
+ masm->positions_recorder()->RecordPosition(pos);
+ return masm->positions_recorder()->WriteRecordedPositions();
+}
+
+
+bool RecordPosition(MacroAssembler* masm, int pos) {
+ if (pos == RelocInfo::kNoPosition) return false;
+ masm->positions_recorder()->RecordPosition(pos);
+ return masm->positions_recorder()->WriteRecordedPositions();
+}
+
+
void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
+ RecordPosition(masm_, fun->start_position());
}
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
+ RecordStatementPosition(masm_, fun->end_position() - 1);
}
-void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
- if (!info_->is_debug()) {
- CodeGenerator::RecordPositions(masm_, stmt->position());
- } else {
- // Check if the statement will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker(info_->isolate(), zone());
- checker.Check(stmt);
- // Record the statement position right here if the statement is not
- // breakable. For breakable statements the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->position(), !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- DebugCodegen::GenerateSlot(masm_);
- }
+void FullCodeGenerator::SetStatementPosition(
+ Statement* stmt, FullCodeGenerator::InsertBreak insert_break) {
+ if (stmt->position() == RelocInfo::kNoPosition) return;
+ bool recorded = RecordStatementPosition(masm_, stmt->position());
+ if (recorded && insert_break == INSERT_BREAK && info_->is_debug() &&
+ !stmt->IsDebuggerStatement()) {
+ DebugCodegen::GenerateSlot(masm_);
}
}
-void FullCodeGenerator::VisitSuperReference(SuperReference* super) {
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+void FullCodeGenerator::SetExpressionPosition(
+ Expression* expr, FullCodeGenerator::InsertBreak insert_break) {
+ if (expr->position() == RelocInfo::kNoPosition) return;
+ bool recorded = RecordPosition(masm_, expr->position());
+ if (recorded && insert_break == INSERT_BREAK && info_->is_debug()) {
+ DebugCodegen::GenerateSlot(masm_);
+ }
}
-void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
- if (!info_->is_debug()) {
- CodeGenerator::RecordPositions(masm_, expr->position());
- } else {
- // Check if the expression will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker(info_->isolate(), zone());
- checker.Check(expr);
- // Record a statement position right here if the expression is not
- // breakable. For breakable expressions the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- // NOTE this will record a statement position for something which might
- // not be a statement. As stepping in the debugger will only stop at
- // statement positions this is used for e.g. the condition expression of
- // a do while loop.
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, expr->position(), !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- DebugCodegen::GenerateSlot(masm_);
- }
- }
+void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
+ if (expr->position() == RelocInfo::kNoPosition) return;
+ bool recorded = RecordStatementPosition(masm_, expr->position());
+ if (recorded && info_->is_debug()) DebugCodegen::GenerateSlot(masm_);
}
-void FullCodeGenerator::SetSourcePosition(int pos) {
- if (pos != RelocInfo::kNoPosition) {
- masm_->positions_recorder()->RecordPosition(pos);
- }
+void FullCodeGenerator::VisitSuperPropertyReference(
+ SuperPropertyReference* super) {
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+}
+
+
+void FullCodeGenerator::VisitSuperCallReference(SuperCallReference* super) {
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
}
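The InsertBreak parameter makes the debugger's break-slot policy explicit at each call site. A sketch of the intended division of labor, mirroring the loop visitors further down in this file:

    // A do-while records its statement position without a break slot ...
    SetStatementPosition(stmt, SKIP_BREAK);
    // ... and makes the condition the breakable location instead:
    SetExpressionAsStatementPosition(stmt->cond());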
@@ -845,7 +598,7 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
VisitForStackValue(left);
VisitForAccumulatorValue(right);
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr, op, left, right);
} else {
@@ -854,6 +607,22 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
}
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ DCHECK(!context()->IsEffect());
+ DCHECK(!context()->IsTest());
+
+ if (proxy != NULL && (proxy->var()->IsUnallocatedOrGlobalSlot() ||
+ proxy->var()->IsLookupSlot())) {
+ EmitVariableLoad(proxy, INSIDE_TYPEOF);
+ PrepareForBailout(proxy, TOS_REG);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
NestedBlock nested_block(this, stmt);
@@ -985,6 +754,12 @@ void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
}
+void FullCodeGenerator::EmitLoadSuperConstructor(SuperCallReference* ref) {
+ VisitForStackValue(ref->this_function_var());
+ __ CallRuntime(Runtime::kGetPrototype, 1);
+}
+
+
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
@@ -1021,7 +796,9 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
- SetStatementPosition(stmt);
+ // Do not insert a break location here; we do that below.
+ SetStatementPosition(stmt, SKIP_BREAK);
+
Label body, book_keeping;
Iteration loop_statement(this, stmt);
@@ -1034,7 +811,9 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// possible to break on the condition.
__ bind(loop_statement.continue_label());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- SetExpressionPosition(stmt->cond());
+
+ // Here is the actual 'while' keyword.
+ SetExpressionAsStatementPosition(stmt->cond());
VisitForControl(stmt->cond(),
&book_keeping,
loop_statement.break_label(),
@@ -1061,7 +840,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
__ bind(&loop);
- SetExpressionPosition(stmt->cond());
+ SetExpressionAsStatementPosition(stmt->cond());
VisitForControl(stmt->cond(),
&body,
loop_statement.break_label(),
@@ -1085,13 +864,13 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
Comment cmnt(masm_, "[ ForStatement");
+ // Do not insert a break location here; we do that below.
+ SetStatementPosition(stmt, SKIP_BREAK);
+
Label test, body;
Iteration loop_statement(this, stmt);
- // Set statement position for a break slot before entering the for-body.
- SetStatementPosition(stmt);
-
if (stmt->init() != NULL) {
SetStatementPosition(stmt->init());
Visit(stmt->init());
@@ -1112,16 +891,12 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
Visit(stmt->next());
}
- // Emit the statement position here as this is where the for
- // statement code starts.
- SetStatementPosition(stmt);
-
// Check stack before looping.
EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {
- SetExpressionPosition(stmt->cond());
+ SetExpressionAsStatementPosition(stmt->cond());
VisitForControl(stmt->cond(),
&body,
loop_statement.break_label(),
@@ -1138,7 +913,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Comment cmnt(masm_, "[ ForOfStatement");
- SetStatementPosition(stmt);
Iteration loop_statement(this, stmt);
increment_loop_depth();
@@ -1150,7 +924,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ bind(loop_statement.continue_label());
// result = iterator.next()
- SetExpressionPosition(stmt->next_result());
+ SetExpressionAsStatementPosition(stmt->next_result());
VisitForEffect(stmt->next_result());
// if (result.done) break;
@@ -1179,7 +953,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Comment cmnt(masm_, "[ TryCatchStatement");
- SetStatementPosition(stmt);
+ SetStatementPosition(stmt, SKIP_BREAK);
+
// The try block adds a handler to the exception handler chain before
// entering, and removes it again when exiting normally. If an exception
// is thrown during execution of the try block, the handler is consumed
@@ -1189,6 +964,9 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Label try_entry, handler_entry, exit;
__ jmp(&try_entry);
__ bind(&handler_entry);
+ PrepareForBailoutForId(stmt->HandlerId(), NO_REGISTERS);
+ ClearPendingMessage();
+
// Exception handler code, the exception is in the result register.
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
@@ -1214,18 +992,23 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Try block code. Sets up the exception handler chain.
__ bind(&try_entry);
- EnterTryBlock(stmt->index(), &handler_entry);
+
+ try_catch_depth_++;
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &handler_entry);
{ TryCatch try_body(this);
Visit(stmt->try_block());
}
- ExitTryBlock(stmt->index());
+ ExitTryBlock(handler_index);
+ try_catch_depth_--;
__ bind(&exit);
}
void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Comment cmnt(masm_, "[ TryFinallyStatement");
- SetStatementPosition(stmt);
+ SetStatementPosition(stmt, SKIP_BREAK);
+
// Try finally is compiled by setting up a try-handler on the stack while
// executing the try body, and removing it again afterwards.
//
@@ -1252,6 +1035,8 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Jump to try-handler setup and try-block code.
__ jmp(&try_entry);
__ bind(&handler_entry);
+ PrepareForBailoutForId(stmt->HandlerId(), NO_REGISTERS);
+
// Exception handler code. This code is only executed when an exception
// is thrown. The exception is in the result register, and must be
// preserved by the finally block. Call the finally block and then
@@ -1270,11 +1055,12 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Set up try handler.
__ bind(&try_entry);
- EnterTryBlock(stmt->index(), &handler_entry);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &handler_entry);
{ TryFinally try_body(this, &finally_entry);
Visit(stmt->try_block());
}
- ExitTryBlock(stmt->index());
+ ExitTryBlock(handler_index);
// Execute the finally block on the way out. Clobber the unpredictable
// value in the result register with one that's safe for GC because the
// finally block will unconditionally preserve the result register on the
@@ -1341,7 +1127,7 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
// Build the function boilerplate and instantiate it.
Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(expr, script(), info_);
+ Compiler::GetSharedFunctionInfo(expr, script(), info_);
if (function_info.is_null()) {
SetStackOverflow();
return;
@@ -1377,13 +1163,22 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
__ CallRuntime(Runtime::kDefineClass, 6);
PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
- EmitClassDefineProperties(lit);
+
+ int store_slot_index = 0;
+ EmitClassDefineProperties(lit, &store_slot_index);
if (lit->scope() != NULL) {
DCHECK_NOT_NULL(lit->class_variable_proxy());
+ FeedbackVectorICSlot slot = FLAG_vector_stores
+ ? lit->GetNthSlot(store_slot_index++)
+ : FeedbackVectorICSlot::Invalid();
EmitVariableAssignment(lit->class_variable_proxy()->var(),
- Token::INIT_CONST);
+ Token::INIT_CONST, slot);
}
+
+ // Verify that compilation exactly consumed the number of store ic slots
+ // that the ClassLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == lit->slot_count());
}
context()->Plug(result_register());
@@ -1394,15 +1189,19 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
Comment cmnt(masm_, "[ NativeFunctionLiteral");
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate());
+
// Compute the function template for the native function.
Handle<String> name = expr->name();
- v8::Handle<v8::FunctionTemplate> fun_template =
- expr->extension()->GetNativeFunctionTemplate(
- reinterpret_cast<v8::Isolate*>(isolate()), v8::Utils::ToLocal(name));
+ v8::Local<v8::FunctionTemplate> fun_template =
+ expr->extension()->GetNativeFunctionTemplate(v8_isolate,
+ v8::Utils::ToLocal(name));
DCHECK(!fun_template.IsEmpty());
// Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
+ Handle<JSFunction> fun = Utils::OpenHandle(
+ *fun_template->GetFunction(v8_isolate->GetCurrentContext())
+ .ToLocalChecked());
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
@@ -1425,22 +1224,24 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
__ CallRuntime(Runtime::kThrow, 1);
// Never returns here.
}
-void FullCodeGenerator::EnterTryBlock(int index, Label* handler) {
- handler_table()->SetRangeStart(index, masm()->pc_offset());
- handler_table()->SetRangeHandler(index, handler->pos());
+void FullCodeGenerator::EnterTryBlock(int handler_index, Label* handler) {
+ HandlerTableEntry* entry = &handler_table_[handler_index];
+ entry->range_start = masm()->pc_offset();
+ entry->handler_offset = handler->pos();
+ entry->try_catch_depth = try_catch_depth_;
// Determine expression stack depth of try statement.
int stack_depth = info_->scope()->num_stack_slots(); // Include stack locals.
for (NestedStatement* current = nesting_stack_; current != NULL; /*nop*/) {
current = current->AccumulateDepth(&stack_depth);
}
- handler_table()->SetRangeDepth(index, stack_depth);
+ entry->stack_depth = stack_depth;
// Push context onto operand stack.
STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
@@ -1448,8 +1249,9 @@ void FullCodeGenerator::EnterTryBlock(int index, Label* handler) {
}
-void FullCodeGenerator::ExitTryBlock(int index) {
- handler_table()->SetRangeEnd(index, masm()->pc_offset());
+void FullCodeGenerator::ExitTryBlock(int handler_index) {
+ HandlerTableEntry* entry = &handler_table_[handler_index];
+ entry->range_end = masm()->pc_offset();
// Drop context from operand stack.
__ Drop(TryBlockConstant::kElementCount);
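Taken together with PopulateHandlerTable in this file, each syntactic try block now contributes exactly one row to the code object's handler table. Sketching the life cycle of an entry as the visitors above use it:

    int idx = NewHandlerTableEntry();    // zero-initialized entry
    EnterTryBlock(idx, &handler_entry);  // fills range_start, handler_offset,
                                         // stack_depth, try_catch_depth
    // ... visit the try block ...
    ExitTryBlock(idx);                   // fills range_end
    // PopulateHandlerTable() later copies the entry into the HandlerTable,
    // deriving the CAUGHT/UNCAUGHT prediction from try_catch_depth.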
@@ -1648,4 +1450,5 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::~EnterBlockScopeIfNeeded() {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 34e93eedf4..b4294f5856 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -24,35 +24,6 @@ namespace internal {
// Forward declarations.
class JumpPatchSite;
-// AST node visitor which can tell whether a given statement will be breakable
-// when the code is compiled by the full compiler in the debugger. This means
-// that there will be an IC (load/store/call) in the code generated for the
-// debugger to piggyback on.
-class BreakableStatementChecker: public AstVisitor {
- public:
- BreakableStatementChecker(Isolate* isolate, Zone* zone)
- : is_breakable_(false) {
- InitializeAstVisitor(isolate, zone);
- }
-
- void Check(Statement* stmt);
- void Check(Expression* stmt);
-
- bool is_breakable() { return is_breakable_; }
-
- private:
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool is_breakable_;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
-};
-
-
// -----------------------------------------------------------------------------
// Full code generator.
@@ -69,12 +40,15 @@ class FullCodeGenerator: public AstVisitor {
scope_(info->scope()),
nesting_stack_(NULL),
loop_depth_(0),
+ try_catch_depth_(0),
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
+ ? info->function()->ast_node_count()
+ : 0,
info->zone()),
back_edges_(2, info->zone()),
+ handler_table_(info->zone()),
ic_total_count_(0) {
DCHECK(!info->IsStub());
Initialize();
@@ -518,6 +492,7 @@ class FullCodeGenerator: public AstVisitor {
F(IsSmi) \
F(IsNonNegativeSmi) \
F(IsArray) \
+ F(IsTypedArray) \
F(IsRegExp) \
F(IsJSProxy) \
F(IsConstructCall) \
@@ -527,6 +502,7 @@ class FullCodeGenerator: public AstVisitor {
F(Arguments) \
F(ValueOf) \
F(SetValueOf) \
+ F(IsDate) \
F(DateField) \
F(StringCharFromCode) \
F(StringCharAt) \
@@ -576,7 +552,9 @@ class FullCodeGenerator: public AstVisitor {
TypeofState typeof_state,
Label* slow,
Label* done);
- void EmitVariableLoad(VariableProxy* proxy);
+ void EmitGlobalVariableLoad(VariableProxy* proxy, TypeofState typeof_state);
+ void EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void EmitAccessor(Expression* expression);
@@ -591,26 +569,6 @@ class FullCodeGenerator: public AstVisitor {
void EmitLoadJSRuntimeFunction(CallRuntime* expr);
void EmitCallJSRuntimeFunction(CallRuntime* expr);
- // Platform-specific support for compiling assignments.
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind {
- VARIABLE,
- NAMED_PROPERTY,
- KEYED_PROPERTY,
- NAMED_SUPER_PROPERTY,
- KEYED_SUPER_PROPERTY
- };
-
- static LhsKind GetAssignType(Property* property) {
- if (property == NULL) return VARIABLE;
- bool super_access = property->IsSuperAccess();
- return (property->key()->IsPropertyName())
- ? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY)
- : (super_access ? KEYED_SUPER_PROPERTY : KEYED_PROPERTY);
- }
-
// Load a value from a named property.
// The receiver is left on the stack by the IC.
void EmitNamedPropertyLoad(Property* expr);
@@ -630,7 +588,7 @@ class FullCodeGenerator: public AstVisitor {
// Adds the properties to the class (function) object and to its prototype.
// Expects the class (function) in the accumulator. The class (function) is
// in the accumulator after installing all the properties.
- void EmitClassDefineProperties(ClassLiteral* lit);
+ void EmitClassDefineProperties(ClassLiteral* lit, int* used_store_slots);
// Pushes the property key as a Name on the stack.
void EmitPropertyKey(ObjectLiteralProperty* property, BailoutId bailout_id);
@@ -647,13 +605,14 @@ class FullCodeGenerator: public AstVisitor {
Expression* right);
// Assign to the given expression as if via '='. The right-hand-side value
- // is expected in the accumulator.
- void EmitAssignment(Expression* expr);
+ // is expected in the accumulator. slot is only used if FLAG_vector_stores
+ // is true.
+ void EmitAssignment(Expression* expr, FeedbackVectorICSlot slot);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
- void EmitVariableAssignment(Variable* var,
- Token::Value op);
+ void EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot);
// Helper functions to EmitVariableAssignment
void EmitStoreToStackLocalOrContextSlot(Variable* var,
@@ -676,8 +635,6 @@ class FullCodeGenerator: public AstVisitor {
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
- void EmitLoadHomeObject(SuperReference* expr);
-
static bool NeedsHomeObject(Expression* expr) {
return FunctionLiteral::NeedsHomeObject(expr);
}
@@ -685,30 +642,46 @@ class FullCodeGenerator: public AstVisitor {
// Adds the [[HomeObject]] to |initializer| if it is a FunctionLiteral.
// The value of the initializer is expected to be at the top of the stack.
// |offset| is the offset in the stack where the home object can be found.
- void EmitSetHomeObjectIfNeeded(Expression* initializer, int offset);
+ void EmitSetHomeObjectIfNeeded(
+ Expression* initializer, int offset,
+ FeedbackVectorICSlot slot = FeedbackVectorICSlot::Invalid());
- void EmitLoadSuperConstructor();
- void EmitInitializeThisAfterSuper(SuperReference* super_ref);
+ void EmitLoadSuperConstructor(SuperCallReference* super_call_ref);
+ void EmitInitializeThisAfterSuper(
+ SuperCallReference* super_call_ref,
+ FeedbackVectorICSlot slot = FeedbackVectorICSlot::Invalid());
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallLoadIC(ContextualMode mode,
+ void CallLoadIC(ContextualMode mode, LanguageMode language_mode = SLOPPY,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallGlobalLoadIC(Handle<String> name);
void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
- void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr);
- void SetSourcePosition(int pos);
+
+ enum InsertBreak { INSERT_BREAK, SKIP_BREAK };
+
+ // During stepping we want to be able to break at each statement, but not at
+ // every (sub-)expression. That is why by default we insert breaks at every
+ // statement position, but not at every expression position, unless stated
+ // otherwise.
+ void SetStatementPosition(Statement* stmt,
+ InsertBreak insert_break = INSERT_BREAK);
+ void SetExpressionPosition(Expression* expr,
+ InsertBreak insert_break = SKIP_BREAK);
+
+ // Consider an expression a statement. As such, we also insert a break.
+ // This is used in loop headers where we want to break for each iteration.
+ void SetExpressionAsStatementPosition(Expression* expr);
// Non-local control flow support.
void EnterTryBlock(int handler_index, Label* handler);
void ExitTryBlock(int handler_index);
void EnterFinallyBlock();
void ExitFinallyBlock();
+ void ClearPendingMessage();
// Loop nesting counter.
int loop_depth() { return loop_depth_; }
@@ -747,6 +720,8 @@ class FullCodeGenerator: public AstVisitor {
// and PushCatchContext.
void PushFunctionArgumentForContextAllocation();
+ void PushCalleeAndWithBaseObject(Call* expr);
+
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
@@ -761,11 +736,14 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
+ void PopulateHandlerTable(Handle<Code> code);
bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
- Handle<HandlerTable> handler_table() { return handler_table_; }
+ void EmitLoadStoreICSlot(FeedbackVectorICSlot slot);
+
+ int NewHandlerTableEntry();
struct BailoutEntry {
BailoutId id;
@@ -778,6 +756,14 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};
+ struct HandlerTableEntry {
+ unsigned range_start;
+ unsigned range_end;
+ unsigned handler_offset;
+ int stack_depth;
+ int try_catch_depth;
+ };
+
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -976,14 +962,15 @@ class FullCodeGenerator: public AstVisitor {
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
+ int try_catch_depth_;
ZoneList<Handle<Object> >* globals_;
Handle<FixedArray> modules_;
int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BackEdgeEntry> back_edges_;
+ ZoneVector<HandlerTableEntry> handler_table_;
int ic_total_count_;
- Handle<HandlerTable> handler_table_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index b3a64b26f8..9415b8985d 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -82,4 +82,5 @@ void FuncNameInferrer::InferFunctionsNames() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
index ae34ed3a4b..d041c5b223 100644
--- a/deps/v8/src/generator.js
+++ b/deps/v8/src/generator.js
@@ -2,14 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalFunction = global.Function;
+var NewFunctionString;
+
+utils.Import(function(from) {
+ NewFunctionString = from.NewFunctionString;
+});
+
// ----------------------------------------------------------------------------
// Generator functions and objects are specified by ES6, sections 15.19.3 and
@@ -66,13 +75,8 @@ function GeneratorObjectThrow(exn) {
}
-function GeneratorObjectIterator() {
- return this;
-}
-
-
function GeneratorFunctionConstructor(arg1) { // length == 1
- var source = $newFunctionString(arguments, 'function*');
+ var source = NewFunctionString(arguments, 'function*');
var global_proxy = %GlobalProxy(GeneratorFunctionConstructor);
// Compile the string in the constructor and not a helper so that errors
// appear to come from here.
@@ -90,14 +94,11 @@ function GeneratorFunctionConstructor(arg1) { // length == 1
// Set up non-enumerable functions on the generator prototype object.
var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
-$installFunctions(GeneratorObjectPrototype,
- DONT_ENUM,
- ["next", GeneratorObjectNext,
- "throw", GeneratorObjectThrow]);
-
-$setFunctionName(GeneratorObjectIterator, symbolIterator);
-%AddNamedProperty(GeneratorObjectPrototype, symbolIterator,
- GeneratorObjectIterator, DONT_ENUM | DONT_DELETE | READ_ONLY);
+utils.InstallFunctions(GeneratorObjectPrototype,
+ DONT_ENUM,
+ ["next", GeneratorObjectNext,
+ "throw", GeneratorObjectThrow]);
+
%AddNamedProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | READ_ONLY);
%AddNamedProperty(GeneratorObjectPrototype,
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 8e0ff13d33..aa6542baee 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -39,13 +39,13 @@ class GlobalHandles::Node {
// Maps handle location (slot) to the containing node.
static Node* FromLocation(Object** location) {
- DCHECK(OFFSET_OF(Node, object_) == 0);
+ DCHECK(offsetof(Node, object_) == 0);
return reinterpret_cast<Node*>(location);
}
Node() {
- DCHECK(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset);
- DCHECK(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset);
+ DCHECK(offsetof(Node, class_id_) == Internals::kNodeClassIdOffset);
+ DCHECK(offsetof(Node, flags_) == Internals::kNodeFlagsOffset);
STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
Internals::kNodeStateMask);
STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
@@ -76,6 +76,7 @@ class GlobalHandles::Node {
index_ = static_cast<uint8_t>(index);
DCHECK(static_cast<int>(index_) == index);
set_state(FREE);
+ set_weakness_type(NORMAL_WEAK);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = *first_free;
*first_free = this;
@@ -1230,4 +1231,5 @@ void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 5b6aa26665..f85e92985a 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -32,11 +32,21 @@
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_X87
+
#define V8_TURBOFAN_BACKEND 1
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS64 || \
+ V8_TARGET_ARCH_PPC64
+// 64-bit TurboFan backends support 64-bit integer arithmetic.
+#define V8_TURBOFAN_BACKEND_64 1
+#else
+#define V8_TURBOFAN_BACKEND_64 0
+#endif
+
#else
#define V8_TURBOFAN_BACKEND 0
#endif
+
#if V8_TURBOFAN_BACKEND
#define V8_TURBOFAN_TARGET 1
#else
@@ -74,8 +84,13 @@ namespace internal {
#endif
#endif
-// Determine whether the architecture uses an out-of-line constant pool.
-#define V8_OOL_CONSTANT_POOL 0
+// Determine whether the architecture uses an embedded constant pool
+// (contiguous constant pool embedded in code object).
+#if V8_TARGET_ARCH_PPC
+#define V8_EMBEDDED_CONSTANT_POOL 1
+#else
+#define V8_EMBEDDED_CONSTANT_POOL 0
+#endif
#ifdef V8_TARGET_ARCH_ARM
// Set stack limit lower for ARM than for other architectures because
@@ -124,6 +139,7 @@ const int kShortSize = sizeof(short); // NOLINT
const int kIntSize = sizeof(int); // NOLINT
const int kInt32Size = sizeof(int32_t); // NOLINT
const int kInt64Size = sizeof(int64_t); // NOLINT
+const int kFloatSize = sizeof(float); // NOLINT
const int kDoubleSize = sizeof(double); // NOLINT
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
@@ -142,7 +158,14 @@ const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
const bool kRequiresCodeRange = true;
+#if V8_TARGET_ARCH_MIPS64
+// To use pseudo-relative jumps such as j/jal instructions which have 28-bit
+// encoded immediate, the addresses have to be in range of 256MB aligned
+// region. Used only for large object space.
+const size_t kMaximalCodeRangeSize = 256 * MB;
+#else
const size_t kMaximalCodeRangeSize = 512 * MB;
+#endif
#if V8_OS_WIN
const size_t kMinimumCodeRangeSize = 4 * MB;
const size_t kReservedCodeRangePages = 1;
@@ -197,6 +220,8 @@ typedef int32_t uc32;
const int kOneByteSize = kCharSize;
const int kUC16Size = sizeof(uc16); // NOLINT
+// 128 bit SIMD value size.
+const int kSimd128Size = 16;
// Round up n to be a multiple of sz, where sz is a power of 2.
#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
@@ -240,7 +265,7 @@ enum LanguageMode {
};
-inline std::ostream& operator<<(std::ostream& os, LanguageMode mode) {
+inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
switch (mode) {
case SLOPPY:
return os << "sloppy";
@@ -284,6 +309,35 @@ inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
}
+// Strong mode behaviour must sometimes be signalled by a two valued enum where
+// caching is involved, to prevent sloppy and strict mode from being incorrectly
+// differentiated.
+enum class Strength : bool {
+ WEAK, // sloppy, strict behaviour
+ STRONG // strong behaviour
+};
+
+
+inline bool is_strong(Strength strength) {
+ return strength == Strength::STRONG;
+}
+
+
+inline std::ostream& operator<<(std::ostream& os, const Strength& strength) {
+ return os << (is_strong(strength) ? "strong" : "weak");
+}
+
+
+inline Strength strength(LanguageMode language_mode) {
+ return is_strong(language_mode) ? Strength::STRONG : Strength::WEAK;
+}
+
+
+inline size_t hash_value(Strength strength) {
+ return static_cast<size_t>(strength);
+}
+
+
// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;
@@ -299,6 +353,10 @@ const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+// Desired alignment for 128 bit SIMD values.
+const intptr_t kSimd128Alignment = 16;
+const intptr_t kSimd128AlignmentMask = kSimd128Alignment - 1;
+
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
@@ -377,6 +435,7 @@ class MemoryChunk;
class SeededNumberDictionary;
class UnseededNumberDictionary;
class NameDictionary;
+class GlobalDictionary;
template <typename T> class MaybeHandle;
template <typename T> class Handle;
class Heap;
@@ -438,6 +497,12 @@ enum AllocationSpace {
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
+enum AllocationAlignment {
+ kWordAligned,
+ kDoubleAligned,
+ kDoubleUnaligned,
+ kSimd128Unaligned
+};
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
@@ -445,6 +510,17 @@ const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };
+inline std::ostream& operator<<(std::ostream& os, const PretenureFlag& flag) {
+ switch (flag) {
+ case NOT_TENURED:
+ return os << "NotTenured";
+ case TENURED:
+ return os << "Tenured";
+ }
+ UNREACHABLE();
+ return os;
+}
+
enum MinimumCapacity {
USE_DEFAULT_MINIMUM_CAPACITY,
USE_CUSTOM_MINIMUM_CAPACITY
@@ -475,13 +551,15 @@ enum ParseRestriction {
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
-// the buffer and grows backward.
+// the buffer and grows backward. A constant pool may exist at the
+// end of the instructions.
//
-// |<--------------- buffer_size ---------------->|
-// |<-- instr_size -->| |<-- reloc_size -->|
-// +==================+========+==================+
-// | instructions | free | reloc info |
-// +==================+========+==================+
+// |<--------------- buffer_size ----------------------------------->|
+// |<------------- instr_size ---------->| |<-- reloc_size -->|
+// | |<- const_pool_size ->| |
+// +=====================================+========+==================+
+// | instructions | data | free | reloc info |
+// +=====================================+========+==================+
// ^
// |
// buffer
@@ -491,6 +569,7 @@ struct CodeDesc {
int buffer_size;
int instr_size;
int reloc_size;
+ int constant_pool_size;
Assembler* origin;
};
@@ -644,26 +723,6 @@ struct AccessorDescriptor {
#define DOUBLE_POINTER_ALIGN(value) \
(((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)
-// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
-// inside a C++ class and new and delete will be overloaded so logging is
-// performed.
-// This file (globals.h) is included before log.h, so we use direct calls to
-// the Logger rather than the LOG macro.
-#ifdef DEBUG
-#define TRACK_MEMORY(name) \
- void* operator new(size_t size) { \
- void* result = ::operator new(size); \
- Logger::NewEventStatic(name, result, size); \
- return result; \
- } \
- void operator delete(void* object) { \
- Logger::DeleteEventStatic(name, object); \
- ::operator delete(object); \
- }
-#else
-#define TRACK_MEMORY(name)
-#endif
-
// CPU feature flags.
enum CpuFeature {
@@ -792,6 +851,40 @@ inline bool IsImmutableVariableMode(VariableMode mode) {
}
+enum class VariableLocation {
+ // Before and during variable allocation, a variable whose location is
+ // not yet determined. After allocation, a variable looked up as a
+ // property on the global object (and possibly absent). name() is the
+ // variable name, index() is invalid.
+ UNALLOCATED,
+
+ // A slot in the parameter section on the stack. index() is the
+ // parameter index, counting left-to-right. The receiver is index -1;
+ // the first parameter is index 0.
+ PARAMETER,
+
+ // A slot in the local section on the stack. index() is the variable
+ // index in the stack frame, starting at 0.
+ LOCAL,
+
+ // An indexed slot in a heap context. index() is the variable index in
+ // the context object on the heap, starting at 0. scope() is the
+ // corresponding scope.
+ CONTEXT,
+
+ // An indexed slot in a script context that contains a respective global
+ // property cell. name() is the variable name, index() is the variable
+ // index in the context object on the heap, starting at 0. scope() is the
+ // corresponding script scope.
+ GLOBAL,
+
+ // A named slot in a heap context. name() is the variable name in the
+ // context object on the heap, with lookup starting at the current
+ // context. index() is invalid.
+ LOOKUP
+};
+
+
// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
// and immutable bindings that can be in two states: initialized and
// uninitialized. In ES5 only immutable bindings have these two states. When
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 34b3f32d96..3022f288a3 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -92,7 +92,19 @@ HandleScope::HandleScope(Isolate* isolate) {
HandleScope::~HandleScope() {
- CloseScope(isolate_, prev_next_, prev_limit_);
+#ifdef DEBUG
+ if (FLAG_check_handle_count) {
+ int before = NumberOfHandles(isolate_);
+ CloseScope(isolate_, prev_next_, prev_limit_);
+ int after = NumberOfHandles(isolate_);
+ DCHECK(after - before < kCheckHandleThreshold);
+ DCHECK(before < kCheckHandleThreshold);
+ } else {
+#endif // DEBUG
+ CloseScope(isolate_, prev_next_, prev_limit_);
+#ifdef DEBUG
+ }
+#endif // DEBUG
}
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index d9b130f3ca..d415315986 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -9,7 +9,6 @@
namespace v8 {
namespace internal {
-
int HandleScope::NumberOfHandles(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
int n = impl->blocks()->length();
@@ -127,4 +126,5 @@ DeferredHandles* DeferredHandleScope::Detach() {
return deferred;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index eb57f0e260..162b6d282f 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -66,14 +66,32 @@ class MaybeHandle {
bool is_null() const { return location_ == NULL; }
+ template <typename S>
+ bool operator==(MaybeHandle<S> that) const {
+ return this->location_ == that.location_;
+ }
+ template <typename S>
+ bool operator!=(MaybeHandle<S> that) const {
+ return !(*this == that);
+ }
+
+
protected:
T** location_;
// MaybeHandles of different classes are allowed to access each
// other's location_.
template<class S> friend class MaybeHandle;
+ template <typename S>
+ friend size_t hash_value(MaybeHandle<S>);
};
+template <typename S>
+inline size_t hash_value(MaybeHandle<S> maybe_handle) {
+ return bit_cast<size_t>(maybe_handle.location_);
+}
+
+
// ----------------------------------------------------------------------------
// A Handle provides a reference to an object that survives relocation by
// the garbage collector.
@@ -156,13 +174,6 @@ inline Handle<T> handle(T* t) {
}
-// Key comparison function for Map handles.
-inline bool operator<(const Handle<Map>& lhs, const Handle<Map>& rhs) {
- // This is safe because maps don't move.
- return *lhs < *rhs;
-}
-
-
class DeferredHandles;
class HandleScopeImplementer;
@@ -208,6 +219,11 @@ class HandleScope {
Isolate* isolate() { return isolate_; }
+ // Limit for number of handles with --check-handle-count. This is
+ // large enough to compile natives and pass unit tests with some
+ // slack for future changes to natives.
+ static const int kCheckHandleThreshold = 30 * 1024;
+
private:
// Prevent heap allocation or illegal handle scopes.
HandleScope(const HandleScope&);
diff --git a/deps/v8/src/harmony-array-includes.js b/deps/v8/src/harmony-array-includes.js
index 109499ea6b..b133f1ec8c 100644
--- a/deps/v8/src/harmony-array-includes.js
+++ b/deps/v8/src/harmony-array-includes.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
'use strict';
@@ -52,7 +52,7 @@ function ArrayIncludes(searchElement, fromIndex) {
%FunctionSetLength(ArrayIncludes, 1);
// Set up the non-enumerable functions on the Array prototype object.
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"includes", ArrayIncludes
]);
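
A quick sketch of the semantics this installs; the values shown are the expected draft-spec results, not output captured from this build:

  [1, 2, NaN].includes(NaN);   // true; SameValueZero comparison, unlike indexOf
  [1, 2, 3].includes(2, 2);    // false; fromIndex 2 starts past the match
  [1, 2, 3].includes(3, -1);   // true; a negative fromIndex counts from the end
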
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index f9283b9c0c..e94134b81a 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -2,49 +2,62 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
'use strict';
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalArray = global.Array;
var GlobalSymbol = global.Symbol;
-// -------------------------------------------------------------------
-
-// ES6 draft 03-17-15, section 22.1.3.3
-function ArrayCopyWithin(target, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
+var GetIterator;
+var GetMethod;
+var MathMax;
+var MathMin;
+var ObjectIsFrozen;
+var ObjectDefineProperty;
+
+utils.Import(function(from) {
+ GetIterator = from.GetIterator;
+ GetMethod = from.GetMethod;
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
+ ObjectIsFrozen = from.ObjectIsFrozen;
+ ObjectDefineProperty = from.ObjectDefineProperty;
+});
- var array = TO_OBJECT_INLINE(this);
- var length = $toLength(array.length);
+// -------------------------------------------------------------------
+function InnerArrayCopyWithin(target, start, end, array, length) {
target = TO_INTEGER(target);
var to;
if (target < 0) {
- to = $max(length + target, 0);
+ to = MathMax(length + target, 0);
} else {
- to = $min(target, length);
+ to = MathMin(target, length);
}
start = TO_INTEGER(start);
var from;
if (start < 0) {
- from = $max(length + start, 0);
+ from = MathMax(length + start, 0);
} else {
- from = $min(start, length);
+ from = MathMin(start, length);
}
end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
var final;
if (end < 0) {
- final = $max(length + end, 0);
+ final = MathMax(length + end, 0);
} else {
- final = $min(end, length);
+ final = MathMin(end, length);
}
- var count = $min(final - from, length - to);
+ var count = MathMin(final - from, length - to);
var direction = 1;
if (from < to && to < (from + count)) {
direction = -1;
@@ -66,22 +79,21 @@ function ArrayCopyWithin(target, start, end) {
return array;
}
-// ES6 draft 07-15-13, section 15.4.3.23
-function ArrayFind(predicate /* thisArg */) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
+// ES6 draft 03-17-15, section 22.1.3.3
+function ArrayCopyWithin(target, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
- var array = $toObject(this);
- var length = $toInteger(array.length);
+ var array = TO_OBJECT_INLINE(this);
+ var length = $toLength(array.length);
+
+ return InnerArrayCopyWithin(target, start, end, array, length);
+}
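
As an illustration of the clamping InnerArrayCopyWithin performs (negative indices count from the end, and everything clamps to [0, length]); expected results:

  [1, 2, 3, 4, 5].copyWithin(0, 3);    // [4, 5, 3, 4, 5]
  [1, 2, 3, 4, 5].copyWithin(-2, -4);  // [1, 2, 3, 2, 3]; both indices clamp
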
+function InnerArrayFind(predicate, thisArg, array, length) {
if (!IS_SPEC_FUNCTION(predicate)) {
throw MakeTypeError(kCalledNonCallable, predicate);
}
- var thisArg;
- if (%_ArgumentsLength() > 1) {
- thisArg = %_Arguments(1);
- }
-
var needs_wrapper = false;
if (IS_NULL(thisArg)) {
if (%IsSloppyModeFunction(predicate)) thisArg = UNDEFINED;
@@ -90,35 +102,31 @@ function ArrayFind(predicate /* thisArg */) { // length == 1
}
for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- var newThisArg = needs_wrapper ? $toObject(thisArg) : thisArg;
- if (%_CallFunction(newThisArg, element, i, array, predicate)) {
- return element;
- }
+ var element = array[i];
+ var newThisArg = needs_wrapper ? $toObject(thisArg) : thisArg;
+ if (%_CallFunction(newThisArg, element, i, array, predicate)) {
+ return element;
}
}
return;
}
-
-// ES6 draft 07-15-13, section 15.4.3.24
-function ArrayFindIndex(predicate /* thisArg */) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
+// ES6 draft 07-15-13, section 15.4.3.23
+function ArrayFind(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
var array = $toObject(this);
var length = $toInteger(array.length);
+ return InnerArrayFind(predicate, thisArg, array, length);
+}
+
+function InnerArrayFindIndex(predicate, thisArg, array, length) {
if (!IS_SPEC_FUNCTION(predicate)) {
throw MakeTypeError(kCalledNonCallable, predicate);
}
- var thisArg;
- if (%_ArgumentsLength() > 1) {
- thisArg = %_Arguments(1);
- }
-
var needs_wrapper = false;
if (IS_NULL(thisArg)) {
if (%IsSloppyModeFunction(predicate)) thisArg = UNDEFINED;
@@ -127,37 +135,30 @@ function ArrayFindIndex(predicate /* thisArg */) { // length == 1
}
for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- var newThisArg = needs_wrapper ? $toObject(thisArg) : thisArg;
- if (%_CallFunction(newThisArg, element, i, array, predicate)) {
- return i;
- }
+ var element = array[i];
+ var newThisArg = needs_wrapper ? $toObject(thisArg) : thisArg;
+ if (%_CallFunction(newThisArg, element, i, array, predicate)) {
+ return i;
}
}
return -1;
}
-
-// ES6, draft 04-05-14, section 22.1.3.6
-function ArrayFill(value /* [, start [, end ] ] */) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
+// ES6 draft 07-15-13, section 15.4.3.24
+function ArrayFindIndex(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
var array = $toObject(this);
- var length = TO_UINT32(array.length);
+ var length = $toInteger(array.length);
- var i = 0;
- var end = length;
+ return InnerArrayFindIndex(predicate, thisArg, array, length);
+}
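
Both wrappers now defer to the shared Inner* helpers; the observable behaviour should be unchanged. Expected results:

  [4, 5, 6].find(function(x) { return x > 4; });       // 5, the first match
  [4, 5, 6].findIndex(function(x) { return x > 4; });  // 1, its index
  [4, 5, 6].find(function(x) { return x > 9; });       // undefined when nothing matches
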
- if (%_ArgumentsLength() > 1) {
- i = %_Arguments(1);
- i = IS_UNDEFINED(i) ? 0 : TO_INTEGER(i);
- if (%_ArgumentsLength() > 2) {
- end = %_Arguments(2);
- end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
- }
- }
+// ES6, draft 04-05-14, section 22.1.3.6
+function InnerArrayFill(value, start, end, array, length) {
+ var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
+ var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
if (i < 0) {
i += length;
@@ -173,7 +174,7 @@ function ArrayFill(value /* [, start [, end ] ] */) { // length == 1
if (end > length) end = length;
}
- if ((end - i) > 0 && $objectIsFrozen(array)) {
+ if ((end - i) > 0 && ObjectIsFrozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
@@ -182,6 +183,26 @@ function ArrayFill(value /* [, start [, end ] ] */) { // length == 1
return array;
}
+// ES6, draft 04-05-14, section 22.1.3.6
+function ArrayFill(value, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
+
+ var array = $toObject(this);
+ var length = TO_UINT32(array.length);
+
+ return InnerArrayFill(value, start, end, array, length);
+}
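
A short sketch of the index handling InnerArrayFill keeps from the old ArrayFill (expected results):

  [0, 0, 0, 0, 0].fill(7, 1, 3);  // [0, 7, 7, 0, 0]; end is exclusive
  [0, 0, 0].fill(1, -2);          // [0, 1, 1]; a negative start counts from the end
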
+
+function AddArrayElement(constructor, array, i, value) {
+ if (constructor === GlobalArray) {
+ %AddElement(array, i, value);
+ } else {
+ ObjectDefineProperty(array, i, {
+ value: value, writable: true, configurable: true, enumerable: true
+ });
+ }
+}
+
// ES6, draft 10-14-14, section 22.1.2.1
function ArrayFrom(arrayLike, mapfn, receiver) {
var items = $toObject(arrayLike);
@@ -199,7 +220,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
}
}
- var iterable = $getMethod(items, symbolIterator);
+ var iterable = GetMethod(items, symbolIterator);
var k;
var result;
var mappedValue;
@@ -208,7 +229,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
if (!IS_UNDEFINED(iterable)) {
result = %IsConstructor(this) ? new this() : [];
- var iterator = $getIterator(items, iterable);
+ var iterator = GetIterator(items, iterable);
k = 0;
while (true) {
@@ -229,7 +250,8 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
} else {
mappedValue = nextValue;
}
- %AddElement(result, k++, mappedValue, NONE);
+ AddArrayElement(this, result, k, mappedValue);
+ k++;
}
} else {
var len = $toLength(items.length);
@@ -242,7 +264,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
} else {
mappedValue = nextValue;
}
- %AddElement(result, k, mappedValue, NONE);
+ AddArrayElement(this, result, k, mappedValue);
}
result.length = k;
@@ -257,7 +279,7 @@ function ArrayOf() {
// TODO: Implement IsConstructor (ES6 section 7.2.5)
var array = %IsConstructor(constructor) ? new constructor(length) : [];
for (var i = 0; i < length; i++) {
- %AddElement(array, i, %_Arguments(i), NONE);
+ AddArrayElement(constructor, array, i, %_Arguments(i));
}
array.length = length;
return array;
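
AddArrayElement uses %AddElement only when the constructor is the plain Array, falling back to ObjectDefineProperty otherwise, so from/of stay usable on other constructors. Expected results for the common paths:

  Array.from('ab');                                      // ['a', 'b'], iterator path
  Array.from({length: 2, 0: 'a', 1: 'b'});               // ['a', 'b'], array-like path
  Array.from([1, 2, 3], function(x) { return x * 2; });  // [2, 4, 6]
  Array.of(7, 8);                                        // [7, 8]
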
@@ -265,26 +287,35 @@ function ArrayOf() {
// -------------------------------------------------------------------
-$installConstants(GlobalSymbol, [
- // TODO(dslomov, caitp): Move to symbol.js when shipping
- "isConcatSpreadable", symbolIsConcatSpreadable
-]);
-
%FunctionSetLength(ArrayCopyWithin, 2);
%FunctionSetLength(ArrayFrom, 1);
+%FunctionSetLength(ArrayFill, 1);
+%FunctionSetLength(ArrayFind, 1);
+%FunctionSetLength(ArrayFindIndex, 1);
// Set up non-enumerable functions on the Array object.
-$installFunctions(GlobalArray, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray, DONT_ENUM, [
"from", ArrayFrom,
"of", ArrayOf
]);
// Set up the non-enumerable functions on the Array prototype object.
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"copyWithin", ArrayCopyWithin,
"find", ArrayFind,
"findIndex", ArrayFindIndex,
"fill", ArrayFill
]);
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.ArrayFrom = ArrayFrom;
+ to.InnerArrayCopyWithin = InnerArrayCopyWithin;
+ to.InnerArrayFill = InnerArrayFill;
+ to.InnerArrayFind = InnerArrayFind;
+ to.InnerArrayFindIndex = InnerArrayFindIndex;
+});
+
})
diff --git a/deps/v8/src/harmony-atomics.js b/deps/v8/src/harmony-atomics.js
new file mode 100644
index 0000000000..aa81822d1e
--- /dev/null
+++ b/deps/v8/src/harmony-atomics.js
@@ -0,0 +1,143 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalObject = global.Object;
+
+// -------------------------------------------------------------------
+
+
+function CheckSharedTypedArray(sta) {
+ if (!%_IsSharedTypedArray(sta)) {
+ throw MakeTypeError(kNotSharedTypedArray, sta);
+ }
+}
+
+function CheckSharedIntegerTypedArray(ia) {
+ if (!%_IsSharedIntegerTypedArray(ia)) {
+ throw MakeTypeError(kNotIntegerSharedTypedArray, ia);
+ }
+}
+
+//-------------------------------------------------------------------
+
+function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
+ CheckSharedTypedArray(sta);
+ index = $toInteger(index);
+ if (index < 0 || index >= sta.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
+}
+
+function AtomicsLoadJS(sta, index) {
+ CheckSharedTypedArray(sta);
+ index = $toInteger(index);
+ if (index < 0 || index >= sta.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsLoad(sta, index);
+}
+
+function AtomicsStoreJS(sta, index, value) {
+ CheckSharedTypedArray(sta);
+ index = $toInteger(index);
+ if (index < 0 || index >= sta.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsStore(sta, index, value);
+}
+
+function AtomicsAddJS(ia, index, value) {
+ CheckSharedIntegerTypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= ia.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsAdd(ia, index, value);
+}
+
+function AtomicsSubJS(ia, index, value) {
+ CheckSharedIntegerTypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= ia.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsSub(ia, index, value);
+}
+
+function AtomicsAndJS(ia, index, value) {
+ CheckSharedIntegerTypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= ia.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsAnd(ia, index, value);
+}
+
+function AtomicsOrJS(ia, index, value) {
+ CheckSharedIntegerTypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= ia.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsOr(ia, index, value);
+}
+
+function AtomicsXorJS(ia, index, value) {
+ CheckSharedIntegerTypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= ia.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsXor(ia, index, value);
+}
+
+function AtomicsExchangeJS(ia, index, value) {
+ CheckSharedIntegerTypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= ia.length) {
+ return UNDEFINED;
+ }
+ return %_AtomicsExchange(ia, index, value);
+}
+
+function AtomicsIsLockFreeJS(size) {
+ return %_AtomicsIsLockFree(size);
+}
+
+// -------------------------------------------------------------------
+
+function AtomicsConstructor() {}
+
+var Atomics = new AtomicsConstructor();
+
+%InternalSetPrototype(Atomics, GlobalObject.prototype);
+%AddNamedProperty(global, "Atomics", Atomics, DONT_ENUM);
+%FunctionSetInstanceClassName(AtomicsConstructor, 'Atomics');
+
+%AddNamedProperty(Atomics, symbolToStringTag, "Atomics", READ_ONLY | DONT_ENUM);
+
+utils.InstallFunctions(Atomics, DONT_ENUM, [
+ "compareExchange", AtomicsCompareExchangeJS,
+ "load", AtomicsLoadJS,
+ "store", AtomicsStoreJS,
+ "add", AtomicsAddJS,
+ "sub", AtomicsSubJS,
+ "and", AtomicsAndJS,
+ "or", AtomicsOrJS,
+ "xor", AtomicsXorJS,
+ "exchange", AtomicsExchangeJS,
+ "isLockFree", AtomicsIsLockFreeJS,
+]);
+
+})
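
A usage sketch for the Atomics object installed above, assuming an integer typed array backed by a SharedArrayBuffer as the checks require; the return values follow the code above and are expected rather than captured:

  var sab = new SharedArrayBuffer(16);
  var ia = new Int32Array(sab);
  Atomics.store(ia, 0, 42);               // 42, the value stored
  Atomics.load(ia, 0);                    // 42
  Atomics.add(ia, 0, 1);                  // 42, the old value; ia[0] is now 43
  Atomics.compareExchange(ia, 0, 43, 0);  // 43, the old value; the slot is now 0
  Atomics.isLockFree(4);                  // true on typical hardware
  Atomics.load(ia, 100);                  // undefined; out-of-range indices do not throw here
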
diff --git a/deps/v8/src/harmony-concat-spreadable.js b/deps/v8/src/harmony-concat-spreadable.js
new file mode 100644
index 0000000000..362701c123
--- /dev/null
+++ b/deps/v8/src/harmony-concat-spreadable.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+utils.InstallConstants(global.Symbol, [
+ // TODO(littledan): Move to symbol.js when shipping
+ "isConcatSpreadable", symbolIsConcatSpreadable
+]);
+
+})
diff --git a/deps/v8/src/harmony-object.js b/deps/v8/src/harmony-object.js
index acf74dd326..382f7f4252 100644
--- a/deps/v8/src/harmony-object.js
+++ b/deps/v8/src/harmony-object.js
@@ -3,14 +3,25 @@
// found in the LICENSE file.
//
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalObject = global.Object;
+var OwnPropertyKeys;
+
+utils.Import(function(from) {
+ OwnPropertyKeys = from.OwnPropertyKeys;
+});
+
+// -------------------------------------------------------------------
+
// ES6, draft 04-03-15, section 19.1.2.1
function ObjectAssign(target, sources) {
var to = TO_OBJECT_INLINE(target);
@@ -24,7 +35,7 @@ function ObjectAssign(target, sources) {
}
var from = TO_OBJECT_INLINE(nextSource);
- var keys = $ownPropertyKeys(from);
+ var keys = OwnPropertyKeys(from);
var len = keys.length;
for (var j = 0; j < len; ++j) {
@@ -39,7 +50,7 @@ function ObjectAssign(target, sources) {
}
// Set up non-enumerable functions on the Object object.
-$installFunctions(GlobalObject, DONT_ENUM, [
+utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"assign", ObjectAssign
]);
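
ObjectAssign copies every key reported by OwnPropertyKeys, so string and symbol keys both transfer, and later sources win. Expected result:

  var target = {a: 1};
  Object.assign(target, {b: 2}, {a: 3});  // target is now {a: 3, b: 2}
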
diff --git a/deps/v8/src/harmony-reflect.js b/deps/v8/src/harmony-reflect.js
index eee6df40d5..5ad63e1a5c 100644
--- a/deps/v8/src/harmony-reflect.js
+++ b/deps/v8/src/harmony-reflect.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
'use strict';
@@ -10,7 +10,7 @@
var GlobalReflect = global.Reflect;
-$installFunctions(GlobalReflect, DONT_ENUM, [
+utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
"apply", $reflectApply,
"construct", $reflectConstruct
]);
diff --git a/deps/v8/src/harmony-regexp.js b/deps/v8/src/harmony-regexp.js
index 6a32f16c75..f4e1cb0f3f 100644
--- a/deps/v8/src/harmony-regexp.js
+++ b/deps/v8/src/harmony-regexp.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
'use strict';
diff --git a/deps/v8/src/harmony-sharedarraybuffer.js b/deps/v8/src/harmony-sharedarraybuffer.js
new file mode 100644
index 0000000000..4ebfaadb2a
--- /dev/null
+++ b/deps/v8/src/harmony-sharedarraybuffer.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
+var GlobalObject = global.Object;
+
+// -------------------------------------------------------------------
+
+function SharedArrayBufferConstructor(length) { // length = 1
+ if (%_IsConstructCall()) {
+ var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
+ %ArrayBufferInitialize(this, byteLength, kShared);
+ } else {
+ throw MakeTypeError(kConstructorNotFunction, "SharedArrayBuffer");
+ }
+}
+
+function SharedArrayBufferGetByteLen() {
+ if (!IS_SHAREDARRAYBUFFER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'SharedArrayBuffer.prototype.byteLength', this);
+ }
+ return %_ArrayBufferGetByteLength(this);
+}
+
+function SharedArrayBufferIsViewJS(obj) {
+ return %ArrayBufferIsView(obj);
+}
+
+
+// Set up the SharedArrayBuffer constructor function.
+%SetCode(GlobalSharedArrayBuffer, SharedArrayBufferConstructor);
+%FunctionSetPrototype(GlobalSharedArrayBuffer, new GlobalObject());
+
+// Set up the constructor property on the SharedArrayBuffer prototype object.
+%AddNamedProperty(GlobalSharedArrayBuffer.prototype, "constructor",
+ GlobalSharedArrayBuffer, DONT_ENUM);
+
+%AddNamedProperty(GlobalSharedArrayBuffer.prototype,
+ symbolToStringTag, "SharedArrayBuffer", DONT_ENUM | READ_ONLY);
+
+utils.InstallGetter(GlobalSharedArrayBuffer.prototype, "byteLength",
+ SharedArrayBufferGetByteLen);
+
+utils.InstallFunctions(GlobalSharedArrayBuffer, DONT_ENUM, [
+ "isView", SharedArrayBufferIsViewJS
+]);
+
+})
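
A minimal sketch of the surface this sets up (expected results):

  var sab = new SharedArrayBuffer(8);
  sab.byteLength;                                 // 8
  SharedArrayBuffer.isView(new Int32Array(sab));  // true
  SharedArrayBuffer(8);                           // TypeError; must be called as a constructor
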
diff --git a/deps/v8/src/harmony-spread.js b/deps/v8/src/harmony-spread.js
index a523d26e38..bfd6acb3a1 100644
--- a/deps/v8/src/harmony-spread.js
+++ b/deps/v8/src/harmony-spread.js
@@ -5,10 +5,16 @@
var $spreadArguments;
var $spreadIterable;
-(function(global, shared, exports) {
+(function(global, utils) {
'use strict';
+// -------------------------------------------------------------------
+// Imports
+var InternalArray = utils.InternalArray;
+
+// -------------------------------------------------------------------
+
function SpreadArguments() {
var count = %_ArgumentsLength();
var args = new InternalArray();
diff --git a/deps/v8/src/harmony-tostring.js b/deps/v8/src/harmony-tostring.js
index b3783c444e..e234781da8 100644
--- a/deps/v8/src/harmony-tostring.js
+++ b/deps/v8/src/harmony-tostring.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
@@ -10,7 +10,7 @@
var GlobalSymbol = global.Symbol;
-$installConstants(GlobalSymbol, [
+utils.InstallConstants(GlobalSymbol, [
// TODO(dslomov, caitp): Move to symbol.js when shipping
"toStringTag", symbolToStringTag
]);
diff --git a/deps/v8/src/harmony-typedarray.js b/deps/v8/src/harmony-typedarray.js
index 900cd8f457..b9cc798ad2 100644
--- a/deps/v8/src/harmony-typedarray.js
+++ b/deps/v8/src/harmony-typedarray.js
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
macro TYPED_ARRAYS(FUNCTION)
// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
FUNCTION(Uint8Array)
@@ -26,74 +29,336 @@ var GlobalNAME = global.NAME;
endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
+DECLARE_GLOBALS(Array)
+
+var ArrayFrom;
+var ArrayToString;
+var InnerArrayCopyWithin;
+var InnerArrayEvery;
+var InnerArrayFill;
+var InnerArrayFilter;
+var InnerArrayFind;
+var InnerArrayFindIndex;
+var InnerArrayForEach;
+var InnerArrayIndexOf;
+var InnerArrayJoin;
+var InnerArrayLastIndexOf;
+var InnerArrayMap;
+var InnerArrayReduce;
+var InnerArrayReduceRight;
+var InnerArrayReverse;
+var InnerArraySome;
+var InnerArraySort;
+var InnerArrayToLocaleString;
+var IsNaN;
+var MathMax;
+var MathMin;
+
+utils.Import(function(from) {
+ ArrayFrom = from.ArrayFrom;
+ ArrayToString = from.ArrayToString;
+ InnerArrayCopyWithin = from.InnerArrayCopyWithin;
+ InnerArrayEvery = from.InnerArrayEvery;
+ InnerArrayFill = from.InnerArrayFill;
+ InnerArrayFilter = from.InnerArrayFilter;
+ InnerArrayFind = from.InnerArrayFind;
+ InnerArrayFindIndex = from.InnerArrayFindIndex;
+ InnerArrayForEach = from.InnerArrayForEach;
+ InnerArrayIndexOf = from.InnerArrayIndexOf;
+ InnerArrayJoin = from.InnerArrayJoin;
+ InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
+ InnerArrayMap = from.InnerArrayMap;
+ InnerArrayReduce = from.InnerArrayReduce;
+ InnerArrayReduceRight = from.InnerArrayReduceRight;
+ InnerArrayReverse = from.InnerArrayReverse;
+ InnerArraySome = from.InnerArraySome;
+ InnerArraySort = from.InnerArraySort;
+ InnerArrayToLocaleString = from.InnerArrayToLocaleString;
+ IsNaN = from.IsNaN;
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
+});
// -------------------------------------------------------------------
-// ES6 draft 05-05-15, section 22.2.3.7
-function TypedArrayEvery(f /* thisArg */) { // length == 1
- if (!%IsTypedArray(this)) {
- throw MakeTypeError('not_typed_array', []);
+function ConstructTypedArray(constructor, arg) {
+ // TODO(littledan): This is an approximation of the spec, which requires
+ // that only real TypedArray classes should be accepted (22.2.2.1.1)
+ if (!%IsConstructor(constructor) || IS_UNDEFINED(constructor.prototype) ||
+ !%HasOwnProperty(constructor.prototype, "BYTES_PER_ELEMENT")) {
+ throw MakeTypeError(kNotTypedArray);
}
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
+
+ // TODO(littledan): The spec requires that, rather than directly calling
+ // the constructor, a TypedArray is created with the proper proto and
+ // underlying size and element size, and elements are put in one by one.
+ // By contrast, this would allow subclasses to make a radically different
+ // constructor with different semantics.
+ return new constructor(arg);
+}
+
+function ConstructTypedArrayLike(typedArray, arg) {
+  // TODO(littledan): The spec requires that we actually use
+ // typedArray.constructor[Symbol.species] (bug v8:4093)
+ // Also, it should default to the default constructor from
+ // table 49 if typedArray.constructor doesn't exist.
+ return ConstructTypedArray(typedArray.constructor, arg);
+}
+
+function TypedArrayCopyWithin(target, start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- var receiver;
- if (%_ArgumentsLength() > 1) {
- receiver = %_Arguments(1);
- }
+ // TODO(littledan): Replace with a memcpy for better performance
+ return InnerArrayCopyWithin(target, start, end, this, length);
+}
+%FunctionSetLength(TypedArrayCopyWithin, 2);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(mapfn)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+// ES6 draft 05-05-15, section 22.2.3.7
+function TypedArrayEvery(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
- for (var i = 0; i < length; i++) {
- var element = this[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
- if (!%_CallFunction(new_receiver, TO_OBJECT_INLINE(element), i, this, f)) {
- return false;
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayEvery(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArrayEvery, 1);
+
+// ES6 draft 08-24-14, section 22.2.3.12
+function TypedArrayForEach(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ InnerArrayForEach(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArrayForEach, 1);
+
+// ES6 draft 04-05-14 section 22.2.3.8
+function TypedArrayFill(value, start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFill(value, start, end, this, length);
+}
+%FunctionSetLength(TypedArrayFill, 1);
+
+// ES6 draft 07-15-13, section 22.2.3.9
+function TypedArrayFilter(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ var array = InnerArrayFilter(predicate, thisArg, this, length);
+ return ConstructTypedArrayLike(this, array);
+}
+%FunctionSetLength(TypedArrayFilter, 1);
+
+// ES6 draft 07-15-13, section 22.2.3.10
+function TypedArrayFind(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFind(predicate, thisArg, this, length);
+}
+%FunctionSetLength(TypedArrayFind, 1);
+
+// ES6 draft 07-15-13, section 22.2.3.11
+function TypedArrayFindIndex(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFindIndex(predicate, thisArg, this, length);
+}
+%FunctionSetLength(TypedArrayFindIndex, 1);
+
+// ES6 draft 05-18-15, section 22.2.3.21
+function TypedArrayReverse() {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayReverse(this, length);
+}
+
+
+function TypedArrayComparefn(x, y) {
+ if (IsNaN(x) && IsNaN(y)) {
+ return IsNaN(y) ? 0 : 1;
+ }
+ if (IsNaN(x)) {
+ return 1;
+ }
+ if (x === 0 && x === y) {
+ if (%_IsMinusZero(x)) {
+ if (!%_IsMinusZero(y)) {
+ return -1;
+ }
+ } else if (%_IsMinusZero(y)) {
+ return 1;
}
}
- return true;
+ return x - y;
}
-// ES6 draft 08-24-14, section 22.2.3.12
-function TypedArrayForEach(f /* thisArg */) { // length == 1
- if (!%IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
+
+// ES6 draft 05-18-15, section 22.2.3.25
+function TypedArraySort(comparefn) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- var receiver;
- if (%_ArgumentsLength() > 1) {
- receiver = %_Arguments(1);
+ if (IS_UNDEFINED(comparefn)) {
+ comparefn = TypedArrayComparefn;
}
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(mapfn)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
+ return %_CallFunction(this, length, comparefn, InnerArraySort);
+}
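
TypedArrayComparefn gives the default ordering: ascending numeric values, -0 before +0, NaNs sorted to the end. Expected results:

  new Int32Array([10, 2, 1]).sort();            // Int32Array [1, 2, 10]; numeric, not lexicographic
  new Float64Array([3, NaN, -0, 1, 0]).sort();  // Float64Array [-0, 0, 1, 3, NaN]
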
+
+
+// ES6 section 22.2.3.13
+function TypedArrayIndexOf(element, index) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return %_CallFunction(this, element, index, length, InnerArrayIndexOf);
+}
+%FunctionSetLength(TypedArrayIndexOf, 1);
+
+
+// ES6 section 22.2.3.16
+function TypedArrayLastIndexOf(element, index) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return %_CallFunction(this, element, index, length,
+ %_ArgumentsLength(), InnerArrayLastIndexOf);
+}
+%FunctionSetLength(TypedArrayLastIndexOf, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.18
+function TypedArrayMap(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ // TODO(littledan): Preallocate rather than making an intermediate
+ // InternalArray, for better performance.
+ var length = %_TypedArrayGetLength(this);
+ var array = InnerArrayMap(predicate, thisArg, this, length);
+ return ConstructTypedArrayLike(this, array);
+}
+%FunctionSetLength(TypedArrayMap, 1);
+
+
+// ES6 draft 05-05-15, section 22.2.3.24
+function TypedArraySome(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArraySome(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArraySome, 1);
+
+
+// ES6 section 22.2.3.27
+function TypedArrayToLocaleString() {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayToLocaleString(this, length);
+}
+
+
+// ES6 section 22.2.3.28
+function TypedArrayToString() {
+ return %_CallFunction(this, ArrayToString);
+}
+
+
+// ES6 section 22.2.3.14
+function TypedArrayJoin(separator) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayJoin(separator, this, length);
+}
+
+
+// ES6 draft 07-15-13, section 22.2.3.19
+function TypedArrayReduce(callback, current) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayReduce(callback, current, this, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayReduce, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.19
+function TypedArrayReduceRight(callback, current) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayReduceRight(callback, current, this, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayReduceRight, 1);
+
+
+function TypedArraySlice(start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ var len = %_TypedArrayGetLength(this);
+
+ var relativeStart = TO_INTEGER(start);
+
+ var k;
+ if (relativeStart < 0) {
+ k = MathMax(len + relativeStart, 0);
+ } else {
+ k = MathMin(relativeStart, len);
}
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
- for (var i = 0; i < length; i++) {
- var element = this[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
- %_CallFunction(new_receiver, TO_OBJECT_INLINE(element), i, this, f);
+ var relativeEnd;
+ if (IS_UNDEFINED(end)) {
+ relativeEnd = len;
+ } else {
+ relativeEnd = TO_INTEGER(end);
+ }
+
+ var final;
+ if (relativeEnd < 0) {
+ final = MathMax(len + relativeEnd, 0);
+ } else {
+ final = MathMin(relativeEnd, len);
}
+
+ var count = MathMax(final - k, 0);
+ var array = ConstructTypedArrayLike(this, count);
+  // The code below is the 'then' branch; the 'else' branch specifies
+  // a memcpy. Because V8 doesn't canonicalize NaN, the difference is
+  // unobservable.
+ var n = 0;
+ while (k < final) {
+ var kValue = this[k];
+ // TODO(littledan): The spec says to throw on an error in setting;
+ // does this throw?
+ array[n] = kValue;
+ k++;
+ n++;
+ }
+ return array;
}
+
// ES6 draft 08-24-14, section 22.2.2.2
-function TypedArrayOf() { // length == 0
+function TypedArrayOf() {
var length = %_ArgumentsLength();
var array = new this(length);
for (var i = 0; i < length; i++) {
@@ -102,16 +367,44 @@ function TypedArrayOf() { // length == 0
return array;
}
+
+function TypedArrayFrom(source, mapfn, thisArg) {
+ // TODO(littledan): Investigate if there is a receiver which could be
+ // faster to accumulate on than Array, e.g., a TypedVector.
+ var array = %_CallFunction(GlobalArray, source, mapfn, thisArg, ArrayFrom);
+ return ConstructTypedArray(this, array);
+}
+%FunctionSetLength(TypedArrayFrom, 1);
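
TypedArrayFrom funnels the source through ArrayFrom and then ConstructTypedArray, so iterables, array-likes, and map functions all work. Expected results:

  Uint8Array.of(1, 2, 3);          // Uint8Array [1, 2, 3]
  Float32Array.from([1.5, 2.5]);   // Float32Array [1.5, 2.5]
  Int16Array.from('123', Number);  // Int16Array [1, 2, 3], mapped per element
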
+
+// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
macro EXTEND_TYPED_ARRAY(NAME)
// Set up non-enumerable functions on the object.
- $installFunctions(GlobalNAME, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+ utils.InstallFunctions(GlobalNAME, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+ "from", TypedArrayFrom,
"of", TypedArrayOf
]);
// Set up non-enumerable functions on the prototype object.
- $installFunctions(GlobalNAME.prototype, DONT_ENUM, [
+ utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
+ "copyWithin", TypedArrayCopyWithin,
"every", TypedArrayEvery,
- "forEach", TypedArrayForEach
+ "fill", TypedArrayFill,
+ "filter", TypedArrayFilter,
+ "find", TypedArrayFind,
+ "findIndex", TypedArrayFindIndex,
+ "indexOf", TypedArrayIndexOf,
+ "join", TypedArrayJoin,
+ "lastIndexOf", TypedArrayLastIndexOf,
+ "forEach", TypedArrayForEach,
+ "map", TypedArrayMap,
+ "reduce", TypedArrayReduce,
+ "reduceRight", TypedArrayReduceRight,
+ "reverse", TypedArrayReverse,
+ "slice", TypedArraySlice,
+ "some", TypedArraySome,
+ "sort", TypedArraySort,
+ "toString", TypedArrayToString,
+ "toLocaleString", TypedArrayToLocaleString
]);
endmacro
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/hashmap.h
index 688fc2cf36..ee3797fe59 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/hashmap.h
@@ -43,7 +43,7 @@ class TemplateHashMapImpl {
// If an entry with matching key is found, returns that entry.
// Otherwise, NULL is returned.
- Entry* Lookup(void* key, uint32_t hash);
+ Entry* Lookup(void* key, uint32_t hash) const;
// If an entry with matching key is found, returns that entry.
// If no matching entry is found, a new entry is inserted with
@@ -90,7 +90,7 @@ class TemplateHashMapImpl {
uint32_t occupancy_;
Entry* map_end() const { return map_ + capacity_; }
- Entry* Probe(void* key, uint32_t hash);
+ Entry* Probe(void* key, uint32_t hash) const;
void Initialize(uint32_t capacity, AllocationPolicy allocator);
void Resize(AllocationPolicy allocator);
};
@@ -113,7 +113,7 @@ TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) {
+TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) const {
Entry* p = Probe(key, hash);
return p->key != NULL ? p : NULL;
}
@@ -242,7 +242,7 @@ typename TemplateHashMapImpl<AllocationPolicy>::Entry*
template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
-TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
+TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) const {
DCHECK(key != NULL);
DCHECK(base::bits::IsPowerOfTwo32(capacity_));
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 0fe50340e7..3f11be4ce4 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -186,4 +186,5 @@ void HeapProfiler::ClearHeapObjectMap() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 34db11ba5d..f1bdc71cca 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -154,6 +154,7 @@ const char* HeapEntry::TypeAsString() {
case kConsString: return "/concatenated string/";
case kSlicedString: return "/sliced string/";
case kSymbol: return "/symbol/";
+ case kSimdValue: return "/simd/";
default: return "???";
}
}
@@ -322,7 +323,8 @@ List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
for (int i = 0; i < entries_.length(); ++i) {
sorted_entries_[i] = &entries_[i];
}
- sorted_entries_.Sort(SortByIds);
+ sorted_entries_.Sort<int (*)(HeapEntry* const*, HeapEntry* const*)>(
+ SortByIds);
}
return &sorted_entries_;
}
@@ -861,6 +863,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kArray, "");
} else if (object->IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "number");
+ } else if (object->IsFloat32x4()) {
+ return AddEntry(object, HeapEntry::kSimdValue, "simd");
}
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
}
@@ -1291,7 +1295,7 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
TransitionArray::cast(raw_transitions_or_prototype_info);
int transitions_entry = GetEntry(transitions)->index();
- if (FLAG_collect_maps && map->CanTransition()) {
+ if (map->CanTransition()) {
if (transitions->HasPrototypeTransitions()) {
FixedArray* prototype_transitions =
transitions->GetPrototypeTransitions();
@@ -1500,9 +1504,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
- SetInternalReference(code, entry,
- "constant_pool", code->constant_pool(),
- Code::kConstantPoolOffset);
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
SetWeakReference(code, entry,
"next_code_link", code->next_code_link(),
@@ -1648,17 +1649,33 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
break;
}
}
+ } else if (js_obj->IsGlobalObject()) {
+ // We assume that global objects can only have slow properties.
+ GlobalDictionary* dictionary = js_obj->global_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
+ Object* value = cell->value();
+ if (k == heap_->hidden_string()) {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ continue;
+ }
+ PropertyDetails details = cell->property_details();
+ SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
+ Name::cast(k), value);
+ }
+ }
} else {
NameDictionary* dictionary = js_obj->property_dictionary();
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
if (dictionary->IsKey(k)) {
- Object* target = dictionary->ValueAt(i);
- // We assume that global objects can only have slow properties.
- Object* value = target->IsPropertyCell()
- ? PropertyCell::cast(target)->value()
- : target;
+ Object* value = dictionary->ValueAt(i);
if (k == heap_->hidden_string()) {
TagObject(value, "(hidden properties)");
SetInternalReference(js_obj, entry, "hidden_properties", value);
@@ -3166,4 +3183,5 @@ void HeapSnapshotJSONSerializer::SerializeStrings() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index 5859eb88b5..ed0ca89839 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -82,7 +82,8 @@ class HeapEntry BASE_EMBEDDED {
kSynthetic = v8::HeapGraphNode::kSynthetic,
kConsString = v8::HeapGraphNode::kConsString,
kSlicedString = v8::HeapGraphNode::kSlicedString,
- kSymbol = v8::HeapGraphNode::kSymbol
+ kSymbol = v8::HeapGraphNode::kSymbol,
+ kSimdValue = v8::HeapGraphNode::kSimdValue
};
static const int kNoEntry;
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 5baf024efe..f76c48bf9e 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/flags.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/utils.h"
@@ -49,8 +50,8 @@ void GCIdleTimeHandler::HeapState::Print() {
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
- PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
+ PrintF("has_low_allocation_rate=%d", has_low_allocation_rate);
PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
mark_compact_speed_in_bytes_per_ms);
PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
@@ -58,7 +59,7 @@ void GCIdleTimeHandler::HeapState::Print() {
PrintF("scavenge_speed=%" V8_PTR_PREFIX "d ", scavenge_speed_in_bytes_per_ms);
PrintF("new_space_size=%" V8_PTR_PREFIX "d ", used_new_space_size);
PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
- PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d",
+ PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
new_space_allocation_throughput_in_bytes_per_ms);
}
@@ -126,8 +127,8 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
new_space_allocation_limit = new_space_size;
}
- // We do not know the allocation throughput before the first Scavenge.
- // TODO(hpayer): Estimate allocation throughput before the first Scavenge.
+ // We do not know the allocation throughput before the first scavenge.
+ // TODO(hpayer): Estimate allocation throughput before the first scavenge.
if (new_space_allocation_throughput_in_bytes_per_ms == 0) {
new_space_allocation_limit =
static_cast<size_t>(new_space_size * kConservativeTimeRatio);
@@ -135,10 +136,17 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
// We have to trigger scavenge before we reach the end of new space.
size_t adjust_limit = new_space_allocation_throughput_in_bytes_per_ms *
kTimeUntilNextIdleEvent;
- if (adjust_limit > new_space_allocation_limit)
+ if (adjust_limit > new_space_allocation_limit) {
new_space_allocation_limit = 0;
- else
+ } else {
new_space_allocation_limit -= adjust_limit;
+ }
+ }
+
+ // The new space allocation limit to trigger a scavenge has to be at least
+ // kMinimumNewSpaceSizeToPerformScavenge.
+ if (new_space_allocation_limit < kMinimumNewSpaceSizeToPerformScavenge) {
+ new_space_allocation_limit = kMinimumNewSpaceSizeToPerformScavenge;
}
if (scavenge_speed_in_bytes_per_ms == 0) {
@@ -193,105 +201,47 @@ GCIdleTimeAction GCIdleTimeHandler::NothingOrDone(double idle_time_in_ms) {
if (idle_time_in_ms >= kMinBackgroundIdleTime) {
return GCIdleTimeAction::Nothing();
}
- if (idle_times_which_made_no_progress_per_mode_ >=
- kMaxNoProgressIdleTimesPerMode) {
+ if (idle_times_which_made_no_progress_ >= kMaxNoProgressIdleTimes) {
return GCIdleTimeAction::Done();
} else {
- idle_times_which_made_no_progress_per_mode_++;
+ idle_times_which_made_no_progress_++;
return GCIdleTimeAction::Nothing();
}
}
-// The idle time handler has three modes and transitions between them
-// as shown in the diagram:
-//
-// kReduceLatency -----> kReduceMemory -----> kDone
-// ^ ^ | |
-// | | | |
-// | +------------------+ |
-// | |
-// +----------------------------------------+
-//
-// In kReduceLatency mode the handler only starts incremental marking
-// if can_start_incremental_marking is false.
-// In kReduceMemory mode the handler can force a new GC cycle by starting
-// incremental marking even if can_start_incremental_marking is false. It can
-// cause at most X idle GCs.
-// In kDone mode the idle time handler does nothing.
-//
-// The initial mode is kReduceLatency.
-//
-// kReduceLatency => kReduceMemory transition happens if there were Y
-// consecutive long idle notifications without any mutator GC. This is our
-// notion of "mutator is idle".
-//
-// kReduceMemory => kDone transition happens after X idle GCs.
-//
-// kReduceMemory => kReduceLatency transition happens if N mutator GCs
-// were performed meaning that the mutator is active.
-//
-// kDone => kReduceLatency transition happens if there were M mutator GCs or
-// context was disposed.
-//
-// X = kMaxIdleMarkCompacts
-// Y = kLongIdleNotificationsBeforeMutatorIsIdle
-// N = #(idle GCs)
-// M = kGCsBeforeMutatorIsActive
-GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
- HeapState heap_state) {
- Mode next_mode = NextMode(heap_state);
-
- // Immediately go from reduce latency to reduce memory mode in
- // background tab.
- if (next_mode == kReduceLatency &&
- idle_time_in_ms >= kMinBackgroundIdleTime) {
- next_mode = kReduceMemory;
- }
-
- if (next_mode != mode_) {
- mode_ = next_mode;
- ResetCounters();
- }
-
- UpdateCounters(idle_time_in_ms);
-
- if (mode_ == kDone) {
- return GCIdleTimeAction::Done();
- } else {
- return Action(idle_time_in_ms, heap_state, mode_ == kReduceMemory);
- }
-}
-
-
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
// a full GC.
-// (2) If the new space is almost full and we can afford a Scavenge or if the
-// next Scavenge will very likely take long, then a Scavenge is performed.
-// (3) If incremental marking is done, we perform a full garbage collection
-// if we are allowed to still do full garbage collections during this idle
-// round or if we are not allowed to start incremental marking. Otherwise we
-// do not perform garbage collection to keep system utilization low.
+// (2) If the context disposal rate is high and we cannot perform a full GC,
+// we do nothing until the context disposal rate becomes lower.
+// (3) If the new space is almost full and we can afford a scavenge or if the
+// next scavenge will very likely take long, then a scavenge is performed.
// (4) If sweeping is in progress and we received a large enough idle time
// request, we finalize sweeping here.
// (5) If incremental marking is in progress, we perform a marking step. Note,
// that this currently may trigger a full garbage collection.
-GCIdleTimeAction GCIdleTimeHandler::Action(double idle_time_in_ms,
- const HeapState& heap_state,
- bool reduce_memory) {
+GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
+ HeapState heap_state) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
if (heap_state.incremental_marking_stopped) {
if (ShouldDoContextDisposalMarkCompact(
heap_state.contexts_disposed,
heap_state.contexts_disposal_rate)) {
- return GCIdleTimeAction::FullGC(false);
+ return GCIdleTimeAction::FullGC();
}
}
return GCIdleTimeAction::Nothing();
}
+ // We are in a context disposal GC scenario. Don't do anything if we do not
+ // get the right idle signal.
+ if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
+ heap_state.contexts_disposal_rate)) {
+ return NothingOrDone(idle_time_in_ms);
+ }
+
if (ShouldDoScavenge(
static_cast<size_t>(idle_time_in_ms), heap_state.new_space_capacity,
heap_state.used_new_space_size,
@@ -300,14 +250,6 @@ GCIdleTimeAction GCIdleTimeHandler::Action(double idle_time_in_ms,
return GCIdleTimeAction::Scavenge();
}
- if (heap_state.incremental_marking_stopped && reduce_memory) {
- if (ShouldDoMarkCompact(static_cast<size_t>(idle_time_in_ms),
- heap_state.size_of_objects,
- heap_state.mark_compact_speed_in_bytes_per_ms)) {
- return GCIdleTimeAction::FullGC(reduce_memory);
- }
- }
-
if (heap_state.sweeping_in_progress) {
if (heap_state.sweeping_completed) {
return GCIdleTimeAction::FinalizeSweeping();
@@ -316,93 +258,16 @@ GCIdleTimeAction GCIdleTimeHandler::Action(double idle_time_in_ms,
}
}
- if (heap_state.incremental_marking_stopped &&
- !heap_state.can_start_incremental_marking && !reduce_memory) {
- return NothingOrDone(idle_time_in_ms);
+ if (!FLAG_incremental_marking || heap_state.incremental_marking_stopped) {
+ return GCIdleTimeAction::Done();
}
size_t step_size = EstimateMarkingStepSize(
static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
heap_state.incremental_marking_speed_in_bytes_per_ms);
- return GCIdleTimeAction::IncrementalMarking(step_size, reduce_memory);
-}
-
-
-void GCIdleTimeHandler::UpdateCounters(double idle_time_in_ms) {
- if (mode_ == kReduceLatency) {
- int gcs = scavenges_ + mark_compacts_;
- if (gcs > 0) {
- // There was a GC since the last notification.
- long_idle_notifications_ = 0;
- background_idle_notifications_ = 0;
- }
- idle_mark_compacts_ = 0;
- mark_compacts_ = 0;
- scavenges_ = 0;
- if (idle_time_in_ms >= kMinBackgroundIdleTime) {
- background_idle_notifications_++;
- } else if (idle_time_in_ms >= kMinLongIdleTime) {
- long_idle_notifications_++;
- }
- }
-}
-
-
-void GCIdleTimeHandler::ResetCounters() {
- long_idle_notifications_ = 0;
- background_idle_notifications_ = 0;
- idle_mark_compacts_ = 0;
- mark_compacts_ = 0;
- scavenges_ = 0;
- idle_times_which_made_no_progress_per_mode_ = 0;
-}
-
-
-bool GCIdleTimeHandler::IsMutatorActive(int contexts_disposed,
- int mark_compacts) {
- return contexts_disposed > 0 ||
- mark_compacts >= kMarkCompactsBeforeMutatorIsActive;
-}
-
-
-bool GCIdleTimeHandler::IsMutatorIdle(int long_idle_notifications,
- int background_idle_notifications,
- int mutator_gcs) {
- return mutator_gcs == 0 &&
- (long_idle_notifications >=
- kLongIdleNotificationsBeforeMutatorIsIdle ||
- background_idle_notifications >=
- kBackgroundIdleNotificationsBeforeMutatorIsIdle);
+ return GCIdleTimeAction::IncrementalMarking(step_size);
}
-GCIdleTimeHandler::Mode GCIdleTimeHandler::NextMode(
- const HeapState& heap_state) {
- DCHECK(mark_compacts_ >= idle_mark_compacts_);
- int mutator_gcs = scavenges_ + mark_compacts_ - idle_mark_compacts_;
- switch (mode_) {
- case kDone:
- DCHECK(idle_mark_compacts_ == 0);
- if (IsMutatorActive(heap_state.contexts_disposed, mark_compacts_)) {
- return kReduceLatency;
- }
- break;
- case kReduceLatency:
- if (IsMutatorIdle(long_idle_notifications_,
- background_idle_notifications_, mutator_gcs)) {
- return kReduceMemory;
- }
- break;
- case kReduceMemory:
- if (idle_mark_compacts_ >= kMaxIdleMarkCompacts) {
- return kDone;
- }
- if (mutator_gcs > idle_mark_compacts_) {
- return kReduceLatency;
- }
- break;
- }
- return mode_;
-}
}
}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 1ab506d817..8f12a446f2 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -27,7 +27,6 @@ class GCIdleTimeAction {
result.type = DONE;
result.parameter = 0;
result.additional_work = false;
- result.reduce_memory = false;
return result;
}
@@ -36,17 +35,14 @@ class GCIdleTimeAction {
result.type = DO_NOTHING;
result.parameter = 0;
result.additional_work = false;
- result.reduce_memory = false;
return result;
}
- static GCIdleTimeAction IncrementalMarking(intptr_t step_size,
- bool reduce_memory) {
+ static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
GCIdleTimeAction result;
result.type = DO_INCREMENTAL_MARKING;
result.parameter = step_size;
result.additional_work = false;
- result.reduce_memory = reduce_memory;
return result;
}
@@ -55,18 +51,14 @@ class GCIdleTimeAction {
result.type = DO_SCAVENGE;
result.parameter = 0;
result.additional_work = false;
- // TODO(ulan): add reduce_memory argument and shrink new space size if
- // reduce_memory = true.
- result.reduce_memory = false;
return result;
}
- static GCIdleTimeAction FullGC(bool reduce_memory) {
+ static GCIdleTimeAction FullGC() {
GCIdleTimeAction result;
result.type = DO_FULL_GC;
result.parameter = 0;
result.additional_work = false;
- result.reduce_memory = reduce_memory;
return result;
}
@@ -75,7 +67,6 @@ class GCIdleTimeAction {
result.type = DO_FINALIZE_SWEEPING;
result.parameter = 0;
result.additional_work = false;
- result.reduce_memory = false;
return result;
}
@@ -84,7 +75,6 @@ class GCIdleTimeAction {
GCIdleTimeActionType type;
intptr_t parameter;
bool additional_work;
- bool reduce_memory;
};
@@ -128,6 +118,8 @@ class GCIdleTimeHandler {
// The maximum idle time when frames are rendered is 16.66ms.
static const size_t kMaxFrameRenderingIdleTime = 17;
+ static const int kMinBackgroundIdleTime = 900;
+
// We conservatively assume that in the next kTimeUntilNextIdleEvent ms
// no idle notification happens.
static const size_t kTimeUntilNextIdleEvent = 100;
@@ -136,6 +128,9 @@ class GCIdleTimeHandler {
// lower bound for the scavenger speed.
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
+ // The minimum size of allocated new space objects to trigger a scavenge.
+ static const size_t kMinimumNewSpaceSizeToPerformScavenge = MB / 2;
+
// If contexts are disposed at a higher rate a full gc is triggered.
static const double kHighContextDisposalRate;
@@ -144,28 +139,10 @@ class GCIdleTimeHandler {
static const size_t kMinTimeForOverApproximatingWeakClosureInMs;
- // The number of idle MarkCompact GCs to perform before transitioning to
- // the kDone mode.
- static const int kMaxIdleMarkCompacts = 3;
-
- // The number of mutator MarkCompact GCs before transitioning to the
- // kReduceLatency mode.
- static const int kMarkCompactsBeforeMutatorIsActive = 1;
-
- // Mutator is considered idle if
- // 1) there are N idle notification with time >= kMinBackgroundIdleTime,
- // 2) or there are M idle notifications with time >= kMinLongIdleTime
- // without any mutator GC in between.
- // Where N = kBackgroundIdleNotificationsBeforeMutatorIsIdle,
- // M = kLongIdleNotificationsBeforeMutatorIsIdle
- static const int kMinLongIdleTime = kMaxFrameRenderingIdleTime + 1;
- static const int kMinBackgroundIdleTime = 900;
- static const int kBackgroundIdleNotificationsBeforeMutatorIsIdle = 2;
- static const int kLongIdleNotificationsBeforeMutatorIsIdle = 50;
// Number of times we will return a Nothing action in the current mode
// despite having idle time available before we return a Done action to
// ensure we don't keep scheduling idle tasks and making no progress.
- static const int kMaxNoProgressIdleTimesPerMode = 10;
+ static const int kMaxNoProgressIdleTimes = 10;
class HeapState {
public:
@@ -175,9 +152,9 @@ class GCIdleTimeHandler {
double contexts_disposal_rate;
size_t size_of_objects;
bool incremental_marking_stopped;
- bool can_start_incremental_marking;
bool sweeping_in_progress;
bool sweeping_completed;
+ bool has_low_allocation_rate;
size_t mark_compact_speed_in_bytes_per_ms;
size_t incremental_marking_speed_in_bytes_per_ms;
size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
@@ -187,22 +164,11 @@ class GCIdleTimeHandler {
size_t new_space_allocation_throughput_in_bytes_per_ms;
};
- GCIdleTimeHandler()
- : idle_mark_compacts_(0),
- mark_compacts_(0),
- scavenges_(0),
- long_idle_notifications_(0),
- background_idle_notifications_(0),
- idle_times_which_made_no_progress_per_mode_(0),
- mode_(kReduceLatency) {}
+ GCIdleTimeHandler() : idle_times_which_made_no_progress_(0) {}
GCIdleTimeAction Compute(double idle_time_in_ms, HeapState heap_state);
- void NotifyIdleMarkCompact() { ++idle_mark_compacts_; }
-
- void NotifyMarkCompact() { ++mark_compacts_; }
-
- void NotifyScavenge() { ++scavenges_; }
+ void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
size_t marking_speed_in_bytes_per_ms);
@@ -231,38 +197,11 @@ class GCIdleTimeHandler {
size_t scavenger_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms);
- bool ShouldGrowHeapSlowly() {
- return mode() == kDone;
- }
-
- enum Mode { kReduceLatency, kReduceMemory, kDone };
-
- Mode mode() { return mode_; }
-
private:
- bool IsMutatorActive(int contexts_disposed, int gcs);
- bool IsMutatorIdle(int long_idle_notifications,
- int background_idle_notifications, int gcs);
- void UpdateCounters(double idle_time_in_ms);
- void ResetCounters();
- Mode NextMode(const HeapState& heap_state);
- GCIdleTimeAction Action(double idle_time_in_ms, const HeapState& heap_state,
- bool reduce_memory);
GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
- int idle_mark_compacts_;
- int mark_compacts_;
- int scavenges_;
- // The number of long idle notifications with no GC happening
- // between the notifications.
- int long_idle_notifications_;
- // The number of background idle notifications with no GC happening
- // between the notifications.
- int background_idle_notifications_;
- // Idle notifications with no progress in the current mode.
- int idle_times_which_made_no_progress_per_mode_;
-
- Mode mode_;
+ // Idle notifications with no progress.
+ int idle_times_which_made_no_progress_;
DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
};
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 1c5226d6c4..6728f09bda 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -20,7 +20,7 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
GCTracer::AllocationEvent::AllocationEvent(double duration,
- intptr_t allocation_in_bytes) {
+ size_t allocation_in_bytes) {
duration_ = duration;
allocation_in_bytes_ = allocation_in_bytes;
}
@@ -99,7 +99,13 @@ GCTracer::GCTracer(Heap* heap)
longest_incremental_marking_step_(0.0),
cumulative_marking_duration_(0.0),
cumulative_sweeping_duration_(0.0),
- new_space_top_after_gc_(0),
+ allocation_time_ms_(0.0),
+ new_space_allocation_counter_bytes_(0),
+ old_generation_allocation_counter_bytes_(0),
+ allocation_duration_since_gc_(0.0),
+ new_space_allocation_in_bytes_since_gc_(0),
+ old_generation_allocation_in_bytes_since_gc_(0),
+ combined_mark_compact_speed_cache_(0.0),
start_counter_(0) {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = base::OS::TimeCurrentMillis();
@@ -114,12 +120,8 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
previous_ = current_;
double start_time = heap_->MonotonicallyIncreasingTimeInMs();
- if (new_space_top_after_gc_ != 0) {
- AddNewSpaceAllocationTime(
- start_time - previous_.end_time,
- reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
- new_space_top_after_gc_));
- }
+ SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
+ heap_->OldGenerationAllocationCounter());
if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR)
previous_incremental_mark_compactor_event_ = current_;
@@ -166,11 +168,9 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
- if (FLAG_trace_gc) {
- PrintF("[Finished reentrant %s during %s.]\n",
- collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
- current_.TypeName(false));
- }
+ Output("[Finished reentrant %s during %s.]\n",
+ collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
+ current_.TypeName(false));
return;
}
@@ -184,8 +184,9 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.end_object_size = heap_->SizeOfObjects();
current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
current_.end_holes_size = CountTotalHolesSize(heap_);
- new_space_top_after_gc_ =
- reinterpret_cast<intptr_t>(heap_->new_space()->top());
+ current_.survived_new_space_object_size = heap_->SurvivedNewSpaceObjectSize();
+
+ AddAllocation(current_.end_time);
int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
int used_memory = static_cast<int>(current_.end_object_size / KB);
@@ -227,18 +228,18 @@ void GCTracer::Stop(GarbageCollector collector) {
.cumulative_pure_incremental_marking_duration;
longest_incremental_marking_step_ = 0.0;
incremental_mark_compactor_events_.push_front(current_);
+ combined_mark_compact_speed_cache_ = 0.0;
} else {
DCHECK(current_.incremental_marking_bytes == 0);
DCHECK(current_.incremental_marking_duration == 0);
DCHECK(current_.pure_incremental_marking_duration == 0);
longest_incremental_marking_step_ = 0.0;
mark_compactor_events_.push_front(current_);
+ combined_mark_compact_speed_cache_ = 0.0;
}
// TODO(ernstm): move the code below out of GCTracer.
- if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
-
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
@@ -248,20 +249,54 @@ void GCTracer::Stop(GarbageCollector collector) {
if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
return;
- if (FLAG_trace_gc) {
- if (FLAG_trace_gc_nvp)
- PrintNVP();
- else
- Print();
+ if (FLAG_trace_gc_nvp)
+ PrintNVP();
+ else
+ Print();
+ if (FLAG_trace_gc) {
heap_->PrintShortHeapStatistics();
}
}
-void GCTracer::AddNewSpaceAllocationTime(double duration,
- intptr_t allocation_in_bytes) {
- allocation_events_.push_front(AllocationEvent(duration, allocation_in_bytes));
+void GCTracer::SampleAllocation(double current_ms,
+ size_t new_space_counter_bytes,
+ size_t old_generation_counter_bytes) {
+ if (allocation_time_ms_ == 0) {
+ // It is the first sample.
+ allocation_time_ms_ = current_ms;
+ new_space_allocation_counter_bytes_ = new_space_counter_bytes;
+ old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+ return;
+ }
+ // This assumes that counters are unsigned integers so that the subtraction
+ // below works even if the new counter is less than the old counter.
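+ // (Unsigned subtraction wraps modulo 2^n, so the delta stays correct as
+ // long as fewer than 2^n bytes were allocated in between.)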
+ size_t new_space_allocated_bytes =
+ new_space_counter_bytes - new_space_allocation_counter_bytes_;
+ size_t old_generation_allocated_bytes =
+ old_generation_counter_bytes - old_generation_allocation_counter_bytes_;
+ double duration = current_ms - allocation_time_ms_;
+ allocation_time_ms_ = current_ms;
+ new_space_allocation_counter_bytes_ = new_space_counter_bytes;
+ old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+ allocation_duration_since_gc_ += duration;
+ new_space_allocation_in_bytes_since_gc_ += new_space_allocated_bytes;
+ old_generation_allocation_in_bytes_since_gc_ +=
+ old_generation_allocated_bytes;
+}
+
+
+void GCTracer::AddAllocation(double current_ms) {
+ allocation_time_ms_ = current_ms;
+ new_space_allocation_events_.push_front(AllocationEvent(
+ allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
+ old_generation_allocation_events_.push_front(
+ AllocationEvent(allocation_duration_since_gc_,
+ old_generation_allocation_in_bytes_since_gc_));
+ allocation_duration_since_gc_ = 0;
+ new_space_allocation_in_bytes_since_gc_ = 0;
+ old_generation_allocation_in_bytes_since_gc_ = 0;
}
@@ -288,30 +323,51 @@ void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
}
+void GCTracer::Output(const char* format, ...) const {
+ if (FLAG_trace_gc) {
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+
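+ // Format again with a fresh va_list for the ring-buffer copy; a va_list
+ // cannot be reused after va_end.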
+ const int kBufferSize = 256;
+ char raw_buffer[kBufferSize];
+ Vector<char> buffer(raw_buffer, kBufferSize);
+ va_list arguments2;
+ va_start(arguments2, format);
+ VSNPrintF(buffer, format, arguments2);
+ va_end(arguments2);
+
+ heap_->AddToRingBuffer(buffer.start());
+}
+
+
void GCTracer::Print() const {
- PrintIsolate(heap_->isolate(), "%8.0f ms: ",
- heap_->isolate()->time_millis_since_init());
+ if (FLAG_trace_gc) {
+ PrintIsolate(heap_->isolate(), "");
+ }
+ Output("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
- PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
+ Output("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB,
static_cast<double>(current_.end_object_size) / MB,
static_cast<double>(current_.end_memory_size) / MB);
int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]);
- if (external_time > 0) PrintF("%d / ", external_time);
-
double duration = current_.end_time - current_.start_time;
- PrintF("%.1f ms", duration);
+ Output("%.1f / %d ms", duration, external_time);
+
if (current_.type == Event::SCAVENGER) {
if (current_.incremental_marking_steps > 0) {
- PrintF(" (+ %.1f ms in %d steps since last GC)",
+ Output(" (+ %.1f ms in %d steps since last GC)",
current_.incremental_marking_duration,
current_.incremental_marking_steps);
}
} else {
if (current_.incremental_marking_steps > 0) {
- PrintF(
+ Output(
" (+ %.1f ms in %d steps since start of marking, "
"biggest step %.1f ms)",
current_.incremental_marking_duration,
@@ -321,14 +377,14 @@ void GCTracer::Print() const {
}
if (current_.gc_reason != NULL) {
- PrintF(" [%s]", current_.gc_reason);
+ Output(" [%s]", current_.gc_reason);
}
if (current_.collector_reason != NULL) {
- PrintF(" [%s]", current_.collector_reason);
+ Output(" [%s]", current_.collector_reason);
}
- PrintF(".\n");
+ Output(".\n");
}
@@ -501,24 +557,26 @@ intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
}
if (durations == 0.0) return 0;
-
- return static_cast<intptr_t>(bytes / durations);
+ // Make sure the result is at least 1.
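+ // Adding 0.5 rounds to the nearest integer instead of truncating.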
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
-intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
+intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
+ ScavengeSpeedMode mode) const {
intptr_t bytes = 0;
double durations = 0.0;
EventBuffer::const_iterator iter = scavenger_events_.begin();
while (iter != scavenger_events_.end()) {
- bytes += iter->new_space_object_size;
+ bytes += mode == kForAllObjects ? iter->new_space_object_size
+ : iter->survived_new_space_object_size;
durations += iter->end_time - iter->start_time;
++iter;
}
if (durations == 0.0) return 0;
-
- return static_cast<intptr_t>(bytes / durations);
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
@@ -533,8 +591,8 @@ intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
}
if (durations == 0.0) return 0;
-
- return static_cast<intptr_t>(bytes / durations);
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
}
@@ -550,24 +608,89 @@ intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
}
if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
+
- return static_cast<intptr_t>(bytes / durations);
+double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
+ if (combined_mark_compact_speed_cache_ > 0)
+ return combined_mark_compact_speed_cache_;
+ const double kMinimumMarkingSpeed = 0.5;
+ double speed1 =
+ static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
+ double speed2 = static_cast<double>(
+ FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+ if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
+ // No data for the incremental marking speed.
+ // Return the non-incremental mark-compact speed.
+ combined_mark_compact_speed_cache_ =
+ static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+ } else {
+ // Combine the speed of incremental step and the speed of the final step.
+ // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
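+ // E.g., speed1 == speed2 == s gives s / 2: the same bytes are processed
+ // once by each phase.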
+ combined_mark_compact_speed_cache_ = speed1 * speed2 / (speed1 + speed2);
+ }
+ return combined_mark_compact_speed_cache_;
}
-intptr_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
- intptr_t bytes = 0;
- double durations = 0.0;
- AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
- while (iter != allocation_events_.end()) {
+size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
+ double time_ms) const {
+ size_t bytes = new_space_allocation_in_bytes_since_gc_;
+ double durations = allocation_duration_since_gc_;
+ AllocationEventBuffer::const_iterator iter =
+ new_space_allocation_events_.begin();
+ const size_t max_bytes = static_cast<size_t>(-1);
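+ // bytes < max_bytes - bytes is a conservative guard against size_t
+ // overflow in the sum below.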
+ while (iter != new_space_allocation_events_.end() &&
+ bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
bytes += iter->allocation_in_bytes_;
durations += iter->duration_;
++iter;
}
if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
+
+
+size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
+ double time_ms) const {
+ size_t bytes = old_generation_allocation_in_bytes_since_gc_;
+ double durations = allocation_duration_since_gc_;
+ AllocationEventBuffer::const_iterator iter =
+ old_generation_allocation_events_.begin();
+ const size_t max_bytes = static_cast<size_t>(-1);
+ while (iter != old_generation_allocation_events_.end() &&
+ bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
+ bytes += iter->allocation_in_bytes_;
+ durations += iter->duration_;
+ ++iter;
+ }
- return static_cast<intptr_t>(bytes / durations);
+ if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
+
+
+size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+ double time_ms) const {
+ return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
+ OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
+}
+
+
+size_t GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
+ return AllocationThroughputInBytesPerMillisecond(kThroughputTimeFrameMs);
+}
+
+
+size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
+ const {
+ return OldGenerationAllocationThroughputInBytesPerMillisecond(
+ kThroughputTimeFrameMs);
}
@@ -607,5 +730,5 @@ bool GCTracer::SurvivalEventsRecorded() const {
void GCTracer::ResetSurvivalEvents() { survival_events_.reset(); }
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index ca144b24b4..e26fc898f9 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -85,6 +85,9 @@ class RingBuffer {
};
+enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
+
+
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
// TODO(ernstm): Unit tests.
@@ -141,15 +144,15 @@ class GCTracer {
// Default constructor leaves the event uninitialized.
AllocationEvent() {}
- AllocationEvent(double duration, intptr_t allocation_in_bytes);
+ AllocationEvent(double duration, size_t allocation_in_bytes);
- // Time spent in the mutator during the end of the last garbage collection
- // to the beginning of the next garbage collection.
+ // Time spent in the mutator from the end of the last sample to the
+ // beginning of the next sample.
double duration_;
- // Memory allocated in the new space during the end of the last garbage
- // collection to the beginning of the next garbage collection.
- intptr_t allocation_in_bytes_;
+ // Memory allocated in the new space from the end of the last sample
+ // to the beginning of the next sample.
+ size_t allocation_in_bytes_;
};
@@ -227,6 +230,8 @@ class GCTracer {
// Size of new space objects in constructor.
intptr_t new_space_object_size;
+ // Size of survived new space objects in destructor.
+ intptr_t survived_new_space_object_size;
// Number of incremental marking steps since creation of tracer.
// (value at start of event)
@@ -286,6 +291,8 @@ class GCTracer {
typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
+ static const int kThroughputTimeFrameMs = 5000;
+
explicit GCTracer(Heap* heap);
// Start collecting data.
@@ -295,8 +302,12 @@ class GCTracer {
// Stop collecting data and print results.
void Stop(GarbageCollector collector);
- // Log an allocation throughput event.
- void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
+ // Sample and accumulate bytes allocated since the last GC.
+ void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
+ size_t old_generation_counter_bytes);
+
+ // Log the accumulated new space allocation bytes.
+ void AddAllocation(double current_ms);
void AddContextDisposalTime(double time);
@@ -367,7 +378,8 @@ class GCTracer {
// Compute the average scavenge speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
- intptr_t ScavengeSpeedInBytesPerMillisecond() const;
+ intptr_t ScavengeSpeedInBytesPerMillisecond(
+ ScavengeSpeedMode mode = kForAllObjects) const;
// Compute the average mark-sweep speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
@@ -378,9 +390,35 @@ class GCTracer {
// Returns 0 if no events have been recorded.
intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+ // Compute the overall mark-compact speed including incremental steps
+ // and the final mark-compact step.
+ double CombinedMarkCompactSpeedInBytesPerMillisecond();
+
// Allocation throughput in the new space in bytes/millisecond.
- // Returns 0 if no events have been recorded.
- intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+ // Returns 0 if no allocation events have been recorded.
+ size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
+ double time_ms = 0) const;
+
+ // Allocation throughput in the old generation in bytes/millisecond in the
+ // last time_ms milliseconds.
+ // Returns 0 if no allocation events have been recorded.
+ size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
+ double time_ms = 0) const;
+
+ // Allocation throughput in the heap in bytes/millisecond in the last time_ms
+ // milliseconds.
+ // Returns 0 if no allocation events have been recorded.
+ size_t AllocationThroughputInBytesPerMillisecond(double time_ms) const;
+
+ // Allocation throughput in the heap in bytes/millisecond in
+ // the last five seconds.
+ // Returns 0 if no allocation events have been recorded.
+ size_t CurrentAllocationThroughputInBytesPerMillisecond() const;
+
+ // Allocation throughput in the old generation in bytes/millisecond in
+ // the last five seconds.
+ // Returns 0 if no allocation events have been recorded.
+ size_t CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
// Computes the context disposal rate in milliseconds. It takes the time
// frame of the first recorded context disposal to the current time and
@@ -408,6 +446,10 @@ class GCTracer {
// TODO(ernstm): Move to Heap.
void Print() const;
+ // Prints a line and also adds it to the heap's ring buffer so that
+ // it can be included in later crash dumps.
+ void Output(const char* format, ...) const;
+
// Compute the mean duration of the events in the given ring buffer.
double MeanDuration(const EventBuffer& events) const;
@@ -447,7 +489,8 @@ class GCTracer {
EventBuffer incremental_mark_compactor_events_;
// RingBuffer for allocation events.
- AllocationEventBuffer allocation_events_;
+ AllocationEventBuffer new_space_allocation_events_;
+ AllocationEventBuffer old_generation_allocation_events_;
// RingBuffer for context disposal events.
ContextDisposalEventBuffer context_disposal_events_;
@@ -485,9 +528,17 @@ class GCTracer {
// all sweeping operations performed on the main thread.
double cumulative_sweeping_duration_;
- // Holds the new space top pointer recorded at the end of the last garbage
- // collection.
- intptr_t new_space_top_after_gc_;
+ // Timestamp and allocation counter at the last sampled allocation event.
+ double allocation_time_ms_;
+ size_t new_space_allocation_counter_bytes_;
+ size_t old_generation_allocation_counter_bytes_;
+
+ // Accumulated duration and allocated bytes since the last GC.
+ double allocation_duration_since_gc_;
+ size_t new_space_allocation_in_bytes_since_gc_;
+ size_t old_generation_allocation_in_bytes_since_gc_;
+
+ double combined_mark_compact_speed_cache_;
// Counts how many tracers were started without stopping.
int start_counter_;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index d0078c879f..fdb1d7345b 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -149,15 +149,9 @@ AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
}
-AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
- if (src->length() == 0) return src;
- return CopyConstantPoolArrayWithMap(src, src->map());
-}
-
-
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationSpace retry_space,
- Alignment alignment) {
+ AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
@@ -173,15 +167,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
HeapObject* object;
AllocationResult allocation;
if (NEW_SPACE == space) {
-#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kDoubleAligned) {
- allocation = new_space_.AllocateRawDoubleAligned(size_in_bytes);
- } else {
- allocation = new_space_.AllocateRaw(size_in_bytes);
- }
-#else
- allocation = new_space_.AllocateRaw(size_in_bytes);
-#endif
+ allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
space = retry_space;
} else {
@@ -193,18 +179,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
}
if (OLD_SPACE == space) {
-#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kDoubleAligned) {
- allocation = old_space_->AllocateRawDoubleAligned(size_in_bytes);
- } else {
- allocation = old_space_->AllocateRaw(size_in_bytes);
- }
-#else
- allocation = old_space_->AllocateRaw(size_in_bytes);
-#endif
+ allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
} else if (CODE_SPACE == space) {
if (size_in_bytes <= code_space()->AreaSize()) {
- allocation = code_space_->AllocateRaw(size_in_bytes);
+ allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
// Large code objects are allocated in large object space.
allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
@@ -213,7 +191,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else {
DCHECK(MAP_SPACE == space);
- allocation = map_space_->AllocateRaw(size_in_bytes);
+ allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
}
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
@@ -474,7 +452,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
Address top = NewSpaceTop();
DCHECK(memento_address == top ||
memento_address + HeapObject::kHeaderSize <= top ||
- !NewSpacePage::OnSamePage(memento_address, top));
+ !NewSpacePage::OnSamePage(memento_address, top - 1));
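+ // The check uses top - 1 because top can point exactly at a page
+ // boundary, where it would already lie on the next page.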
if (memento_address == top) return NULL;
AllocationMemento* memento = AllocationMemento::cast(candidate);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index f971359198..47717c162b 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -20,6 +20,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-reducer.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/store-buffer.h"
@@ -107,17 +108,20 @@ Heap::Heap()
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
- idle_old_generation_allocation_limit_(
- kMinimumOldGenerationAllocationLimit),
old_gen_exhausted_(false),
+ optimize_for_memory_usage_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(this),
+ new_space_high_promotion_mode_active_(false),
+ gathering_lifetime_feedback_(0),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
+ low_survival_rate_period_length_(0),
+ survival_rate_(0),
promotion_ratio_(0),
semi_space_copied_object_size_(0),
previous_semi_space_copied_object_size_(0),
@@ -126,6 +130,8 @@ Heap::Heap()
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
maximum_size_scavenges_(0),
+ previous_survival_rate_trend_(Heap::STABLE),
+ survival_rate_trend_(Heap::STABLE),
max_gc_pause_(0.0),
total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
@@ -133,15 +139,21 @@ Heap::Heap()
marking_time_(0.0),
sweeping_time_(0.0),
last_idle_notification_time_(0.0),
+ last_gc_time_(0.0),
mark_compact_collector_(this),
store_buffer_(this),
marking_(this),
incremental_marking_(this),
- gc_count_at_last_idle_gc_(0),
+ memory_reducer_(this),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
+ new_space_allocation_counter_(0),
+ old_generation_allocation_counter_(0),
+ old_generation_size_at_last_gc_(0),
gcs_since_last_deopt_(0),
allocation_sites_scratchpad_length_(0),
+ ring_buffer_full_(false),
+ ring_buffer_end_(0),
promotion_queue_(this),
configured_(false),
external_string_table_(this),
@@ -227,8 +239,12 @@ void Heap::UpdateMaximumCommitted() {
intptr_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
- return new_space_.Available() + old_space_->Available() +
- code_space_->Available() + map_space_->Available();
+ intptr_t total = 0;
+ AllSpaces spaces(this);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ total += space->Available();
+ }
+ return total;
}
@@ -395,6 +411,20 @@ void Heap::ReportStatisticsAfterGC() {
#else
if (FLAG_log_gc) new_space_.ReportStatistics();
#endif // DEBUG
+ for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
+ ++i) {
+ int count = deferred_counters_[i];
+ deferred_counters_[i] = 0;
+ while (count > 0) {
+ count--;
+ isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
+ }
+ }
+}
+
+
+void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
+ deferred_counters_[feature]++;
}
@@ -405,7 +435,7 @@ void Heap::GarbageCollectionPrologue() {
gc_count_++;
unflattened_strings_length_ = 0;
- if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+ if (FLAG_flush_code) {
mark_compact_collector()->EnableCodeFlushing(true);
}
@@ -446,6 +476,7 @@ void Heap::GarbageCollectionPrologue() {
maximum_size_scavenges_ = 0;
}
CheckNewSpaceExpansionCriteria();
+ UpdateNewSpaceAllocationCounter();
}
@@ -501,7 +532,8 @@ void Heap::RepairFreeListsAfterDeserialization() {
}
-void Heap::ProcessPretenuringFeedback() {
+bool Heap::ProcessPretenuringFeedback() {
+ bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
int dont_tenure_decisions = 0;
@@ -522,7 +554,6 @@ void Heap::ProcessPretenuringFeedback() {
int i = 0;
Object* list_element = allocation_sites_list();
- bool trigger_deoptimization = false;
bool maximum_size_scavenge = MaximumSizeScavenge();
while (use_scratchpad ? i < allocation_sites_scratchpad_length_
: list_element->IsAllocationSite()) {
@@ -574,6 +605,7 @@ void Heap::ProcessPretenuringFeedback() {
dont_tenure_decisions);
}
}
+ return trigger_deoptimization;
}
@@ -603,9 +635,6 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
- // Process pretenuring feedback and update allocation sites.
- ProcessPretenuringFeedback();
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -619,8 +648,8 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
-#endif
if (FLAG_check_handle_count) CheckHandleCount();
+#endif
if (FLAG_deopt_every_n_garbage_collections > 0) {
// TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
// the topmost optimized frame can be deoptimized safely, because it
@@ -711,6 +740,9 @@ void Heap::GarbageCollectionEpilogue() {
// Remember the last top pointer so that we can later find out
// whether we allocated in new space since the last GC.
new_space_top_after_last_gc_ = new_space()->top();
+ last_gc_time_ = MonotonicallyIncreasingTimeInMs();
+
+ ReduceNewSpaceSize();
}
@@ -743,7 +775,8 @@ void Heap::PreprocessStackTraces() {
void Heap::HandleGCRequest() {
if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
- CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
+ CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt",
+ incremental_marking()->CallbackFlags());
return;
}
DCHECK(FLAG_overapproximate_weak_closure);
@@ -896,6 +929,11 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
bool next_gc_likely_to_collect_more = false;
+ intptr_t committed_memory_before = 0;
+
+ if (collector == MARK_COMPACTOR) {
+ committed_memory_before = CommittedOldGenerationMemory();
+ }
{
tracer()->Start(collector, gc_reason, collector_reason);
@@ -917,20 +955,38 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
if (collector == MARK_COMPACTOR) {
- gc_idle_time_handler_.NotifyMarkCompact();
- } else {
- gc_idle_time_handler_.NotifyScavenge();
+ intptr_t committed_memory_after = CommittedOldGenerationMemory();
+ intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kMarkCompact;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ // Trigger one more GC if
+ // - this GC decreased committed memory,
+ // - there is high fragmentation,
+ // - there are live detached contexts.
+ event.next_gc_likely_to_collect_more =
+ (committed_memory_before - committed_memory_after) > MB ||
+ HasHighFragmentation(used_memory_after, committed_memory_after) ||
+ (detached_contexts()->length() > 0);
+ if (deserialization_complete_) {
+ memory_reducer_.NotifyMarkCompact(event);
+ }
}
tracer()->Stop(collector);
}
+ if (collector == MARK_COMPACTOR &&
+ (gc_callback_flags & kGCCallbackFlagForced) != 0) {
+ isolate()->CountUsage(v8::Isolate::kForcedGC);
+ }
+
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
if (!mark_compact_collector()->abort_incremental_marking() &&
incremental_marking()->IsStopped() &&
incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
- incremental_marking()->Start();
+ incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
}
return next_gc_likely_to_collect_more;
@@ -949,10 +1005,29 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
AgeInlineCaches();
set_retained_maps(ArrayList::cast(empty_fixed_array()));
tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kContextDisposed;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_.NotifyContextDisposed(event);
return ++contexts_disposed_;
}
+void Heap::StartIncrementalMarking(int gc_flags,
+ const GCCallbackFlags gc_callback_flags,
+ const char* reason) {
+ DCHECK(incremental_marking()->IsStopped());
+ incremental_marking()->Start(gc_flags, gc_callback_flags, reason);
+}
+
+
+void Heap::StartIdleIncrementalMarking() {
+ gc_idle_time_handler_.ResetNoProgressCounter();
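+ // kReduceMemoryFootprintMask makes the GC started from idle time also
+ // try to reduce the memory footprint.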
+ StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
+ "idle");
+}
+
+
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len) {
if (len == 0) return;
@@ -1017,9 +1092,9 @@ bool Heap::ReserveSpace(Reservation* reservations) {
DCHECK_LE(size, MemoryAllocator::PageAreaSize(
static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
- allocation = new_space()->AllocateRaw(size);
+ allocation = new_space()->AllocateRawUnaligned(size);
} else {
- allocation = paged_space(space)->AllocateRaw(size);
+ allocation = paged_space(space)->AllocateRawUnaligned(size);
}
HeapObject* free_space;
if (allocation.To(&free_space)) {
@@ -1138,6 +1213,24 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
} else {
high_survival_rate_period_length_ = 0;
}
+
+ if (survival_rate < kYoungSurvivalRateLowThreshold) {
+ low_survival_rate_period_length_++;
+ } else {
+ low_survival_rate_period_length_ = 0;
+ }
+
+ double survival_rate_diff = survival_rate_ - survival_rate;
+
+ if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
+ set_survival_rate_trend(DECREASING);
+ } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
+ set_survival_rate_trend(INCREASING);
+ } else {
+ set_survival_rate_trend(STABLE);
+ }
+
+ survival_rate_ = survival_rate;
}
bool Heap::PerformGarbageCollection(
@@ -1180,27 +1273,46 @@ bool Heap::PerformGarbageCollection(
}
if (collector == MARK_COMPACTOR) {
+ UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
sweep_generation_++;
- // Temporarily set the limit for case when PostGarbageCollectionProcessing
- // allocates and triggers GC. The real limit is set at after
- // PostGarbageCollectionProcessing.
- SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which can
+ // cause another GC. Take into account the objects promoted during GC.
+ old_generation_allocation_counter_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
} else {
Scavenge();
}
+ bool deopted = ProcessPretenuringFeedback();
UpdateSurvivalStatistics(start_new_space_size);
+
+ // When pretenuring is collecting new feedback, we do not shrink the new space
+ // right away.
+ if (deopted) {
+ RecordDeoptForPretenuring();
+ } else {
+ ConfigureNewGenerationSize();
+ }
ConfigureInitialOldGenerationSize();
isolate_->counters()->objs_since_last_young()->Set(0);
- // Callbacks that fire after this point might trigger nested GCs and
- // restart incremental marking, the assertion can't be moved down.
- DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
+ if (collector != SCAVENGER) {
+ // Callbacks that fire after this point might trigger nested GCs and
+ // restart incremental marking, the assertion can't be moved down.
+ DCHECK(incremental_marking()->IsStopped());
+
+ // We finished a marking cycle. We can uncommit the marking deque until
+ // we start marking again.
+ mark_compact_collector_.marking_deque()->Uninitialize();
+ mark_compact_collector_.EnsureMarkingDequeIsCommitted(
+ MarkCompactCollector::kMinMarkingDequeSize);
+ }
gc_post_processing_depth_++;
{
@@ -1216,15 +1328,19 @@ bool Heap::PerformGarbageCollection(
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
+ double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
+ double mutator_speed = static_cast<double>(
+ tracer()
+ ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
+ intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
- SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
- freed_global_handles);
- // We finished a marking cycle. We can uncommit the marking deque until
- // we start marking again.
- mark_compact_collector_.UncommitMarkingDeque();
+ SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
+ } else if (HasLowYoungGenerationAllocationRate() &&
+ old_generation_size_configured_) {
+ DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
}
{
@@ -1405,7 +1521,8 @@ void Heap::CheckNewSpaceExpansionCriteria() {
survived_since_last_expansion_ = 0;
}
} else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+ survived_since_last_expansion_ > new_space_.TotalCapacity() &&
+ !new_space_high_promotion_mode_active_) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
new_space_.Grow();
@@ -1559,8 +1676,6 @@ void Heap::Scavenge() {
SelectScavengingVisitorsTable();
- incremental_marking()->PrepareForScavenge();
-
PrepareArrayBufferDiscoveryInNewSpace();
// Flip the semispaces. After flipping, to space is empty, from space has
@@ -2038,27 +2153,68 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0); // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
- 0); // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
- kDoubleAlignmentMask) == 0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
0); // NOLINT
+#ifdef V8_HOST_ARCH_32_BIT
+STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
+ 0); // NOLINT
+#endif
-HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
- if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
- CreateFillerObjectAt(object->address(), kPointerSize);
- return HeapObject::FromAddress(object->address() + kPointerSize);
- } else {
- CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
- return object;
+int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
+ switch (alignment) {
+ case kWordAligned:
+ return 0;
+ case kDoubleAligned:
+ case kDoubleUnaligned:
+ return kDoubleSize - kPointerSize;
+ case kSimd128Unaligned:
+ return kSimd128Size - kPointerSize;
+ default:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+
+int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
+ intptr_t offset = OffsetFrom(address);
+ if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
+ return kPointerSize;
+ if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
+    return kDoubleSize - kPointerSize;  // 0 on 64-bit hosts, where doubles
+                                        // are always word-aligned.
+ if (alignment == kSimd128Unaligned) {
+ return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
+ kSimd128AlignmentMask;
+ }
+ return 0;
+}
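+
+// Worked examples for GetFillToAlign, assuming a 32-bit host where
+// kPointerSize == 4, kDoubleSize == 8 and kSimd128Size == 16:
+//
+//   GetFillToAlign(0x1004, kDoubleAligned)    -> 4  (pad up to 0x1008)
+//   GetFillToAlign(0x1000, kDoubleAligned)    -> 0  (already aligned)
+//   GetFillToAlign(0x1000, kDoubleUnaligned)  -> 4  (force misalignment)
+//   GetFillToAlign(0x1004, kSimd128Unaligned) -> 8  (lane data at 0x1010)
+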
+
+
+HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
+ CreateFillerObjectAt(object->address(), filler_size);
+ return HeapObject::FromAddress(object->address() + filler_size);
+}
+
+
+HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
+ int allocation_size,
+ AllocationAlignment alignment) {
+ int filler_size = allocation_size - object_size;
+ DCHECK(filler_size > 0);
+ int pre_filler = GetFillToAlign(object->address(), alignment);
+ if (pre_filler) {
+ object = PrecedeWithFiller(object, pre_filler);
+ filler_size -= pre_filler;
}
+ if (filler_size)
+ CreateFillerObjectAt(object->address() + object_size, filler_size);
+ return object;
}
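+
+// AlignWithFiller assumes the caller over-allocated by up to
+// GetMaximumFillToAlign(alignment) bytes: the pre-filler moves the object to
+// a fitting address and the post-filler covers the remaining slack, keeping
+// the heap iterable. Caller-side sketch for a 32-bit host (Allocate() stands
+// in for a raw allocator and is not part of this change):
+//
+//   int slack = Heap::GetMaximumFillToAlign(kDoubleAligned);  // 4
+//   int allocation_size = object_size + slack;
+//   HeapObject* raw = Allocate(allocation_size);
+//   HeapObject* obj = heap->AlignWithFiller(raw, object_size,
+//                                           allocation_size, kDoubleAligned);
+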
HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
- return EnsureDoubleAligned(object, size);
+ return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
}
@@ -2201,22 +2357,14 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
- template <int alignment>
+ template <AllocationAlignment alignment>
static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kDoubleAlignment) {
- allocation = heap->new_space()->AllocateRawDoubleAligned(object_size);
- } else {
- allocation = heap->new_space()->AllocateRaw(object_size);
- }
-#else
- allocation = heap->new_space()->AllocateRaw(object_size);
-#endif
+ AllocationResult allocation =
+ heap->new_space()->AllocateRaw(object_size, alignment);
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
@@ -2238,21 +2386,13 @@ class ScavengingVisitor : public StaticVisitorBase {
}
- template <ObjectContents object_contents, int alignment>
+ template <ObjectContents object_contents, AllocationAlignment alignment>
static inline bool PromoteObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
- AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kDoubleAlignment) {
- allocation = heap->old_space()->AllocateRawDoubleAligned(object_size);
- } else {
- allocation = heap->old_space()->AllocateRaw(object_size);
- }
-#else
- allocation = heap->old_space()->AllocateRaw(object_size);
-#endif
+ AllocationResult allocation =
+ heap->old_space()->AllocateRaw(object_size, alignment);
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
@@ -2276,7 +2416,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
- template <ObjectContents object_contents, int alignment>
+ template <ObjectContents object_contents, AllocationAlignment alignment>
static inline void EvacuateObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
@@ -2330,8 +2470,8 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ object_size);
}
@@ -2339,24 +2479,33 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
}
static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ FixedTypedArrayBase* target =
+ reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
+ target->set_base_pointer(target, SKIP_WRITE_BARRIER);
}
static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
+
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ FixedTypedArrayBase* target =
+ reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
+ target->set_base_pointer(target, SKIP_WRITE_BARRIER);
}
@@ -2375,8 +2524,7 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateByteArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
}
@@ -2384,8 +2532,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqOneByteString::cast(object)
->SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
}
@@ -2393,8 +2540,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)
->SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
}
@@ -2431,8 +2577,8 @@ class ScavengingVisitor : public StaticVisitorBase {
}
int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ object_size);
}
template <ObjectContents object_contents>
@@ -2441,14 +2587,14 @@ class ScavengingVisitor : public StaticVisitorBase {
template <int object_size>
static inline void VisitSpecialized(Map* map, HeapObject** slot,
HeapObject* object) {
- EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<object_contents, kWordAligned>(map, slot, object,
+ object_size);
}
static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
int object_size = map->instance_size();
- EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
- object_size);
+ EvacuateObject<object_contents, kWordAligned>(map, slot, object,
+ object_size);
}
};
@@ -2530,6 +2676,48 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
+void Heap::ConfigureNewGenerationSize() {
+ bool still_gathering_lifetime_data = gathering_lifetime_feedback_ != 0;
+  if (still_gathering_lifetime_data) gathering_lifetime_feedback_--;
+ if (!new_space_high_promotion_mode_active_ &&
+ new_space_.TotalCapacity() == new_space_.MaximumCapacity() &&
+ IsStableOrIncreasingSurvivalTrend() && IsHighSurvivalRate()) {
+    // Stable high survival rates even though the young generation is at
+    // maximum capacity indicate that most objects will be promoted.
+    // To decrease scavenger pauses and final mark-sweep pauses, we
+    // have to limit the maximal capacity of the young generation.
+ if (still_gathering_lifetime_data) {
+ if (FLAG_trace_gc) {
+ PrintPID(
+ "Postpone entering high promotion mode as optimized pretenuring "
+ "code is still being generated\n");
+ }
+ } else {
+ new_space_high_promotion_mode_active_ = true;
+ if (FLAG_trace_gc) {
+ PrintPID("Limited new space size due to high promotion rate: %d MB\n",
+ new_space_.InitialTotalCapacity() / MB);
+ }
+ }
+ } else if (new_space_high_promotion_mode_active_ &&
+ IsStableOrDecreasingSurvivalTrend() && IsLowSurvivalRate()) {
+ // Decreasing low survival rates might indicate that the above high
+ // promotion mode is over and we should allow the young generation
+ // to grow again.
+ new_space_high_promotion_mode_active_ = false;
+ if (FLAG_trace_gc) {
+ PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
+ new_space_.MaximumCapacity() / MB);
+ }
+ }
+
+ if (new_space_high_promotion_mode_active_ &&
+ new_space_.TotalCapacity() > new_space_.InitialTotalCapacity()) {
+ new_space_.Shrink();
+ }
+}
+
+
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
@@ -2605,7 +2793,8 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
AllocationSpace space) {
HeapObject* obj;
{
- AllocationResult allocation = AllocateRaw(size, space, space);
+ AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
+ AllocationResult allocation = AllocateRaw(size, space, space, align);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
@@ -2667,8 +2856,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
- ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
- constant_pool_array);
#undef ALLOCATE_PARTIAL_MAP
}
@@ -2705,13 +2892,6 @@ bool Heap::CreateInitialMaps() {
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
- // Allocate the constant pool array.
- {
- AllocationResult allocation = AllocateEmptyConstantPoolArray();
- if (!allocation.To(&obj)) return false;
- }
- set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
-
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
@@ -2748,16 +2928,6 @@ bool Heap::CreateInitialMaps() {
null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
- constant_pool_array_map()->set_code_cache(empty_fixed_array());
- constant_pool_array_map()->set_dependent_code(
- DependentCode::cast(empty_fixed_array()));
- constant_pool_array_map()->set_raw_transitions(Smi::FromInt(0));
- constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- constant_pool_array_map()->set_layout_descriptor(
- LayoutDescriptor::FastPointerLayout());
- }
-
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
meta_map()->set_constructor_or_backpointer(null_value());
@@ -2771,9 +2941,6 @@ bool Heap::CreateInitialMaps() {
null_map()->set_prototype(null_value());
null_map()->set_constructor_or_backpointer(null_value());
- constant_pool_array_map()->set_prototype(null_value());
- constant_pool_array_map()->set_constructor_or_backpointer(null_value());
-
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
{ \
@@ -2792,6 +2959,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
mutable_heap_number)
+ ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
@@ -2929,7 +3097,8 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
HeapObject* result;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
@@ -2940,6 +3109,32 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
}
+AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Float32x4::kSize;
+ STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(float32x4_map());
+ Float32x4* float32x4 = Float32x4::cast(result);
+ float32x4->set_lane(0, w);
+ float32x4->set_lane(1, x);
+ float32x4->set_lane(2, y);
+ float32x4->set_lane(3, z);
+ return result;
+}
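+
+// Note that kSimd128Unaligned aligns the lane data rather than the object
+// start: GetFillToAlign() pads so that address + kPointerSize (the first
+// lane, just past the map word) lands on a 16-byte boundary. For example, on
+// a 32-bit host an object placed at 0x100C keeps its four float lanes at
+// 0x1010, so they can be read with aligned 128-bit loads.
+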
+
+
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
@@ -2967,6 +3162,7 @@ AllocationResult Heap::AllocatePropertyCell() {
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
+ cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
cell->set_value(the_hole_value());
return result;
}
@@ -2982,7 +3178,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
}
result->set_map_no_write_barrier(weak_cell_map());
WeakCell::cast(result)->initialize(value);
- WeakCell::cast(result)->set_next(undefined_value(), SKIP_WRITE_BARRIER);
+ WeakCell::cast(result)->clear_next(this);
return result;
}
@@ -3061,6 +3257,8 @@ void Heap::CreateInitialObjects() {
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+ set_minus_infinity_value(
+ *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
// The hole has not been created yet, but we want to put something
  // predictable in the gaps in the string table, so let's make that Smi zero.
@@ -3142,9 +3340,12 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(name) \
- Handle<Symbol> name = factory->NewPrivateOwnSymbol(); \
- roots_[k##name##RootIndex] = *name;
+#define SYMBOL_INIT(name) \
+ { \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
+ Handle<Object> symbol(isolate()->factory()->NewPrivateSymbol(name##d)); \
+ roots_[k##name##RootIndex] = *symbol; \
+ }
PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
}
@@ -3204,7 +3405,7 @@ void Heap::CreateInitialObjects() {
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
- if (FLAG_vector_ics) {
+ {
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> dummy_vector =
factory->NewTypeFeedbackVector(&spec);
@@ -3212,8 +3413,18 @@ void Heap::CreateInitialObjects() {
*TypeFeedbackVector::MegamorphicSentinel(isolate()),
SKIP_WRITE_BARRIER);
set_keyed_load_dummy_vector(*dummy_vector);
+ }
+
+ if (FLAG_vector_stores) {
+ FeedbackVectorSpec spec(0, Code::KEYED_STORE_IC);
+ Handle<TypeFeedbackVector> dummy_vector =
+ factory->NewTypeFeedbackVector(&spec);
+ dummy_vector->Set(FeedbackVectorICSlot(0),
+ *TypeFeedbackVector::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+ set_keyed_store_dummy_vector(*dummy_vector);
} else {
- set_keyed_load_dummy_vector(empty_fixed_array());
+ set_keyed_store_dummy_vector(empty_fixed_array());
}
set_detached_contexts(empty_fixed_array());
@@ -3237,6 +3448,10 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
set_array_protector(*cell);
+ cell = factory->NewPropertyCell();
+ cell->set_value(the_hole_value());
+ set_empty_property_cell(*cell);
+
set_weak_stack_trace_list(Smi::FromInt(0));
set_allocation_sites_scratchpad(
@@ -3257,6 +3472,19 @@ void Heap::CreateInitialObjects() {
}
+void Heap::AddPrivateGlobalSymbols(Handle<Object> private_intern_table) {
+#define ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE(name_arg) \
+ { \
+ Handle<Symbol> symbol(Symbol::cast(roots_[k##name_arg##RootIndex])); \
+ Handle<String> name_arg##d(String::cast(symbol->name())); \
+ JSObject::AddProperty(Handle<JSObject>::cast(private_intern_table), \
+ name_arg##d, symbol, NONE); \
+ }
+ PRIVATE_SYMBOL_LIST(ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE)
+#undef ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE
+}
+
+
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kStoreBufferTopRootIndex:
@@ -3574,19 +3802,18 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
- // At this point, we may be deserializing the heap from a snapshot, and
- // none of the maps have been created yet and are NULL.
if (size == kPointerSize) {
filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
- DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
- DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
} else {
filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
- DCHECK(filler->map() == NULL || filler->map() == free_space_map());
FreeSpace::cast(filler)->nobarrier_set_size(size);
}
+  // At this point, we may be deserializing the heap from a snapshot, in
+  // which case none of the maps have been created yet and they are all NULL.
+ DCHECK((filler->map() == NULL && !deserialization_complete_) ||
+ filler->map()->IsMap());
}
@@ -3771,6 +3998,7 @@ static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
AllocationResult Heap::AllocateFixedTypedArray(int length,
ExternalArrayType array_type,
+ bool initialize,
PretenureFlag pretenure) {
int element_size;
ElementsKind elements_kind;
@@ -3787,8 +4015,9 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
object->set_map(MapForFixedTypedArray(array_type));
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+ elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
elements->set_length(length);
- memset(elements->DataPtr(), 0, elements->DataSize());
+ if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
return elements;
}
@@ -3821,7 +4050,8 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
Code* code = Code::cast(result);
DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
- isolate_->code_range()->contains(code->address()));
+ isolate_->code_range()->contains(code->address()) ||
+ object_size <= code_space()->AreaSize());
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
return code;
@@ -3830,16 +4060,6 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
AllocationResult Heap::CopyCode(Code* code) {
AllocationResult allocation;
- HeapObject* new_constant_pool;
- if (FLAG_enable_ool_constant_pool &&
- code->constant_pool() != empty_constant_pool_array()) {
- // Copy the constant pool, since edits to the copied code may modify
- // the constant pool.
- allocation = CopyConstantPoolArray(code->constant_pool());
- if (!allocation.To(&new_constant_pool)) return allocation;
- } else {
- new_constant_pool = empty_constant_pool_array();
- }
HeapObject* result = NULL;
// Allocate an object the same size as the code object.
@@ -3853,37 +4073,25 @@ AllocationResult Heap::CopyCode(Code* code) {
CopyBlock(new_addr, old_addr, obj_size);
Code* new_code = Code::cast(result);
- // Update the constant pool.
- new_code->set_constant_pool(new_constant_pool);
-
// Relocate the copy.
DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
- isolate_->code_range()->contains(code->address()));
+ isolate_->code_range()->contains(code->address()) ||
+ obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
return new_code;
}
AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
- // do not risk leaving uninitialized Code object (and breaking the heap).
+ // Allocate ByteArray before the Code object, so that we do not risk
+ // leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info_array;
{
AllocationResult allocation =
AllocateByteArray(reloc_info.length(), TENURED);
if (!allocation.To(&reloc_info_array)) return allocation;
}
- HeapObject* new_constant_pool;
- if (FLAG_enable_ool_constant_pool &&
- code->constant_pool() != empty_constant_pool_array()) {
- // Copy the constant pool, since edits to the copied code may modify
- // the constant pool.
- AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
- if (!allocation.To(&new_constant_pool)) return allocation;
- } else {
- new_constant_pool = empty_constant_pool_array();
- }
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
@@ -3908,9 +4116,6 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Code* new_code = Code::cast(result);
new_code->set_relocation_info(reloc_info_array);
- // Update constant pool.
- new_code->set_constant_pool(new_constant_pool);
-
// Copy patched rinfo.
CopyBytes(new_code->relocation_start(), reloc_info.start(),
static_cast<size_t>(reloc_info.length()));
@@ -3918,7 +4123,9 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Relocate the copy.
DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
- isolate_->code_range()->contains(code->address()));
+ isolate_->code_range()->contains(code->address()) ||
+ new_obj_size <= code_space()->AreaSize());
+
new_code->Relocate(new_addr - old_addr);
#ifdef VERIFY_HEAP
@@ -4358,7 +4565,7 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
AllocationResult Heap::AllocateEmptyFixedTypedArray(
ExternalArrayType array_type) {
- return AllocateFixedTypedArray(0, array_type, TENURED);
+ return AllocateFixedTypedArray(0, array_type, false, TENURED);
}
@@ -4403,31 +4610,6 @@ AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
}
-AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
- Map* map) {
- HeapObject* obj;
- if (src->is_extended_layout()) {
- ConstantPoolArray::NumberOfEntries small(src,
- ConstantPoolArray::SMALL_SECTION);
- ConstantPoolArray::NumberOfEntries extended(
- src, ConstantPoolArray::EXTENDED_SECTION);
- AllocationResult allocation =
- AllocateExtendedConstantPoolArray(small, extended);
- if (!allocation.To(&obj)) return allocation;
- } else {
- ConstantPoolArray::NumberOfEntries small(src,
- ConstantPoolArray::SMALL_SECTION);
- AllocationResult allocation = AllocateConstantPoolArray(small);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_no_write_barrier(map);
- CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
- src->address() + ConstantPoolArray::kFirstEntryOffset,
- src->size() - ConstantPoolArray::kFirstEntryOffset);
- return obj;
-}
-
-
AllocationResult Heap::AllocateRawFixedArray(int length,
PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
@@ -4448,7 +4630,7 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
if (length == 0) return empty_fixed_array();
DCHECK(!InNewSpace(filler));
- HeapObject* result;
+ HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
if (!allocation.To(&result)) return allocation;
@@ -4516,64 +4698,6 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
}
-AllocationResult Heap::AllocateConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small) {
- CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
- int size = ConstantPoolArray::SizeFor(small);
- AllocationSpace space = SelectSpace(size, TENURED);
-
- HeapObject* object = nullptr;
- {
- AllocationResult allocation =
- AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
- if (!allocation.To(&object)) return allocation;
- }
- object->set_map_no_write_barrier(constant_pool_array_map());
-
- ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
- constant_pool->Init(small);
- constant_pool->ClearPtrEntries(isolate());
- return constant_pool;
-}
-
-
-AllocationResult Heap::AllocateExtendedConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small,
- const ConstantPoolArray::NumberOfEntries& extended) {
- CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
- CHECK(extended.are_in_range(0, kMaxInt));
- int size = ConstantPoolArray::SizeForExtended(small, extended);
- AllocationSpace space = SelectSpace(size, TENURED);
-
- HeapObject* object;
- {
- AllocationResult allocation =
- AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
- if (!allocation.To(&object)) return allocation;
- }
- object->set_map_no_write_barrier(constant_pool_array_map());
-
- ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
- constant_pool->InitExtended(small, extended);
- constant_pool->ClearPtrEntries(isolate());
- return constant_pool;
-}
-
-
-AllocationResult Heap::AllocateEmptyConstantPoolArray() {
- ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
- int size = ConstantPoolArray::SizeFor(small);
- HeapObject* result = NULL;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_no_write_barrier(constant_pool_array_map());
- ConstantPoolArray::cast(result)->Init(small);
- return result;
-}
-
-
AllocationResult Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
@@ -4648,17 +4772,116 @@ void Heap::MakeHeapIterable() {
}
-void Heap::ReduceNewSpaceSize(bool is_long_idle_notification) {
- if (is_long_idle_notification) {
+static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
+ const double kMinMutatorUtilization = 0.0;
+ const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
+ if (mutator_speed == 0) return kMinMutatorUtilization;
+ if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
+ // Derivation:
+ // mutator_utilization = mutator_time / (mutator_time + gc_time)
+ // mutator_time = 1 / mutator_speed
+ // gc_time = 1 / gc_speed
+ // mutator_utilization = (1 / mutator_speed) /
+ // (1 / mutator_speed + 1 / gc_speed)
+ // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
+ return gc_speed / (mutator_speed + gc_speed);
+}
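+
+// Numeric sanity check for the formula above: with mutator_speed = 1000 B/ms
+// and gc_speed = 200000 B/ms,
+//
+//   MU = 200000 / (1000 + 200000) ~= 0.995,
+//
+// which clears the 0.993 threshold used below, so such an isolate counts as
+// having a low allocation rate. When the GC speed is unknown (0), the
+// conservative 200000 B/ms estimate is substituted first.
+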
+
+
+double Heap::YoungGenerationMutatorUtilization() {
+ double mutator_speed = static_cast<double>(
+ tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+ double gc_speed = static_cast<double>(
+ tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+ double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
+ if (FLAG_trace_mutator_utilization) {
+ PrintIsolate(isolate(),
+ "Young generation mutator utilization = %.3f ("
+ "mutator_speed=%.f, gc_speed=%.f)\n",
+ result, mutator_speed, gc_speed);
+ }
+ return result;
+}
+
+
+double Heap::OldGenerationMutatorUtilization() {
+ double mutator_speed = static_cast<double>(
+ tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
+ double gc_speed = static_cast<double>(
+ tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
+ double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
+ if (FLAG_trace_mutator_utilization) {
+ PrintIsolate(isolate(),
+ "Old generation mutator utilization = %.3f ("
+ "mutator_speed=%.f, gc_speed=%.f)\n",
+ result, mutator_speed, gc_speed);
+ }
+ return result;
+}
+
+
+bool Heap::HasLowYoungGenerationAllocationRate() {
+ const double high_mutator_utilization = 0.993;
+ return YoungGenerationMutatorUtilization() > high_mutator_utilization;
+}
+
+
+bool Heap::HasLowOldGenerationAllocationRate() {
+ const double high_mutator_utilization = 0.993;
+ return OldGenerationMutatorUtilization() > high_mutator_utilization;
+}
+
+
+bool Heap::HasLowAllocationRate() {
+ return HasLowYoungGenerationAllocationRate() &&
+ HasLowOldGenerationAllocationRate();
+}
+
+
+bool Heap::HasHighFragmentation() {
+ intptr_t used = PromotedSpaceSizeOfObjects();
+ intptr_t committed = CommittedOldGenerationMemory();
+ return HasHighFragmentation(used, committed);
+}
+
+
+bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
+ const intptr_t kSlack = 16 * MB;
+ // Fragmentation is high if committed > 2 * used + kSlack.
+  // Rewrite the expression to avoid overflow.
+ return committed - used > used + kSlack;
+}
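+
+// Example: with used = 100 MB, fragmentation is flagged once committed
+// memory exceeds 2 * 100 + 16 = 216 MB. Comparing committed - used against
+// used + kSlack keeps both sides within range even when 2 * used + kSlack
+// would overflow a 32-bit intptr_t.
+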
+
+
+void Heap::ReduceNewSpaceSize() {
+ // TODO(ulan): Unify this constant with the similar constant in
+ // GCIdleTimeHandler once the change is merged to 4.5.
+ static const size_t kLowAllocationThroughput = 1000;
+ size_t allocation_throughput =
+ tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
+ if (FLAG_predictable || allocation_throughput == 0) return;
+ if (allocation_throughput < kLowAllocationThroughput) {
new_space_.Shrink();
UncommitFromSpace();
}
}
+void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+ if (FLAG_overapproximate_weak_closure &&
+ (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+ (!incremental_marking()->weak_closure_was_overapproximated() &&
+ mark_compact_collector_.marking_deque()->IsEmpty()))) {
+ OverApproximateWeakClosure(comment);
+ } else if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector_.marking_deque()->IsEmpty())) {
+ CollectAllGarbage(kNoGCFlags, comment);
+ }
+}
+
+
bool Heap::TryFinalizeIdleIncrementalMarking(
- bool is_long_idle_notification, double idle_time_in_ms,
- size_t size_of_objects,
+ double idle_time_in_ms, size_t size_of_objects,
size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
if (FLAG_overapproximate_weak_closure &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
@@ -4675,57 +4898,19 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
- gc_idle_time_handler_.NotifyIdleMarkCompact();
- ReduceNewSpaceSize(is_long_idle_notification);
return true;
}
return false;
}
-double Heap::MonotonicallyIncreasingTimeInMs() {
- return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
- static_cast<double>(base::Time::kMillisecondsPerSecond);
-}
-
-
-bool Heap::IdleNotification(int idle_time_in_ms) {
- return IdleNotification(
- V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
- (static_cast<double>(idle_time_in_ms) /
- static_cast<double>(base::Time::kMillisecondsPerSecond)));
-}
-
-
-bool Heap::IdleNotification(double deadline_in_seconds) {
- CHECK(HasBeenSetUp()); // http://crbug.com/425035
- double deadline_in_ms =
- deadline_in_seconds *
- static_cast<double>(base::Time::kMillisecondsPerSecond);
- HistogramTimerScope idle_notification_scope(
- isolate_->counters()->gc_idle_notification());
- double start_ms = MonotonicallyIncreasingTimeInMs();
- double idle_time_in_ms = deadline_in_ms - start_ms;
- bool is_long_idle_notification =
- static_cast<size_t>(idle_time_in_ms) >
- GCIdleTimeHandler::kMaxFrameRenderingIdleTime;
-
+GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
GCIdleTimeHandler::HeapState heap_state;
heap_state.contexts_disposed = contexts_disposed_;
heap_state.contexts_disposal_rate =
tracer()->ContextDisposalRateInMilliseconds();
heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
- // TODO(ulan): Start incremental marking only for large heaps.
- intptr_t limit = old_generation_allocation_limit_;
- if (is_long_idle_notification) {
- limit = idle_old_generation_allocation_limit_;
- }
-
- heap_state.can_start_incremental_marking =
- incremental_marking()->CanBeActivated() &&
- HeapIsFullEnoughToStartIncrementalMarking(limit) &&
- !mark_compact_collector()->sweeping_in_progress();
heap_state.sweeping_in_progress =
mark_compact_collector()->sweeping_in_progress();
heap_state.sweeping_completed =
@@ -4742,72 +4927,64 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
heap_state.used_new_space_size = new_space_.Size();
heap_state.new_space_capacity = new_space_.Capacity();
heap_state.new_space_allocation_throughput_in_bytes_per_ms =
- static_cast<size_t>(
- tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+ tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
+ return heap_state;
+}
- GCIdleTimeAction action =
- gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
- isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
- static_cast<int>(idle_time_in_ms));
- if (is_long_idle_notification) {
- int committed_memory = static_cast<int>(CommittedMemory() / KB);
- int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
- isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
- start_ms, committed_memory);
- isolate()->counters()->aggregated_memory_heap_used()->AddSample(
- start_ms, used_memory);
+double Heap::AdvanceIncrementalMarking(
+ intptr_t step_size_in_bytes, double deadline_in_ms,
+ IncrementalMarking::StepActions step_actions) {
+ DCHECK(!incremental_marking()->IsStopped());
+
+ if (step_size_in_bytes == 0) {
+ step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+ static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
+ static_cast<size_t>(
+ tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
}
+ double remaining_time_in_ms = 0.0;
+ do {
+ incremental_marking()->Step(
+ step_size_in_bytes, step_actions.completion_action,
+ step_actions.force_marking, step_actions.force_completion);
+ remaining_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
+ } while (remaining_time_in_ms >=
+ 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
+ !incremental_marking()->IsComplete() &&
+ !mark_compact_collector_.marking_deque()->IsEmpty());
+ return remaining_time_in_ms;
+}
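+
+// Usage pattern, mirroring the DO_INCREMENTAL_MARKING case further down:
+//
+//   double left_ms = heap->AdvanceIncrementalMarking(
+//       0 /* let the handler estimate the step size */, deadline_in_ms,
+//       IncrementalMarking::IdleStepActions());
+//   if (left_ms > 0.0) {
+//     // Try to finalize marking in the remaining idle time.
+//   }
+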
+
+
+bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
+ GCIdleTimeHandler::HeapState heap_state,
+ double deadline_in_ms) {
bool result = false;
switch (action.type) {
case DONE:
result = true;
break;
case DO_INCREMENTAL_MARKING: {
- if (incremental_marking()->IsStopped()) {
- // TODO(ulan): take reduce_memory into account.
- incremental_marking()->Start();
- }
- double remaining_idle_time_in_ms = 0.0;
- do {
- incremental_marking()->Step(
- action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_MARKING,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION);
- remaining_idle_time_in_ms =
- deadline_in_ms - MonotonicallyIncreasingTimeInMs();
- } while (remaining_idle_time_in_ms >=
- 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
- !incremental_marking()->IsComplete() &&
- !mark_compact_collector_.marking_deque()->IsEmpty());
+ const double remaining_idle_time_in_ms =
+ AdvanceIncrementalMarking(action.parameter, deadline_in_ms,
+ IncrementalMarking::IdleStepActions());
if (remaining_idle_time_in_ms > 0.0) {
action.additional_work = TryFinalizeIdleIncrementalMarking(
- is_long_idle_notification, remaining_idle_time_in_ms,
- heap_state.size_of_objects,
+ remaining_idle_time_in_ms, heap_state.size_of_objects,
heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
}
break;
}
case DO_FULL_GC: {
- if (is_long_idle_notification && gc_count_at_last_idle_gc_ == gc_count_) {
- isolate_->compilation_cache()->Clear();
- }
- if (contexts_disposed_) {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
- } else {
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: finalize idle round");
- }
- gc_count_at_last_idle_gc_ = gc_count_;
- ReduceNewSpaceSize(is_long_idle_notification);
- gc_idle_time_handler_.NotifyIdleMarkCompact();
+ DCHECK(contexts_disposed_ > 0);
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
+ CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
break;
}
case DO_SCAVENGE:
CollectGarbage(NEW_SPACE, "idle notification: scavenge");
- ReduceNewSpaceSize(is_long_idle_notification);
break;
case DO_FINALIZE_SWEEPING:
mark_compact_collector()->EnsureSweepingCompleted();
@@ -4816,10 +4993,23 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
break;
}
+ return result;
+}
+
+
+void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
+ GCIdleTimeHandler::HeapState heap_state,
+ double start_ms, double deadline_in_ms) {
+ double idle_time_in_ms = deadline_in_ms - start_ms;
double current_time = MonotonicallyIncreasingTimeInMs();
last_idle_notification_time_ = current_time;
double deadline_difference = deadline_in_ms - current_time;
+ contexts_disposed_ = 0;
+
+ isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
+ static_cast<int>(idle_time_in_ms));
+
if (deadline_difference >= 0) {
if (action.type != DONE && action.type != DO_NOTHING) {
isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
@@ -4847,8 +5037,62 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
}
PrintF("\n");
}
+}
- contexts_disposed_ = 0;
+
+void Heap::CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
+ double now_ms) {
+ if (idle_time_in_ms >= GCIdleTimeHandler::kMinBackgroundIdleTime) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kBackgroundIdleNotification;
+ event.time_ms = now_ms;
+ event.can_start_incremental_gc = incremental_marking()->IsStopped() &&
+ incremental_marking()->CanBeActivated();
+ memory_reducer_.NotifyBackgroundIdleNotification(event);
+ optimize_for_memory_usage_ = true;
+ } else {
+ optimize_for_memory_usage_ = false;
+ }
+}
+
+
+double Heap::MonotonicallyIncreasingTimeInMs() {
+ return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+}
+
+
+bool Heap::IdleNotification(int idle_time_in_ms) {
+ return IdleNotification(
+ V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
+ (static_cast<double>(idle_time_in_ms) /
+ static_cast<double>(base::Time::kMillisecondsPerSecond)));
+}
+
+
+bool Heap::IdleNotification(double deadline_in_seconds) {
+ CHECK(HasBeenSetUp());
+ double deadline_in_ms =
+ deadline_in_seconds *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+ HistogramTimerScope idle_notification_scope(
+ isolate_->counters()->gc_idle_notification());
+ double start_ms = MonotonicallyIncreasingTimeInMs();
+ double idle_time_in_ms = deadline_in_ms - start_ms;
+
+ CheckAndNotifyBackgroundIdleNotification(idle_time_in_ms, start_ms);
+
+ tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
+ OldGenerationAllocationCounter());
+
+ GCIdleTimeHandler::HeapState heap_state = ComputeHeapState();
+
+ GCIdleTimeAction action =
+ gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
+
+ bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
+
+ IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
return result;
}
@@ -4953,6 +5197,20 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
}
+bool Heap::IsValidAllocationSpace(AllocationSpace space) {
+ switch (space) {
+ case NEW_SPACE:
+ case OLD_SPACE:
+ case CODE_SPACE:
+ case MAP_SPACE:
+ case LO_SPACE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
bool Heap::RootIsImmortalImmovable(int root_index) {
switch (root_index) {
#define CASE(name) \
@@ -4966,20 +5224,6 @@ bool Heap::RootIsImmortalImmovable(int root_index) {
}
-bool Heap::GetRootListIndex(Handle<HeapObject> object,
- Heap::RootListIndex* index_return) {
- Object* ptr = *object;
-#define IMMORTAL_IMMOVABLE_ROOT(Name) \
- if (ptr == roots_[Heap::k##Name##RootIndex]) { \
- *index_return = k##Name##RootIndex; \
- return true; \
- }
- IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
-#undef IMMORTAL_IMMOVABLE_ROOT
- return false;
-}
-
-
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
@@ -5012,6 +5256,7 @@ void Heap::Verify() {
void Heap::ZapFromSpace() {
+ if (!new_space_.IsFromSpaceCommitted()) return;
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
while (it.has_next()) {
@@ -5307,6 +5552,30 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
}
+void Heap::AddToRingBuffer(const char* string) {
+ size_t first_part =
+ Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
+ memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
+ ring_buffer_end_ += first_part;
+ if (first_part < strlen(string)) {
+ ring_buffer_full_ = true;
+ size_t second_part = strlen(string) - first_part;
+ memcpy(trace_ring_buffer_, string + first_part, second_part);
+ ring_buffer_end_ = second_part;
+ }
+}
+
+
+void Heap::GetFromRingBuffer(char* buffer) {
+ size_t copied = 0;
+ if (ring_buffer_full_) {
+ copied = kTraceRingBufferSize - ring_buffer_end_;
+ memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
+ }
+ memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
+}
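+
+// Wraparound walk-through, assuming kTraceRingBufferSize == 8 purely for
+// illustration:
+//
+//   AddToRingBuffer("abcdef");  // buffer = "abcdef..", end = 6
+//   AddToRingBuffer("ghij");    // "gh" fills the tail, "ij" wraps around:
+//                               // buffer = "ijcdefgh", end = 2, full = true
+//   GetFromRingBuffer(out);     // copies "cdefgh", then "ij" -> "cdefghij"
+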
+
+
bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
@@ -5339,6 +5608,13 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
stats->size_per_type[type] += obj->Size();
}
}
+ if (stats->last_few_messages != NULL)
+ GetFromRingBuffer(stats->last_few_messages);
+ if (stats->js_stacktrace != NULL) {
+ FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
+ StringStream accumulator(&fixed);
+ isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ }
}
@@ -5357,12 +5633,76 @@ int64_t Heap::PromotedExternalMemorySize() {
}
+const double Heap::kMinHeapGrowingFactor = 1.1;
+const double Heap::kMaxHeapGrowingFactor = 4.0;
+const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
+const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
+const double Heap::kTargetMutatorUtilization = 0.97;
+
+
+// Given the GC speed and the allocation throughput (mutator speed), both in
+// bytes per ms, this function returns the heap growing factor that will
+// achieve kTargetMutatorUtilization if the GC speed and the mutator speed
+// remain the same until the next GC.
+//
+// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
+// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
+// time spent in the garbage collector.
+//
+// Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
+// time-frame from the end of the current GC to the end of the next GC. Based
+// on the MU we can compute the heap growing factor F as
+//
+// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
+//
+// This formula can be derived as follows.
+//
+// F = Limit / Live by definition, where the Limit is the allocation limit,
+// and the Live is size of live objects.
+// Let's assume that we already know the Limit. Then:
+// TG = Limit / gc_speed
+// TM = (TM + TG) * MU, by definition of MU.
+// TM = TG * MU / (1 - MU)
+// TM = Limit * MU / (gc_speed * (1 - MU))
+// On the other hand, if the allocation throughput remains constant:
+// Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
+// Solving it for TM, we get
+// TM = (Limit - Live) / mutator_speed
+// Combining the two equations for TM:
+// (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
+// (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
+// substitute R = gc_speed / mutator_speed
+// (Limit - Live) = Limit * MU / (R * (1 - MU))
+// substitute F = Limit / Live
+// F - 1 = F * MU / (R * (1 - MU))
+// F - F * MU / (R * (1 - MU)) = 1
+// F * (1 - MU / (R * (1 - MU))) = 1
+// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
+// F = R * (1 - MU) / (R * (1 - MU) - MU)
+double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
+ if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
+
+ const double speed_ratio = gc_speed / mutator_speed;
+ const double mu = kTargetMutatorUtilization;
+
+ const double a = speed_ratio * (1 - mu);
+ const double b = speed_ratio * (1 - mu) - mu;
+
+ // The factor is a / b, but we need to check for small b first.
+ double factor =
+ (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
+ factor = Min(factor, kMaxHeapGrowingFactor);
+ factor = Max(factor, kMinHeapGrowingFactor);
+ return factor;
+}
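+
+// Plugging numbers into the formula, with mu = 0.97:
+//
+//   speed_ratio = 100: F = 3.0 / (3.0 - 0.97) ~= 1.48
+//   speed_ratio = 40:  F = 1.2 / (1.2 - 0.97) ~= 5.2, where the
+//                      a < b * kMaxHeapGrowingFactor guard clamps to 4.0
+//
+// A collector that is slow relative to the mutator (small ratio) thus grows
+// the heap aggressively to space out GCs; a fast collector keeps the limit
+// tight.
+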
+
+
intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
intptr_t old_gen_size) {
CHECK(factor > 1.0);
CHECK(old_gen_size > 0);
intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
- limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+ limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
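+  // Worked example, assuming kMinimumOldGenerationAllocationLimit is small
+  // relative to the 50 MB of growth here: old_gen_size = 100 MB,
+  // factor = 1.5, new space capacity = 16 MB, max_old_generation_size_ =
+  // 700 MB:
+  //
+  //   limit   = 100 * 1.5 + 16  = 166 MB
+  //   halfway = (100 + 700) / 2 = 400 MB
+  //   result  = Min(166, 400)   = 166 MB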
@@ -5370,63 +5710,63 @@ intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
- int freed_global_handles) {
- const int kMaxHandles = 1000;
- const int kMinHandles = 100;
- const double min_factor = 1.1;
- double max_factor = 4;
- const double idle_max_factor = 1.5;
- // We set the old generation growing factor to 2 to grow the heap slower on
- // memory-constrained devices.
- if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
- max_factor = 2;
- }
-
- // If there are many freed global handles, then the next full GC will
- // likely collect a lot of garbage. Choose the heap growing factor
- // depending on freed global handles.
- // TODO(ulan, hpayer): Take into account mutator utilization.
- // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
- // Look into that.
- double factor;
- double idle_factor;
- if (freed_global_handles <= kMinHandles) {
- factor = max_factor;
- } else if (freed_global_handles >= kMaxHandles) {
- factor = min_factor;
- } else {
- // Compute factor using linear interpolation between points
- // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
- factor = max_factor -
- (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
- (kMaxHandles - kMinHandles);
+ double gc_speed,
+ double mutator_speed) {
+ const double kConservativeHeapGrowingFactor = 1.3;
+
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(isolate_,
+ "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+ "(gc=%.f, mutator=%.f)\n",
+ factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
+ gc_speed, mutator_speed);
}
- if (FLAG_stress_compaction ||
- mark_compact_collector()->reduce_memory_footprint_) {
- factor = min_factor;
+ // We set the old generation growing factor to 2 to grow the heap slower on
+ // memory-constrained devices.
+ if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
+ FLAG_optimize_for_size) {
+ factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
}
- const double kConservativeHeapGrowingFactor = 1.3;
- if (gc_idle_time_handler_.ShouldGrowHeapSlowly()) {
+ if (memory_reducer_.ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
factor = Min(factor, kConservativeHeapGrowingFactor);
}
-
- idle_factor = Min(factor, idle_max_factor);
+ if (FLAG_stress_compaction ||
+ mark_compact_collector()->reduce_memory_footprint_) {
+ factor = kMinHeapGrowingFactor;
+ }
old_generation_allocation_limit_ =
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
- idle_old_generation_allocation_limit_ =
- CalculateOldGenerationAllocationLimit(idle_factor, old_gen_size);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(
- isolate_,
- "Grow: old size: %" V8_PTR_PREFIX "d KB, new limit: %" V8_PTR_PREFIX
- "d KB (%.1f), new idle limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
- old_gen_size / KB, old_generation_allocation_limit_ / KB, factor,
- idle_old_generation_allocation_limit_ / KB, idle_factor);
+ PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
+ "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+ old_gen_size / KB, old_generation_allocation_limit_ / KB,
+ factor);
+ }
+}
+
+
+void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+ double gc_speed,
+ double mutator_speed) {
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+ intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+ if (limit < old_generation_allocation_limit_) {
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(isolate_, "Dampen: old size: %" V8_PTR_PREFIX
+ "d KB, old limit: %" V8_PTR_PREFIX
+ "d KB, "
+ "new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+ old_gen_size / KB, old_generation_allocation_limit_ / KB,
+ limit / KB, factor);
+ }
+ old_generation_allocation_limit_ = limit;
}
}
@@ -5536,6 +5876,12 @@ bool Heap::SetUp() {
}
}
+ for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
+ i++) {
+ deferred_counters_[i] = 0;
+ }
+
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5633,6 +5979,8 @@ void Heap::TearDown() {
PrintAlloctionsHash();
}
+ memory_reducer_.TearDown();
+
TearDownArrayBuffers();
isolate_->global_handles()->TearDown();
@@ -5782,7 +6130,9 @@ void Heap::PrintHandles() {
class CheckHandleCountVisitor : public ObjectVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
- ~CheckHandleCountVisitor() { CHECK(handle_count_ < 2000); }
+ ~CheckHandleCountVisitor() {
+ CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
+ }
void VisitPointers(Object** start, Object** end) {
handle_count_ += end - start;
}
@@ -6604,5 +6954,43 @@ void Heap::UnregisterStrongRoots(Object** start) {
list = next;
}
}
+
+
+bool Heap::GetObjectTypeName(size_t index, const char** object_type,
+ const char** object_sub_type) {
+ if (index >= OBJECT_STATS_COUNT) return false;
+
+ switch (static_cast<int>(index)) {
+#define COMPARE_AND_RETURN_NAME(name) \
+ case name: \
+ *object_type = #name; \
+ *object_sub_type = ""; \
+ return true;
+ INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+#define COMPARE_AND_RETURN_NAME(name) \
+ case FIRST_CODE_KIND_SUB_TYPE + Code::name: \
+ *object_type = "CODE_TYPE"; \
+ *object_sub_type = "CODE_KIND/" #name; \
+ return true;
+ CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+#define COMPARE_AND_RETURN_NAME(name) \
+ case FIRST_FIXED_ARRAY_SUB_TYPE + name: \
+ *object_type = "FIXED_ARRAY_TYPE"; \
+ *object_sub_type = #name; \
+ return true;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+#define COMPARE_AND_RETURN_NAME(name) \
+ case FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge: \
+ *object_type = "CODE_TYPE"; \
+ *object_sub_type = "CODE_AGE/" #name; \
+ return true;
+ CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+ }
+ return false;
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index ab08531a06..38166439ea 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -16,6 +16,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-reducer.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
@@ -46,13 +47,13 @@ namespace internal {
V(Map, meta_map, MetaMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(Map, float32x4_map, Float32x4Map) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
V(Map, scope_info_map, ScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
- V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
V(Map, weak_cell_map, WeakCellMap) \
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
@@ -60,7 +61,6 @@ namespace internal {
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
/* being compacted. */ \
@@ -166,6 +166,7 @@ namespace internal {
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, infinity_value, InfinityValue) \
V(HeapNumber, minus_zero_value, MinusZeroValue) \
+ V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
V(JSObject, message_listeners, MessageListeners) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
@@ -187,10 +188,12 @@ namespace internal {
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(FixedArray, keyed_load_dummy_vector, KeyedLoadDummyVector) \
+ V(FixedArray, keyed_store_dummy_vector, KeyedStoreDummyVector) \
V(FixedArray, detached_contexts, DetachedContexts) \
V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
V(PropertyCell, array_protector, ArrayProtector) \
+ V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(Object, weak_stack_trace_list, WeakStackTraceList)
// Entries in this list are limited to Smis and are not visited during GC.
@@ -232,6 +235,7 @@ namespace internal {
V(source_string, "source") \
V(source_url_string, "source_url") \
V(source_mapping_url_string, "source_mapping_url") \
+ V(this_string, "this") \
V(global_string, "global") \
V(ignore_case_string, "ignoreCase") \
V(multiline_string, "multiline") \
@@ -270,7 +274,6 @@ namespace internal {
V(illegal_access_string, "illegal access") \
V(cell_value_string, "%cell_value") \
V(illegal_argument_string, "illegal argument") \
- V(identity_hash_string, "v8::IdentityHash") \
V(closure_string, "(closure)") \
V(dot_string, ".") \
V(compare_ic_string, "==") \
@@ -293,6 +296,7 @@ namespace internal {
#define PRIVATE_SYMBOL_LIST(V) \
V(nonextensible_symbol) \
V(sealed_symbol) \
+ V(hash_code_symbol) \
V(frozen_symbol) \
V(nonexistent_symbol) \
V(elements_transition_symbol) \
@@ -343,13 +347,13 @@ namespace internal {
V(MetaMap) \
V(HeapNumberMap) \
V(MutableHeapNumberMap) \
+ V(Float32x4Map) \
V(NativeContextMap) \
V(FixedArrayMap) \
V(CodeMap) \
V(ScopeInfoMap) \
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
- V(ConstantPoolArrayMap) \
V(WeakCellMap) \
V(NoInterceptorResultSentinel) \
V(HashTableMap) \
@@ -357,7 +361,6 @@ namespace internal {
V(EmptyFixedArray) \
V(EmptyByteArray) \
V(EmptyDescriptorArray) \
- V(EmptyConstantPoolArray) \
V(ArgumentsMarker) \
V(SymbolMap) \
V(SloppyArgumentsElementsMap) \
@@ -715,10 +718,23 @@ class Heap {
MUST_USE_RESULT AllocationResult
CopyJSObject(JSObject* source, AllocationSite* site = NULL);
- // This method assumes overallocation of one word. It will store a filler
- // before the object if the given object is not double aligned, otherwise
- // it will place the filler after the object.
- MUST_USE_RESULT HeapObject* EnsureDoubleAligned(HeapObject* object, int size);
+ // Calculates the maximum amount of filler that could be required by the
+ // given alignment.
+ static int GetMaximumFillToAlign(AllocationAlignment alignment);
+ // Calculates the actual amount of filler required for a given address at the
+ // given alignment.
+ static int GetFillToAlign(Address address, AllocationAlignment alignment);
+
+ // Creates a filler object and returns a heap object immediately after it.
+ MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
+ int filler_size);
+ // Creates a filler object if needed for alignment and returns a heap object
+ // immediately after it. If any space is left after the returned object,
+  // another filler object is created so the over-allocated memory is iterable.
+ MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
+ int object_size,
+ int allocation_size,
+ AllocationAlignment alignment);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
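
The replacement for EnsureDoubleAligned splits alignment into a size query (GetMaximumFillToAlign, used when reserving memory) and an address-dependent query (GetFillToAlign, used when placing the object). For kDoubleAligned on a 32-bit heap the arithmetic reduces to testing one address bit; a hedged standalone sketch of the idea, not the exact V8 implementation:

#include <cstdint>

const int kPointerSize = 4;  // assume a 32-bit heap for illustration
const int kDoubleSize = 8;

// Worst-case filler a double-aligned allocation may need up front.
int MaxFillToAlign() { return kDoubleSize - kPointerSize; }

// Filler actually needed so that `address` becomes 8-byte aligned.
int FillToAlign(uintptr_t address) {
  return (address & (kDoubleSize - 1)) != 0 ? kPointerSize : 0;
}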
@@ -817,6 +833,27 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
+ // Start incremental marking and ensure that idle time handler can perform
+ // incremental steps.
+ void StartIdleIncrementalMarking();
+
+ // Starts incremental marking assuming incremental marking is currently
+ // stopped.
+ void StartIncrementalMarking(int gc_flags,
+ const GCCallbackFlags gc_callback_flags,
+ const char* reason = nullptr);
+
+ // Performs incremental marking steps of step_size_in_bytes as long as
+  // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
+  // an estimated increment. Returns the remaining time that cannot be used
+ // for incremental marking anymore because a single step would exceed the
+ // deadline.
+ double AdvanceIncrementalMarking(
+ intptr_t step_size_in_bytes, double deadline_in_ms,
+ IncrementalMarking::StepActions step_actions);
+
+ void FinalizeIncrementalMarkingIfComplete(const char* comment);
+
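
Together these entry points compose into an idle-time driver: start marking if it is stopped, advance it in steps until the deadline, and finalize once marking completes. A schematic sketch of the call sequence (this is not the actual GCIdleTimeHandler logic, and it assumes a v8::internal::Heap* in scope):

void DriveIdleMarking(Heap* heap, double deadline_in_ms) {
  if (heap->incremental_marking()->IsStopped()) {
    heap->StartIdleIncrementalMarking();
  }
  // A step size of 0 lets the heap estimate an increment from marking speed.
  heap->AdvanceIncrementalMarking(0, deadline_in_ms,
                                  IncrementalMarking::IdleStepActions());
  heap->FinalizeIncrementalMarkingIfComplete("idle task deadline");
}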
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -943,6 +980,9 @@ class Heap {
bool InSpace(Address addr, AllocationSpace space);
bool InSpace(HeapObject* value, AllocationSpace space);
+ // Checks whether the space is valid.
+ static bool IsValidAllocationSpace(AllocationSpace space);
+
  // Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
@@ -1023,6 +1063,13 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
+ size_t object_count_last_gc(size_t index) {
+ return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0;
+ }
+ size_t object_size_last_gc(size_t index) {
+ return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0;
+ }
+
// Write barrier support for address[offset] = o.
INLINE(void RecordWrite(Address address, int offset));
@@ -1082,7 +1129,10 @@ class Heap {
inline intptr_t PromotedTotalSize() {
int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
- if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
+ if (total > std::numeric_limits<intptr_t>::max()) {
+ // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
+ return std::numeric_limits<intptr_t>::max();
+ }
if (total < 0) return 0;
return static_cast<intptr_t>(total);
}
@@ -1132,14 +1182,31 @@ class Heap {
static const int kMaxExecutableSizeHugeMemoryDevice =
256 * kPointerMultiplier;
+ static const int kTraceRingBufferSize = 512;
+ static const int kStacktraceBufferSize = 512;
+
+ static const double kMinHeapGrowingFactor;
+ static const double kMaxHeapGrowingFactor;
+ static const double kMaxHeapGrowingFactorMemoryConstrained;
+ static const double kMaxHeapGrowingFactorIdle;
+ static const double kTargetMutatorUtilization;
+
+ static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+
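
HeapGrowingFactor derives the old-generation growth factor F from measured speeds. Writing MU for the target mutator utilization and R = gc_speed / mutator_speed, and assuming a cycle allocates (F-1)*H bytes and then collects the grown heap, solving MU = mutator_time / (mutator_time + gc_time) gives F = R*(1-MU) / (R*(1-MU) - MU), clamped between the min and max factors. A standalone sketch under those assumptions (the constants here are illustrative, not V8's tuned values):

#include <algorithm>

const double kMinFactor = 1.1;  // illustrative bounds only
const double kMaxFactor = 4.0;
const double kTargetMU = 0.97;

double GrowingFactor(double gc_speed, double mutator_speed) {
  if (gc_speed == 0 || mutator_speed == 0) return kMaxFactor;
  const double r = gc_speed / mutator_speed;
  const double denominator = r * (1 - kTargetMU) - kTargetMU;
  // If GC is too slow relative to allocation, the target MU is unreachable;
  // grow as much as allowed.
  if (denominator <= 0) return kMaxFactor;
  double factor = r * (1 - kTargetMU) / denominator;
  return std::min(kMaxFactor, std::max(kMinFactor, factor));
}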
// Calculates the allocation limit based on a given growing factor and a
// given old generation size.
intptr_t CalculateOldGenerationAllocationLimit(double factor,
intptr_t old_gen_size);
// Sets the allocation limit to trigger the next full garbage collection.
- void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
- int freed_global_handles);
+ void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
+ double mutator_speed);
+
+ // Decrease the allocation limit if the new limit based on the given
+ // parameters is lower than the current limit.
+ void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+ double gc_speed,
+ double mutator_speed);
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1186,10 +1253,6 @@ class Heap {
kSmiRootsStart = kStringTableRootIndex + 1
};
- // Get the root list index for {object} if such a root list index exists.
- bool GetRootListIndex(Handle<HeapObject> object,
- Heap::RootListIndex* index_return);
-
Object* root(RootListIndex index) { return roots_[index]; }
STATIC_ASSERT(kUndefinedValueRootIndex ==
@@ -1239,6 +1302,10 @@ class Heap {
semi_space_copied_object_size_ += object_size;
}
+ inline intptr_t SurvivedNewSpaceObjectSize() {
+ return promoted_objects_size_ + semi_space_copied_object_size_;
+ }
+
inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
@@ -1298,6 +1365,40 @@ class Heap {
}
}
+ void UpdateNewSpaceAllocationCounter() {
+ new_space_allocation_counter_ = NewSpaceAllocationCounter();
+ }
+
+ size_t NewSpaceAllocationCounter() {
+ return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+ }
+
+ // This should be used only for testing.
+ void set_new_space_allocation_counter(size_t new_value) {
+ new_space_allocation_counter_ = new_value;
+ }
+
+ void UpdateOldGenerationAllocationCounter() {
+ old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+ }
+
+ size_t OldGenerationAllocationCounter() {
+ return old_generation_allocation_counter_ + PromotedSinceLastGC();
+ }
+
+ // This should be used only for testing.
+ void set_old_generation_allocation_counter(size_t new_value) {
+ old_generation_allocation_counter_ = new_value;
+ }
+
+ size_t PromotedSinceLastGC() {
+ return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
+ }
+
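
Because both counters are cumulative and never reset, an allocation rate over any window is just a difference of two samples divided by elapsed time, which is the kind of arithmetic heuristics such as HasLowAllocationRate can build on. A standalone sketch of the sampling (names are illustrative):

#include <cstddef>

struct Sample {
  double time_ms;
  size_t new_space_counter;  // NewSpaceAllocationCounter()
  size_t old_gen_counter;    // OldGenerationAllocationCounter()
};

// Bytes per millisecond allocated between two samples.
double AllocationRate(const Sample& before, const Sample& after) {
  double dt = after.time_ms - before.time_ms;
  if (dt <= 0) return 0;
  size_t bytes = (after.new_space_counter - before.new_space_counter) +
                 (after.old_gen_counter - before.old_gen_counter);
  return static_cast<double>(bytes) / dt;
}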
+ // Record the fact that we generated some optimized code since the last GC
+  // which will pretenure some previously unpretenured allocations.
+ void RecordDeoptForPretenuring() { gathering_lifetime_feedback_ = 2; }
+
// Update GC statistics that are tracked on the Heap.
void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
double marking_time);
@@ -1311,6 +1412,8 @@ class Heap {
// Returns minimal interval between two subsequent collections.
double get_min_in_mutator() { return min_in_mutator_; }
+ void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
+
MarkCompactCollector* mark_compact_collector() {
return &mark_compact_collector_;
}
@@ -1453,6 +1556,8 @@ class Heap {
void TraceObjectStats();
void TraceObjectStat(const char* name, int count, int size, double time);
void CheckpointObjectStats();
+ bool GetObjectTypeName(size_t index, const char** object_type,
+ const char** object_sub_type);
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
@@ -1535,6 +1640,12 @@ class Heap {
// An ArrayBuffer moved from new space to old space.
void PromoteArrayBuffer(Object* buffer);
+ bool HasLowAllocationRate();
+ bool HasHighFragmentation();
+ bool HasHighFragmentation(intptr_t used, intptr_t committed);
+
+ bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+
protected:
// Methods made available to tests.
@@ -1560,12 +1671,17 @@ class Heap {
bool alloc_props = true,
AllocationSite* allocation_site = NULL);
- // Allocated a HeapNumber from value.
+ // Allocates a HeapNumber from value.
MUST_USE_RESULT AllocationResult
AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
- // Allocate a byte array of the specified length
+ // Allocates a Float32x4 from the given lane values.
+ MUST_USE_RESULT AllocationResult
+ AllocateFloat32x4(float w, float x, float y, float z,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
@@ -1690,14 +1806,14 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
- // The allocation limit when there is >16.66ms idle time in the idle time
- // handler.
- intptr_t idle_old_generation_allocation_limit_;
-
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
+ // Indicates that memory usage is more important than latency.
+ // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
+ bool optimize_for_memory_usage_;
+
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
bool inline_allocation_disabled_;
@@ -1741,6 +1857,8 @@ class Heap {
// any string when looked up in properties.
String* hidden_string_;
+ void AddPrivateGlobalSymbols(Handle<Object> private_intern_table);
+
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {
@@ -1792,7 +1910,7 @@ class Heap {
// space evacuation. Note that between feedback collection and calling this
// method object in old space must not move.
// Right now we only process pretenuring feedback in high promotion mode.
- void ProcessPretenuringFeedback();
+ bool ProcessPretenuringFeedback();
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
@@ -1834,15 +1952,13 @@ class Heap {
HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
- enum Alignment { kWordAligned, kDoubleAligned };
-
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
MUST_USE_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationSpace space, AllocationSpace retry_space,
- Alignment aligment = kWordAligned);
+      AllocationAlignment alignment = kWordAligned);
// Allocates a heap object based on the map.
MUST_USE_RESULT AllocationResult
@@ -1925,12 +2041,6 @@ class Heap {
MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
FixedDoubleArray* src);
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
- ConstantPoolArray* src);
-
-
// Computes a single character string where the character has code.
// A cache is used for one-byte (Latin1) codes.
MUST_USE_RESULT AllocationResult
@@ -1939,17 +2049,6 @@ class Heap {
// Allocate a symbol in old space.
MUST_USE_RESULT AllocationResult AllocateSymbol();
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);
-
- MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small);
-
- MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small,
- const ConstantPoolArray::NumberOfEntries& extended);
-
// Allocates an external array of the specified length and type.
MUST_USE_RESULT AllocationResult
AllocateExternalArray(int length, ExternalArrayType array_type,
@@ -1957,8 +2056,8 @@ class Heap {
// Allocates a fixed typed array of the specified length and type.
MUST_USE_RESULT AllocationResult
- AllocateFixedTypedArray(int length, ExternalArrayType array_type,
- PretenureFlag pretenure);
+ AllocateFixedTypedArray(int length, ExternalArrayType array_type,
+ bool initialize, PretenureFlag pretenure);
// Make a copy of src and return it.
MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
@@ -1989,9 +2088,6 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
- // Allocate empty constant pool array.
- MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
-
// Allocate a tenured simple cell.
MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
@@ -2080,6 +2176,8 @@ class Heap {
// Total RegExp code ever generated
double total_regexp_code_generated_;
+ int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
+
GCTracer tracer_;
// Creates and installs the full-sized number string cache.
@@ -2099,13 +2197,23 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
+ enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
+
static const int kYoungSurvivalRateHighThreshold = 90;
+ static const int kYoungSurvivalRateLowThreshold = 10;
static const int kYoungSurvivalRateAllowedDeviation = 15;
static const int kOldSurvivalRateLowThreshold = 10;
+ bool new_space_high_promotion_mode_active_;
+ // If this is non-zero, then there is hope yet that the optimized code we
+ // have generated will solve our high promotion rate problems, so we don't
+ // need to go into high promotion mode just yet.
+ int gathering_lifetime_feedback_;
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
+ int low_survival_rate_period_length_;
+ double survival_rate_;
double promotion_ratio_;
double promotion_rate_;
intptr_t semi_space_copied_object_size_;
@@ -2121,19 +2229,83 @@ class Heap {
// of the allocation site.
unsigned int maximum_size_scavenges_;
- // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
- // Re-visit incremental marking heuristics.
+ SurvivalRateTrend previous_survival_rate_trend_;
+ SurvivalRateTrend survival_rate_trend_;
+
+ void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
+ DCHECK(survival_rate_trend != FLUCTUATING);
+ previous_survival_rate_trend_ = survival_rate_trend_;
+ survival_rate_trend_ = survival_rate_trend;
+ }
+
+ SurvivalRateTrend survival_rate_trend() {
+ if (survival_rate_trend_ == STABLE) {
+ return STABLE;
+ } else if (previous_survival_rate_trend_ == STABLE) {
+ return survival_rate_trend_;
+ } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
+ return FLUCTUATING;
+ } else {
+ return survival_rate_trend_;
+ }
+ }
+
+ bool IsStableOrIncreasingSurvivalTrend() {
+ switch (survival_rate_trend()) {
+ case STABLE:
+ case INCREASING:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool IsStableOrDecreasingSurvivalTrend() {
+ switch (survival_rate_trend()) {
+ case STABLE:
+ case DECREASING:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool IsIncreasingSurvivalTrend() {
+ return survival_rate_trend() == INCREASING;
+ }
+
+ bool IsLowSurvivalRate() { return low_survival_rate_period_length_ > 0; }
+
bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
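
survival_rate_trend() folds the previous and current per-GC readings into one answer: STABLE now wins outright, a STABLE previous cycle defers to the current reading, and two disagreeing non-stable readings report FLUCTUATING. A standalone sketch mirroring that fold, with a couple of worked cases:

#include <cassert>

enum Trend { INCREASING, STABLE, DECREASING, FLUCTUATING };

// Mirrors the combination rule in survival_rate_trend() above.
Trend Combined(Trend previous, Trend current) {
  if (current == STABLE) return STABLE;
  if (previous == STABLE) return current;
  if (current != previous) return FLUCTUATING;
  return current;
}

int main() {
  assert(Combined(INCREASING, INCREASING) == INCREASING);
  assert(Combined(INCREASING, DECREASING) == FLUCTUATING);
  assert(Combined(STABLE, DECREASING) == DECREASING);
}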
void ConfigureInitialOldGenerationSize();
+ void ConfigureNewGenerationSize();
+
void SelectScavengingVisitorsTable();
- void ReduceNewSpaceSize(bool is_long_idle_notification);
+ bool HasLowYoungGenerationAllocationRate();
+ bool HasLowOldGenerationAllocationRate();
+ double YoungGenerationMutatorUtilization();
+ double OldGenerationMutatorUtilization();
+
+ void ReduceNewSpaceSize();
bool TryFinalizeIdleIncrementalMarking(
- bool is_long_idle_notification, double idle_time_in_ms,
- size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+ double idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms);
+
+ GCIdleTimeHandler::HeapState ComputeHeapState();
+
+ bool PerformIdleTimeAction(GCIdleTimeAction action,
+ GCIdleTimeHandler::HeapState heap_state,
+ double deadline_in_ms);
+
+ void IdleNotificationEpilogue(GCIdleTimeAction action,
+ GCIdleTimeHandler::HeapState heap_state,
+ double start_ms, double deadline_in_ms);
+ void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
+ double now_ms);
void ClearObjectStats(bool clear_last_time_stats = false);
@@ -2141,6 +2313,9 @@ class Heap {
inline void UpdateAllocationsHash(uint32_t value);
inline void PrintAlloctionsHash();
+ void AddToRingBuffer(const char* string);
+ void GetFromRingBuffer(char* buffer);
+
// Object counts and used memory by InstanceType
size_t object_counts_[OBJECT_STATS_COUNT];
size_t object_counts_last_time_[OBJECT_STATS_COUNT];
@@ -2159,15 +2334,18 @@ class Heap {
// Minimal interval between two subsequent collections.
double min_in_mutator_;
- // Cumulative GC time spent in marking
+ // Cumulative GC time spent in marking.
double marking_time_;
- // Cumulative GC time spent in sweeping
+ // Cumulative GC time spent in sweeping.
double sweeping_time_;
- // Last time an idle notification happened
+ // Last time an idle notification happened.
double last_idle_notification_time_;
+ // Last time a garbage collection happened.
+ double last_gc_time_;
+
MarkCompactCollector mark_compact_collector_;
StoreBuffer store_buffer_;
@@ -2178,12 +2356,25 @@ class Heap {
GCIdleTimeHandler gc_idle_time_handler_;
- unsigned int gc_count_at_last_idle_gc_;
+ MemoryReducer memory_reducer_;
  // These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
size_t crankshaft_codegen_bytes_generated_;
+ // This counter is increased before each GC and never reset.
+ // To account for the bytes allocated since the last GC, use the
+ // NewSpaceAllocationCounter() function.
+ size_t new_space_allocation_counter_;
+
+ // This counter is increased before each GC and never reset. To
+ // account for the bytes allocated since the last GC, use the
+ // OldGenerationAllocationCounter() function.
+ size_t old_generation_allocation_counter_;
+
+ // The size of objects in old generation after the last MarkCompact GC.
+ size_t old_generation_size_at_last_gc_;
+
// If the --deopt_every_n_garbage_collections flag is set to a positive value,
// this variable holds the number of garbage collections since the last
// deoptimization triggered by garbage collection.
@@ -2192,6 +2383,13 @@ class Heap {
static const int kAllocationSiteScratchpadSize = 256;
int allocation_sites_scratchpad_length_;
+ char trace_ring_buffer_[kTraceRingBufferSize];
+ // If it's not full then the data is from 0 to ring_buffer_end_. If it's
+ // full then the data is from ring_buffer_end_ to the end of the buffer and
+ // from 0 to ring_buffer_end_.
+ bool ring_buffer_full_;
+ size_t ring_buffer_end_;
+
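
The trace ring buffer keeps only the most recent kTraceRingBufferSize bytes: writes advance ring_buffer_end_ and wrap, and once ring_buffer_full_ is set a reader stitches the older tail in front of the newer head, exactly as the comment above describes. A minimal standalone sketch of that discipline:

#include <cstddef>
#include <string>

const size_t kSize = 16;  // stands in for kTraceRingBufferSize
char buffer[kSize];
size_t end_index = 0;
bool full = false;

void Add(const char* s) {
  for (; *s != '\0'; s++) {
    buffer[end_index++] = *s;
    if (end_index == kSize) { end_index = 0; full = true; }
  }
}

std::string Get() {
  std::string out;
  if (full) out.append(buffer + end_index, kSize - end_index);  // older tail
  out.append(buffer, end_index);                                // newer head
  return out;
}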
static const int kMaxMarkCompactsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
@@ -2279,7 +2477,9 @@ class HeapStats {
int* objects_per_type; // 17
int* size_per_type; // 18
int* os_error; // 19
- int* end_marker; // 20
+ char* last_few_messages; // 20
+ char* js_stacktrace; // 21
+ int* end_marker; // 22
};
diff --git a/deps/v8/src/heap/identity-map.cc b/deps/v8/src/heap/identity-map.cc
index e968989f97..a93f607ee7 100644
--- a/deps/v8/src/heap/identity-map.cc
+++ b/deps/v8/src/heap/identity-map.cc
@@ -38,7 +38,7 @@ IdentityMapBase::RawEntry IdentityMapBase::Insert(Handle<Object> key) {
int IdentityMapBase::Hash(Object* address) {
uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
- CHECK_NE(0, raw_address); // Cannot store Smi 0 as a key here, sorry.
+ CHECK_NE(0U, raw_address); // Cannot store Smi 0 as a key here, sorry.
// Xor some of the upper bits, since the lower 2 or 3 are usually aligned.
return static_cast<int>((raw_address >> 11) ^ raw_address);
}
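
The shift-before-xor matters because heap pointers are at least word-aligned, so their lowest two or three bits carry no entropy; folding in bits from eleven positions up re-mixes the address into the low bits a power-of-two table actually indexes with. A standalone illustration:

#include <cstdint>
#include <cstdio>

int HashAddress(uintptr_t raw_address) {
  // Low bits are zero for aligned pointers; mix higher bits down instead.
  return static_cast<int>((raw_address >> 11) ^ raw_address);
}

int main() {
  // Two 8-byte-aligned addresses differing only above bit 11 still land
  // in different low-bit buckets.
  std::printf("%x %x\n", HashAddress(0x401000u) & 0xff,
              HashAddress(0x402000u) & 0xff);
}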
@@ -187,5 +187,5 @@ void IdentityMapBase::Resize() {
heap_->UnregisterStrongRoots(old_keys);
heap_->RegisterStrongRoots(keys_, keys_ + size_);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 95f522bd99..58eb0aa409 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -16,22 +16,34 @@ namespace v8 {
namespace internal {
+IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
+ return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_MARKING,
+ IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+}
+
+
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
state_(STOPPED),
+ is_compacting_(false),
steps_count_(0),
old_generation_space_available_at_start_of_incremental_(0),
old_generation_space_used_at_start_of_incremental_(0),
+ bytes_rescanned_(0),
should_hurry_(false),
marking_speed_(0),
+ bytes_scanned_(0),
allocated_(0),
+ write_barriers_invoked_since_last_step_(0),
idle_marking_delay_counter_(0),
no_marking_scope_depth_(0),
unscanned_bytes_of_large_object_(0),
was_activated_(false),
weak_closure_was_overapproximated_(false),
weak_closure_approximation_rounds_(0),
- request_type_(COMPLETE_MARKING) {}
+ request_type_(COMPLETE_MARKING),
+ gc_callback_flags_(kNoGCCallbackFlags) {}
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
@@ -188,7 +200,12 @@ class IncrementalMarkingMarkingVisitor
} while (scan_until_end && start_offset < object_size);
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
- heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
+ if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
+ heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
+ } else {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ heap->mark_compact_collector()->marking_deque()->UnshiftBlack(object);
+ }
heap->incremental_marking()->NotifyIncompleteScanOfObject(
object_size - (start_offset - already_scanned_offset));
}
@@ -467,9 +484,12 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
-void IncrementalMarking::Start() {
+void IncrementalMarking::Start(int mark_compact_flags,
+ const GCCallbackFlags gc_callback_flags,
+ const char* reason) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start\n");
+ PrintF("[IncrementalMarking] Start (%s)\n",
+ (reason == nullptr) ? "unknown reason" : reason);
}
DCHECK(FLAG_incremental_marking);
DCHECK(FLAG_incremental_marking_steps);
@@ -479,10 +499,13 @@ void IncrementalMarking::Start() {
ResetStepCounters();
+ gc_callback_flags_ = gc_callback_flags;
was_activated_ = true;
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ heap_->mark_compact_collector()->SetFlags(mark_compact_flags);
StartMarking();
+ heap_->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -511,7 +534,8 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();
+ heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
+ MarkCompactCollector::kMaxMarkingDequeSize);
ActivateIncrementalWriteBarrier();
@@ -571,16 +595,6 @@ void IncrementalMarking::MarkObjectGroups() {
}
-void IncrementalMarking::PrepareForScavenge() {
- if (!IsMarking()) return;
- NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
- heap_->new_space()->FromSpaceEnd());
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
if (!IsMarking()) return;
@@ -632,10 +646,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
+ MarkObject(heap_, map);
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
@@ -831,7 +842,7 @@ void IncrementalMarking::Epilogue() {
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
- Start();
+ Start(Heap::kNoGCFlags, kNoGCCallbackFlags, "old space step");
} else {
Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
}
@@ -1033,5 +1044,5 @@ void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
void IncrementalMarking::ClearIdleMarkingDelayCounter() {
idle_marking_delay_counter_ = 0;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 7668def679..706e332327 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -26,6 +26,21 @@ class IncrementalMarking {
enum GCRequestType { COMPLETE_MARKING, OVERAPPROXIMATION };
+ struct StepActions {
+ StepActions(CompletionAction complete_action_,
+ ForceMarkingAction force_marking_,
+ ForceCompletionAction force_completion_)
+ : completion_action(complete_action_),
+ force_marking(force_marking_),
+ force_completion(force_completion_) {}
+
+ CompletionAction completion_action;
+ ForceMarkingAction force_marking;
+ ForceCompletionAction force_completion;
+ };
+
+ static StepActions IdleStepActions();
+
explicit IncrementalMarking(Heap* heap);
static void Initialize();
@@ -67,14 +82,14 @@ class IncrementalMarking {
bool WasActivated();
- void Start();
+ void Start(int mark_compact_flags,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags,
+ const char* reason = nullptr);
void Stop();
void MarkObjectGroups();
- void PrepareForScavenge();
-
void UpdateMarkingDequeAfterScavenge();
void Hurry();
@@ -187,6 +202,8 @@ class IncrementalMarking {
Heap* heap() const { return heap_; }
+ GCCallbackFlags CallbackFlags() const { return gc_callback_flags_; }
+
private:
int64_t SpaceLeftInOldSpace();
@@ -245,6 +262,8 @@ class IncrementalMarking {
GCRequestType request_type_;
+ GCCallbackFlags gc_callback_flags_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 5b29c2175b..9ca06cf2e6 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -52,7 +52,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
migration_slots_buffer_(NULL),
heap_(heap),
marking_deque_memory_(NULL),
- marking_deque_memory_committed_(false),
+ marking_deque_memory_committed_(0),
code_flusher_(NULL),
have_code_to_deoptimize_(false) {
}
@@ -226,7 +226,8 @@ static void VerifyEvacuation(Heap* heap) {
void MarkCompactCollector::SetUp() {
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
- EnsureMarkingDequeIsCommittedAndInitialize(256 * KB);
+ EnsureMarkingDequeIsReserved();
+ EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
}
@@ -259,8 +260,7 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
CollectEvacuationCandidates(heap()->old_space());
- if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
- FLAG_incremental_code_compaction)) {
+ if (FLAG_compact_code_space) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
@@ -336,6 +336,7 @@ void MarkCompactCollector::CollectGarbage() {
DCHECK(state_ == PREPARE_GC);
MarkLiveObjects();
+
DCHECK(heap_->incremental_marking()->IsStopped());
// ClearNonLiveReferences can deoptimize code in dependent code arrays.
@@ -343,7 +344,7 @@ void MarkCompactCollector::CollectGarbage() {
// arrays are cleared or contain only live code objects.
ProcessAndClearWeakCells();
- if (FLAG_collect_maps) ClearNonLiveReferences();
+ ClearNonLiveReferences();
ClearWeakCollections();
@@ -367,7 +368,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
VerifyWeakEmbeddedObjectsInCode();
- if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
+ if (FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
}
#endif
@@ -556,34 +557,6 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
}
-void Marking::SetAllMarkBitsInRange(MarkBit start, MarkBit end) {
- MarkBit::CellType* start_cell = start.cell();
- MarkBit::CellType* end_cell = end.cell();
- MarkBit::CellType start_mask = ~(start.mask() - 1);
- MarkBit::CellType end_mask = (end.mask() << 1) - 1;
-
- if (start_cell == end_cell) {
- *start_cell |= start_mask & end_mask;
- } else {
- *start_cell |= start_mask;
- for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
- *cell = ~0;
- }
- *end_cell |= end_mask;
- }
-}
-
-
-void Marking::ClearAllMarkBitsOfCellsContainedInRange(MarkBit start,
- MarkBit end) {
- MarkBit::CellType* start_cell = start.cell();
- MarkBit::CellType* end_cell = end.cell();
- for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
- *cell = 0;
- }
-}
-
-
void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -641,136 +614,15 @@ const char* AllocationSpaceName(AllocationSpace space) {
}
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them. Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
- // If page was not swept then there are no free list items on it.
- if (!p->WasSwept()) {
- if (FLAG_trace_fragmentation_verbose) {
- PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()), p->LiveBytes());
- }
- return FLAG_always_compact ? 1 : 0;
- }
-
- PagedSpace::SizeStats sizes;
- space->ObtainFreeListStatistics(p, &sizes);
-
- intptr_t ratio;
- intptr_t ratio_threshold;
- intptr_t area_size = space->AreaSize();
- if (space->identity() == CODE_SPACE) {
- ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
- ratio_threshold = 10;
- } else {
- ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
- ratio_threshold = 15;
- }
-
- if (FLAG_trace_fragmentation_verbose) {
- PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
- static_cast<int>(sizes.small_size_),
- static_cast<double>(sizes.small_size_ * 100) / area_size,
- static_cast<int>(sizes.medium_size_),
- static_cast<double>(sizes.medium_size_ * 100) / area_size,
- static_cast<int>(sizes.large_size_),
- static_cast<double>(sizes.large_size_ * 100) / area_size,
- static_cast<int>(sizes.huge_size_),
- static_cast<double>(sizes.huge_size_ * 100) / area_size,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
-
- if (FLAG_always_compact && sizes.Total() != area_size) {
- return 1;
- }
-
- if (ratio <= ratio_threshold) return 0; // Not fragmented.
-
- return static_cast<int>(ratio - ratio_threshold);
-}
-
-
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
- static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
- int max_evacuation_candidates =
- static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
-
- if (FLAG_stress_compaction || FLAG_always_compact) {
- max_evacuation_candidates = kMaxMaxEvacuationCandidates;
- }
-
- class Candidate {
- public:
- Candidate() : fragmentation_(0), page_(NULL) {}
- Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
-
- int fragmentation() { return fragmentation_; }
- Page* page() { return page_; }
-
- private:
- int fragmentation_;
- Page* page_;
- };
-
- enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
+ int area_size = space->AreaSize();
- CompactionMode mode = COMPACT_FREE_LISTS;
-
- intptr_t reserved = number_of_pages * space->AreaSize();
- intptr_t over_reserved = reserved - space->SizeOfObjects();
- static const intptr_t kFreenessThreshold = 50;
-
- if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
- // If reduction of memory footprint was requested, we are aggressive
- // about choosing pages to free. We expect that half-empty pages
- // are easier to compact so slightly bump the limit.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates += 2;
- }
-
-
- if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
- // If over-usage is very high (more than a third of the space), we
- // try to free all mostly empty pages. We expect that almost empty
- // pages are even easier to compact so bump the limit even more.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates *= 2;
- }
-
- if (FLAG_always_compact) {
- max_evacuation_candidates = kMaxMaxEvacuationCandidates;
- }
-
- if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF(
- "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
- "evacuation candidate limit: %d\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
- }
-
- intptr_t estimated_release = 0;
-
- if (FLAG_trace_fragmentation &&
- max_evacuation_candidates >= kMaxMaxEvacuationCandidates) {
- PrintF("Hit max page compaction limit of %d pages\n",
- kMaxMaxEvacuationCandidates);
- }
- max_evacuation_candidates =
- Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
-
- std::vector<Candidate> candidates(max_evacuation_candidates);
-
- int count = 0;
- int fragmentation = 0;
- int page_number = 0;
- int least_index = -1;
+ // Pairs of (live_bytes_in_page, page).
+ std::vector<std::pair<int, Page*> > pages;
+ pages.reserve(number_of_pages);
PageIterator it(space);
while (it.has_next()) {
@@ -781,88 +633,107 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
p->ClearFlag(Page::POPULAR_PAGE);
continue;
}
-
// Invariant: Evacuation candidates are just created when marking is
// started. At the end of a GC all evacuation candidates are cleared and
// their slot buffers are released.
CHECK(!p->IsEvacuationCandidate());
CHECK(p->slots_buffer() == NULL);
-
- if (FLAG_stress_compaction) {
- if (FLAG_manual_evacuation_candidates_selection) {
- if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
- p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
- fragmentation = 1;
- }
- } else {
- unsigned int counter = space->heap()->ms_count();
- if ((counter & 1) == (page_number & 1)) fragmentation = 1;
- page_number++;
+ DCHECK(p->area_size() == area_size);
+ int live_bytes =
+ p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
+ pages.push_back(std::make_pair(live_bytes, p));
+ }
+
+ int candidate_count = 0;
+ int total_live_bytes = 0;
+
+ bool reduce_memory =
+ reduce_memory_footprint_ || heap()->HasLowAllocationRate();
+ if (FLAG_manual_evacuation_candidates_selection) {
+ for (size_t i = 0; i < pages.size(); i++) {
+ Page* p = pages[i].second;
+ if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ AddEvacuationCandidate(p);
}
- } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) {
- // Don't try to release too many pages.
- if (estimated_release >= over_reserved) {
- continue;
+ }
+ } else if (FLAG_stress_compaction) {
+ for (size_t i = 0; i < pages.size(); i++) {
+ Page* p = pages[i].second;
+ if (i % 2 == 0) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ AddEvacuationCandidate(p);
}
+ }
+ } else {
+ const int kTargetFragmentationPercent = 50;
+ const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
- intptr_t free_bytes = 0;
-
- if (!p->WasSwept()) {
- free_bytes = (p->area_size() - p->LiveBytes());
- } else {
- PagedSpace::SizeStats sizes;
- space->ObtainFreeListStatistics(p, &sizes);
- free_bytes = sizes.Total();
- }
+ const int kTargetFragmentationPercentForReduceMemory = 20;
+ const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
- int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
+ int max_evacuated_bytes;
+ int target_fragmentation_percent;
- if (free_pct >= kFreenessThreshold) {
- estimated_release += free_bytes;
- fragmentation = free_pct;
- } else {
- fragmentation = 0;
+ if (reduce_memory) {
+ target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+ max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+ } else {
+ target_fragmentation_percent = kTargetFragmentationPercent;
+ max_evacuated_bytes = kMaxEvacuatedBytes;
+ }
+ intptr_t free_bytes_threshold =
+ target_fragmentation_percent * (area_size / 100);
+
+ // Sort pages from the most free to the least free, then select
+ // the first n pages for evacuation such that:
+ // - the total size of evacuated objects does not exceed the specified
+ // limit.
+ // - fragmentation of (n+1)-th page does not exceed the specified limit.
+ std::sort(pages.begin(), pages.end());
+ for (size_t i = 0; i < pages.size(); i++) {
+ int live_bytes = pages[i].first;
+ int free_bytes = area_size - live_bytes;
+ if (FLAG_always_compact ||
+ (free_bytes >= free_bytes_threshold &&
+ total_live_bytes + live_bytes <= max_evacuated_bytes)) {
+ candidate_count++;
+ total_live_bytes += live_bytes;
}
-
if (FLAG_trace_fragmentation_verbose) {
- PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- static_cast<int>(free_bytes),
- static_cast<double>(free_bytes * 100) / p->area_size(),
- (fragmentation > 0) ? "[fragmented]" : "");
+ PrintF(
+ "Page in %s: %d KB free [fragmented if this >= %d KB], "
+ "sum of live bytes in fragmented pages %d KB [max is %d KB]\n",
+ AllocationSpaceName(space->identity()),
+ static_cast<int>(free_bytes / KB),
+ static_cast<int>(free_bytes_threshold / KB),
+ static_cast<int>(total_live_bytes / KB),
+ static_cast<int>(max_evacuated_bytes / KB));
}
- } else {
- fragmentation = FreeListFragmentation(space, p);
}
-
- if (fragmentation != 0) {
- if (count < max_evacuation_candidates) {
- candidates[count++] = Candidate(fragmentation, p);
- } else {
- if (least_index == -1) {
- for (int i = 0; i < max_evacuation_candidates; i++) {
- if (least_index == -1 ||
- candidates[i].fragmentation() <
- candidates[least_index].fragmentation()) {
- least_index = i;
- }
- }
- }
- if (candidates[least_index].fragmentation() < fragmentation) {
- candidates[least_index] = Candidate(fragmentation, p);
- least_index = -1;
- }
- }
+    // How many pages we will allocate for the evacuated objects
+ // in the worst case: ceil(total_live_bytes / area_size)
+ int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+ DCHECK_LE(estimated_new_pages, candidate_count);
+ int estimated_released_pages = candidate_count - estimated_new_pages;
+ // Avoid (compact -> expand) cycles.
+ if (estimated_released_pages == 0 && !FLAG_always_compact)
+ candidate_count = 0;
+ for (int i = 0; i < candidate_count; i++) {
+ AddEvacuationCandidate(pages[i].second);
}
}
- for (int i = 0; i < count; i++) {
- AddEvacuationCandidate(candidates[i].page());
- }
-
- if (count > 0 && FLAG_trace_fragmentation) {
- PrintF("Collected %d evacuation candidates for space %s\n", count,
- AllocationSpaceName(space->identity()));
+ if (FLAG_trace_fragmentation) {
+ PrintF(
+ "Collected %d evacuation candidates [%d KB live] for space %s "
+ "[mode %s]\n",
+ candidate_count, static_cast<int>(total_live_bytes / KB),
+ AllocationSpaceName(space->identity()),
+ (reduce_memory ? "reduce memory footprint" : "normal"));
}
}
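
The rewritten selection replaces per-page fragmentation scoring with a global sort: pages are ordered most-free-first and taken while each clears the free-bytes threshold and the running live-byte total stays within the evacuation budget, and the whole compaction is skipped if it would not release at least one page. A condensed standalone sketch of the loop (simplified; the flag-driven modes above are omitted):

#include <algorithm>
#include <utility>
#include <vector>

// pages holds (live_bytes, page_id); returns how many pages to evacuate.
int SelectCandidates(std::vector<std::pair<int, int> >* pages, int area_size,
                     int target_fragmentation_percent,
                     int max_evacuated_bytes) {
  int free_threshold = target_fragmentation_percent * (area_size / 100);
  std::sort(pages->begin(), pages->end());  // least live == most free first
  int count = 0;
  int total_live = 0;
  for (size_t i = 0; i < pages->size(); i++) {
    int live = (*pages)[i].first;
    if (area_size - live < free_threshold) break;
    if (total_live + live > max_evacuated_bytes) break;
    count++;
    total_live += live;
  }
  // Worst case, ceil(total_live / area_size) fresh pages hold the survivors.
  int new_pages = (total_live + area_size - 1) / area_size;
  return (count == new_pages) ? 0 : count;  // avoid compact -> expand cycles
}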
@@ -878,7 +749,6 @@ void MarkCompactCollector::AbortCompaction() {
}
compacting_ = false;
evacuation_candidates_.Rewind(0);
- invalidated_code_.Rewind(0);
}
DCHECK_EQ(0, evacuation_candidates_.length());
}
@@ -998,6 +868,10 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
shared->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
+ // Always flush the optimized code map if there is one.
+ if (!shared->optimized_code_map()->IsSmi()) {
+ shared->ClearOptimizedCodeMap();
+ }
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
@@ -1041,6 +915,10 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
candidate->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
+ // Always flush the optimized code map if there is one.
+ if (!candidate->optimized_code_map()->IsSmi()) {
+ candidate->ClearOptimizedCodeMap();
+ }
candidate->set_code(lazy_compile);
}
@@ -1066,29 +944,60 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
next_holder = GetNextCodeMap(holder);
ClearNextCodeMap(holder);
+ // Process context-dependent entries in the optimized code map.
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
i += SharedFunctionInfo::kEntryLength) {
+ // Each entry contains [ context, code, literals, ast-id ] as fields.
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+ Context* context =
+ Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
Code* code =
Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ FixedArray* literals = FixedArray::cast(
+ code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
+ Smi* ast_id =
+ Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
+ if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
- // Move every slot in the entry.
- for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
- int dst_index = new_length++;
- Object** slot = code_map->RawFieldOfElementAt(dst_index);
- Object* object = code_map->get(i + j);
- code_map->set(dst_index, object);
- if (j == SharedFunctionInfo::kOsrAstIdOffset) {
- DCHECK(object->IsSmi());
- } else {
- DCHECK(
- Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
- isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
- *slot);
- }
+ if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
+ // Move every slot in the entry and record slots when needed.
+ code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
+ code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
+ code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
+ code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
+ Object** code_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kCachedCodeOffset);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ code_slot, code_slot, *code_slot);
+ Object** context_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kContextOffset);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ context_slot, context_slot, *context_slot);
+ Object** literals_slot = code_map->RawFieldOfElementAt(
+ new_length + SharedFunctionInfo::kLiteralsOffset);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ literals_slot, literals_slot, *literals_slot);
+ new_length += SharedFunctionInfo::kEntryLength;
+ }
+
+ // Process context-independent entry in the optimized code map.
+ Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
+ if (shared_object->IsCode()) {
+ Code* shared_code = Code::cast(shared_object);
+ if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
+ code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
+ } else {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
+ Object** slot =
+ code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+ *slot);
}
}
@@ -1175,11 +1084,12 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
- ->get(SharedFunctionInfo::kNextMapIndex)
- ->IsUndefined());
+ FixedArray* code_map =
+ FixedArray::cast(code_map_holder->optimized_code_map());
+ DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
// Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map);
isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
if (FLAG_trace_code_flushing) {
@@ -1690,11 +1600,6 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
- // Enable code flushing for non-incremental cycles.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(!was_marked_incrementally_);
- }
-
// If code flushing is disabled, there is no need to prepare for it.
if (!is_code_flushing_enabled()) return;
@@ -1941,16 +1846,8 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
continue;
}
- AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawDoubleAligned(size);
- } else {
- allocation = new_space->AllocateRaw(size);
- }
-#else
- allocation = new_space->AllocateRaw(size);
-#endif
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = new_space->AllocateRaw(size, alignment);
if (allocation.IsRetry()) {
if (!new_space->AddFreshPage()) {
// Shouldn't happen. We are sweeping linearly, and to-space
@@ -1958,15 +1855,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
// always room.
UNREACHABLE();
}
-#ifndef V8_HOST_ARCH_64_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawDoubleAligned(size);
- } else {
- allocation = new_space->AllocateRaw(size);
- }
-#else
- allocation = new_space->AllocateRaw(size);
-#endif
+ allocation = new_space->AllocateRaw(size, alignment);
DCHECK(!allocation.IsRetry());
}
Object* target = allocation.ToObjectChecked();
@@ -2258,41 +2147,46 @@ void MarkCompactCollector::RetainMaps() {
}
-void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
- size_t max_size) {
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
+ DCHECK(!marking_deque_.in_use());
+ if (marking_deque_memory_ == NULL) {
+ marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
+ marking_deque_memory_committed_ = 0;
+ }
+ if (marking_deque_memory_ == NULL) {
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+ }
+}
+
+
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
// If the marking deque is too small, we try to allocate a bigger one.
// If that fails, make do with a smaller one.
- for (size_t size = max_size; size >= 256 * KB; size >>= 1) {
+ CHECK(!marking_deque_.in_use());
+ for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
base::VirtualMemory* memory = marking_deque_memory_;
- bool is_committed = marking_deque_memory_committed_;
+ size_t currently_committed = marking_deque_memory_committed_;
- if (memory == NULL || memory->size() < size) {
- // If we don't have memory or we only have small memory, then
- // try to reserve a new one.
- memory = new base::VirtualMemory(size);
- is_committed = false;
- }
- if (is_committed) return;
- if (memory->IsReserved() &&
- memory->Commit(reinterpret_cast<Address>(memory->address()),
- memory->size(),
- false)) { // Not executable.
- if (marking_deque_memory_ != NULL && marking_deque_memory_ != memory) {
- delete marking_deque_memory_;
+ if (currently_committed == size) return;
+
+ if (currently_committed > size) {
+ bool success = marking_deque_memory_->Uncommit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
+ currently_committed - size);
+ if (success) {
+ marking_deque_memory_committed_ = size;
+ return;
}
- marking_deque_memory_ = memory;
- marking_deque_memory_committed_ = true;
- InitializeMarkingDeque();
+ UNREACHABLE();
+ }
+
+ bool success = memory->Commit(
+ reinterpret_cast<Address>(memory->address()) + currently_committed,
+ size - currently_committed,
+ false); // Not executable.
+ if (success) {
+ marking_deque_memory_committed_ = size;
return;
- } else {
- // Commit failed, so we are under memory pressure. If this was the
- // previously reserved area we tried to commit, then remove references
- // to it before deleting it and unreserving it.
- if (marking_deque_memory_ == memory) {
- marking_deque_memory_ = NULL;
- marking_deque_memory_committed_ = false;
- }
- delete memory; // Will also unreserve the virtual allocation.
}
}
V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
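
The committed size now moves independently of the reservation: the loop walks down from the requested size, uncommitting excess when shrinking and halving the request whenever a commit fails, so memory pressure degrades the deque rather than killing marking outright. A standalone sketch of the halving retry (TryCommit is a hypothetical stand-in for VirtualMemory::Commit):

#include <cstddef>

// Hypothetical stand-in; a real implementation asks the OS to commit pages
// and fails under memory pressure.
bool TryCommit(size_t bytes) { return bytes <= (8u << 20); }

// Commit as close to max_size as possible, halving down to min_size.
size_t CommitWithFallback(size_t max_size, size_t min_size) {
  for (size_t size = max_size; size >= min_size; size >>= 1) {
    if (TryCommit(size)) return size;
  }
  return 0;  // the caller treats this as a fatal out-of-memory condition
}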
@@ -2300,23 +2194,37 @@ void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
void MarkCompactCollector::InitializeMarkingDeque() {
- if (marking_deque_memory_committed_) {
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_->size();
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque_.Initialize(addr, addr + size);
- }
+ DCHECK(!marking_deque_.in_use());
+ DCHECK(marking_deque_memory_committed_ > 0);
+ Address addr = static_cast<Address>(marking_deque_memory_->address());
+ size_t size = marking_deque_memory_committed_;
+ if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+ marking_deque_.Initialize(addr, addr + size);
}
-void MarkCompactCollector::UncommitMarkingDeque() {
- if (marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Uncommit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size());
- CHECK(success);
- marking_deque_memory_committed_ = false;
+void MarkingDeque::Initialize(Address low, Address high) {
+ DCHECK(!in_use_);
+ HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+ HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+ array_ = obj_low;
+ mask_ = base::bits::RoundDownToPowerOfTwo32(
+ static_cast<uint32_t>(obj_high - obj_low)) -
+ 1;
+ top_ = bottom_ = 0;
+ overflowed_ = false;
+ in_use_ = true;
+}
+
+
+void MarkingDeque::Uninitialize(bool aborting) {
+ if (!aborting) {
+ DCHECK(IsEmpty());
+ DCHECK(!overflowed_);
}
+ DCHECK(in_use_);
+ top_ = bottom_ = 0xdecbad;
+ in_use_ = false;
}
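
Initialize rounds the slot count down to a power of two and stores size-minus-one as mask_, so every push and pop can wrap its index with a single AND rather than a modulo. A standalone sketch of that indexing:

#include <cassert>
#include <cstdint>

uint32_t RoundDownToPowerOfTwo32(uint32_t x) {
  // Smear the highest set bit downward, then isolate it.
  x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
  return x - (x >> 1);
}

int main() {
  uint32_t slots = 1000;                               // committed capacity
  uint32_t mask = RoundDownToPowerOfTwo32(slots) - 1;  // 512 - 1 = 511
  assert(mask == 511);
  uint32_t top = 511;
  top = (top + 1) & mask;  // wraps to 0 without a division
  assert(top == 0);
}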
@@ -2337,7 +2245,9 @@ void MarkCompactCollector::MarkLiveObjects() {
} else {
// Abort any pending incremental activities e.g. incremental sweeping.
incremental_marking->Abort();
- InitializeMarkingDeque();
+ if (marking_deque_.in_use()) {
+ marking_deque_.Uninitialize(true);
+ }
}
#ifdef DEBUG
@@ -2345,7 +2255,8 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- EnsureMarkingDequeIsCommittedAndInitialize();
+ EnsureMarkingDequeIsCommittedAndInitialize(
+ MarkCompactCollector::kMaxMarkingDequeSize);
PrepareForCodeFlushing();
@@ -2420,11 +2331,6 @@ void MarkCompactCollector::AfterMarking() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
code_flusher_->ProcessCandidates();
- // If incremental marker does not support code flushing, we need to
- // disable it before incremental marking steps for next cycle.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(false);
- }
}
if (FLAG_track_gc_object_stats) {
@@ -2489,11 +2395,13 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
const int header = TransitionArray::kProtoTransitionHeaderSize;
int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
- Object* cached_map = prototype_transitions->get(header + i);
- if (IsMarked(cached_map)) {
+ Object* cell = prototype_transitions->get(header + i);
+ if (!WeakCell::cast(cell)->cleared()) {
if (new_number_of_transitions != i) {
- prototype_transitions->set(header + new_number_of_transitions,
- cached_map, SKIP_WRITE_BARRIER);
+ prototype_transitions->set(header + new_number_of_transitions, cell);
+ Object** slot = prototype_transitions->RawFieldOfElementAt(
+ header + new_number_of_transitions);
+ RecordSlot(slot, slot, cell);
}
new_number_of_transitions++;
}
@@ -2729,7 +2637,6 @@ void MarkCompactCollector::AbortWeakCollections() {
void MarkCompactCollector::ProcessAndClearWeakCells() {
- HeapObject* undefined = heap()->undefined_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
@@ -2764,19 +2671,18 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
RecordSlot(slot, slot, *slot);
}
weak_cell_obj = weak_cell->next();
- weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+ weak_cell->clear_next(heap());
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
void MarkCompactCollector::AbortWeakCells() {
- Object* undefined = heap()->undefined_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
- weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+ weak_cell->clear_next(heap());
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
@@ -2814,37 +2720,23 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
DCHECK(heap()->AllowedToBeMigrated(src, dest));
DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_SPACE) {
- Address src_slot = src_addr;
- Address dst_slot = dst_addr;
DCHECK(IsAligned(size, kPointerSize));
+ switch (src->ContentType()) {
+ case HeapObjectContents::kTaggedValues:
+ MigrateObjectTagged(dst, src, size);
+ break;
- bool may_contain_raw_values = src->MayContainRawValues();
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(src->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
-#endif
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
-#if V8_DOUBLE_FIELDS_UNBOXING
- if (!may_contain_raw_values &&
- (has_only_tagged_fields ||
- helper.IsTagged(static_cast<int>(src_slot - src_addr))))
-#else
- if (!may_contain_raw_values)
-#endif
- {
- RecordMigratedSlot(value, dst_slot);
- }
+ case HeapObjectContents::kMixedValues:
+ MigrateObjectMixed(dst, src, size);
+ break;
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
+ case HeapObjectContents::kRawValues:
+ MigrateObjectRaw(dst, src, size);
+ break;
}
if (compacting_ && dst->IsJSFunction()) {
- Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
+ Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
@@ -2852,30 +2744,6 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
- } else if (dst->IsConstantPoolArray()) {
- // We special case ConstantPoolArrays since they could contain integers
- // value entries which look like tagged pointers.
- // TODO(mstarzinger): restructure this code to avoid this special-casing.
- ConstantPoolArray* array = ConstantPoolArray::cast(dst);
- ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
- while (!code_iter.is_finished()) {
- Address code_entry_slot =
- dst_addr + array->OffsetOfElementAt(code_iter.next_index());
- Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
- }
- ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
- while (!heap_iter.is_finished()) {
- Address heap_slot =
- dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
- Object* value = Memory::Object_at(heap_slot);
- RecordMigratedSlot(value, heap_slot);
- }
}
} else if (dest == CODE_SPACE) {
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
@@ -2893,6 +2761,59 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
}
+void MarkCompactCollector::MigrateObjectTagged(HeapObject* dst, HeapObject* src,
+ int size) {
+ Address src_slot = src->address();
+ Address dst_slot = dst->address();
+ for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+ Object* value = Memory::Object_at(src_slot);
+ Memory::Object_at(dst_slot) = value;
+ RecordMigratedSlot(value, dst_slot);
+ src_slot += kPointerSize;
+ dst_slot += kPointerSize;
+ }
+}
+
+
+void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
+ int size) {
+ if (src->IsFixedTypedArrayBase()) {
+ heap()->MoveBlock(dst->address(), src->address(), size);
+ Address base_pointer_slot =
+ dst->address() + FixedTypedArrayBase::kBasePointerOffset;
+ RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot);
+ } else if (FLAG_unbox_double_fields) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
+ Address src_slot = src_addr;
+ Address dst_slot = dst_addr;
+
+ LayoutDescriptorHelper helper(src->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+ Object* value = Memory::Object_at(src_slot);
+
+ Memory::Object_at(dst_slot) = value;
+
+ if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
+ RecordMigratedSlot(value, dst_slot);
+ }
+
+ src_slot += kPointerSize;
+ dst_slot += kPointerSize;
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void MarkCompactCollector::MigrateObjectRaw(HeapObject* dst, HeapObject* src,
+ int size) {
+ heap()->MoveBlock(dst->address(), src->address(), size);
+}
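
[Editor's note] MigrateObject now dispatches on HeapObjectContents: fully tagged objects are copied word by word so every destination slot can be recorded for later pointer updating, raw objects are block-copied, and mixed objects consult per-map layout information. A compressed sketch of that contract (Contents, Migrate, and the recording hook are hypothetical stand-ins, not the patch's actual types):

    #include <cstddef>
    #include <cstring>

    enum class Contents { kTaggedValues, kRawValues, kMixedValues };

    // Hypothetical hook standing in for RecordMigratedSlot().
    static void RecordSlotAt(char* dst_slot) { (void)dst_slot; }

    static void Migrate(char* dst, const char* src, size_t size,
                        Contents contents) {
      switch (contents) {
        case Contents::kTaggedValues:
          // Copy pointer-sized words and record each destination slot.
          for (size_t off = 0; off < size; off += sizeof(void*)) {
            std::memcpy(dst + off, src + off, sizeof(void*));
            RecordSlotAt(dst + off);
          }
          break;
        case Contents::kRawValues:
          std::memcpy(dst, src, size);  // no pointers inside, nothing to record
          break;
        case Contents::kMixedValues:
          // The real code asks the map's LayoutDescriptorHelper (or the known
          // layout of FixedTypedArrayBase) which words are tagged.
          break;
      }
    }
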
+
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor {
@@ -3119,16 +3040,8 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
OldSpace* old_space = heap()->old_space();
HeapObject* target;
- AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = old_space->AllocateRawDoubleAligned(object_size);
- } else {
- allocation = old_space->AllocateRaw(object_size);
- }
-#else
- allocation = old_space->AllocateRaw(object_size);
-#endif
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
if (allocation.To(&target)) {
MigrateObject(target, object, object_size, old_space->identity());
// If we end up needing more special cases, we should factor this out.
@@ -3145,11 +3058,18 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
HeapObject** out_object) {
- // This function does not support large objects right now.
Space* owner = p->owner();
if (owner == heap_->lo_space() || owner == NULL) {
- *out_object = NULL;
- return true;
+ Object* large_object = heap_->lo_space()->FindObject(slot);
+ // This object has to exist, otherwise we would not have recorded a slot
+ // for it.
+ CHECK(large_object->IsHeapObject());
+ HeapObject* large_heap_object = HeapObject::cast(large_object);
+ if (IsMarked(large_heap_object)) {
+ *out_object = large_heap_object;
+ return true;
+ }
+ return false;
}
uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
@@ -3266,25 +3186,37 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
return false;
}
-#if V8_DOUBLE_FIELDS_UNBOXING
- // |object| is NULL only when the slot belongs to large object space.
- DCHECK(object != NULL ||
- Page::FromAnyPointerAddress(heap_, slot)->owner() ==
- heap_->lo_space());
- // We don't need to check large objects' layout descriptor since it can't
- // contain in-object fields anyway.
- if (object != NULL) {
- // Filter out slots that happens to point to unboxed double fields.
- LayoutDescriptorHelper helper(object->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
- if (!has_only_tagged_fields &&
- !helper.IsTagged(static_cast<int>(slot - object->address()))) {
- return false;
- }
- }
-#endif
+ DCHECK(object != NULL);
- return true;
+ switch (object->ContentType()) {
+ case HeapObjectContents::kTaggedValues:
+ return true;
+
+ case HeapObjectContents::kRawValues: {
+ InstanceType type = object->map()->instance_type();
+ // Slots in maps and code can't be invalid because they are never
+ // shrunk.
+ if (type == MAP_TYPE || type == CODE_TYPE) return true;
+
+ // Consider slots in objects that contain ONLY raw data as invalid.
+ return false;
+ }
+
+ case HeapObjectContents::kMixedValues: {
+ if (object->IsFixedTypedArrayBase()) {
+ return static_cast<int>(slot - object->address()) ==
+ FixedTypedArrayBase::kBasePointerOffset;
+ } else if (FLAG_unbox_double_fields) {
+ // Filter out slots that happen to point to unboxed double fields.
+ LayoutDescriptorHelper helper(object->map());
+ DCHECK(!helper.all_fields_tagged());
+ return helper.IsTagged(static_cast<int>(slot - object->address()));
+ }
+ break;
+ }
+ }
+ UNREACHABLE();
+ return true;
}
@@ -3300,6 +3232,23 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
}
+void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
+ Address end_slot) {
+ // Remove entries in the given address range from the slots buffers of all
+ // evacuation candidates; each removed entry is overwritten with a harmless
+ // old-space slot (see SlotsBuffer::RemoveObjectSlots below).
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ if (p->IsEvacuationCandidate()) {
+ SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
+ end_slot);
+ }
+ }
+}
+
+
void MarkCompactCollector::EvacuateNewSpace() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
@@ -3355,14 +3304,14 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
int size = object->Size();
-
+ AllocationAlignment alignment = object->RequiredAlignment();
HeapObject* target_object;
- AllocationResult allocation = space->AllocateRaw(size);
+ AllocationResult allocation = space->AllocateRaw(size, alignment);
if (!allocation.To(&target_object)) {
// If allocation failed, use emergency memory and re-try allocation.
CHECK(space->HasEmergencyMemory());
space->UseEmergencyMemory();
- allocation = space->AllocateRaw(size);
+ allocation = space->AllocateRaw(size, alignment);
}
if (!allocation.To(&target_object)) {
// OS refused to give us memory.
@@ -3496,6 +3445,10 @@ static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
rinfo.Visit(isolate, v);
break;
}
+ case SlotsBuffer::OBJECT_SLOT: {
+ v->VisitPointer(reinterpret_cast<Object**>(addr));
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -3612,121 +3565,18 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
}
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
- Page* p = Page::FromAddress(code->address());
-
- if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- return false;
- }
-
- Address code_start = code->address();
- Address code_end = code_start + code->Size();
-
- uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
- uint32_t end_index =
- MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
- // TODO(hpayer): Filter out invalidated code in
- // ClearInvalidSlotsBufferEntries.
- Bitmap* b = p->markbits();
-
- MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
- MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
- if (value) {
- Marking::SetAllMarkBitsInRange(start_mark_bit, end_mark_bit);
- } else {
- Marking::ClearAllMarkBitsOfCellsContainedInRange(start_mark_bit,
- end_mark_bit);
- }
-
- return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
- // We did not record any slots in large objects thus
- // we can safely go to the page from the slot address.
- Page* p = Page::FromAddress(addr);
-
- // First check owner's identity because old space is swept concurrently or
- // lazily and might still have non-zero mark-bits on some pages.
- if (p->owner()->identity() != CODE_SPACE) return false;
-
- // In code space only bits on evacuation candidates (but we don't record
- // any slots on them) and under invalidated code objects are non-zero.
- MarkBit mark_bit =
- p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
- return Marking::IsBlackOrGrey(mark_bit);
-}
-
-
-void MarkCompactCollector::InvalidateCode(Code* code) {
- if (heap_->incremental_marking()->IsCompacting() &&
- !ShouldSkipEvacuationSlotRecording(code)) {
- DCHECK(compacting_);
-
- // If the object is white than no slots were recorded on it yet.
- MarkBit mark_bit = Marking::MarkBitFrom(code);
- if (Marking::IsWhite(mark_bit)) return;
-
- invalidated_code_.Add(code);
- }
-}
-
-
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
return code->is_optimized_code() && code->marked_for_deoptimization();
}
-bool MarkCompactCollector::MarkInvalidatedCode() {
- bool code_marked = false;
-
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
-
- if (SetMarkBitsUnderInvalidatedCode(code, true)) {
- code_marked = true;
- }
- }
-
- return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
- }
-}
-
-
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
- if (code != NULL) {
- code->Iterate(visitor);
- SetMarkBitsUnderInvalidatedCode(code, false);
- }
- }
- invalidated_code_.Rewind(0);
-}
-
-
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Heap::RelocationLock relocation_lock(heap());
- bool code_slots_filtering_required;
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_NEWSPACE);
- code_slots_filtering_required = MarkInvalidatedCode();
EvacuationScope evacuation_scope(this);
EvacuateNewSpace();
}
@@ -3773,8 +3623,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
- code_slots_filtering_required);
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
@@ -3808,8 +3657,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
- code_slots_filtering_required);
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer());
if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
@@ -3865,10 +3713,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
- // Visit invalidated code (we ignored all slots on it) and clear mark-bits
- // under it.
- ProcessInvalidatedCode(&updating_visitor);
-
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
@@ -4458,8 +4302,6 @@ void MarkCompactCollector::SweepSpaces() {
StartSweeperThreads();
}
}
- RemoveDeadInvalidatedCode();
-
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
@@ -4602,12 +4444,10 @@ void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Object* object = *slot;
- if (object->IsHeapObject()) {
- if (heap->InNewSpace(object) ||
- !heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot))) {
- slots[slot_idx] = kRemovedEntry;
- }
+ if ((object->IsHeapObject() && heap->InNewSpace(object)) ||
+ !heap->mark_compact_collector()->IsSlotInLiveObject(
+ reinterpret_cast<Address>(slot))) {
+ slots[slot_idx] = kRemovedEntry;
}
} else {
++slot_idx;
@@ -4619,6 +4459,41 @@ void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
}
+void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+ Address start_slot, Address end_slot) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ const ObjectSlot kRemovedEntry = HeapObject::RawField(
+ heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+ DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+ ->NeverEvacuate());
+
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+ bool is_typed_slot = false;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Address slot_address = reinterpret_cast<Address>(slot);
+ if (slot_address >= start_slot && slot_address < end_slot) {
+ slots[slot_idx] = kRemovedEntry;
+ if (is_typed_slot) {
+ slots[slot_idx - 1] = kRemovedEntry;
+ }
+ }
+ is_typed_slot = false;
+ } else {
+ is_typed_slot = true;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
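
[Editor's note] A typed slot occupies two consecutive buffer entries, a type marker followed by an address, which is why the loop above carries is_typed_slot and also clears the preceding marker when its paired address entry falls in the dead range. A small self-contained model of that pairing (the marker test and the removal sentinel are assumed, simplified encodings):

    #include <cstdint>
    #include <vector>

    using Entry = std::intptr_t;
    const Entry kRemoved = -1;  // stand-in for the harmless old-space smi slot

    // Assumed encoding: small non-negative values are type markers,
    // anything else is an address.
    static bool IsTypeMarker(Entry e) { return e >= 0 && e < 16; }

    static void RemoveRange(std::vector<Entry>* slots, Entry begin, Entry end) {
      bool prev_was_marker = false;
      for (std::size_t i = 0; i < slots->size(); ++i) {
        Entry e = (*slots)[i];
        if (!IsTypeMarker(e)) {
          if (e >= begin && e < end) {
            (*slots)[i] = kRemoved;
            // A typed slot dies as a pair: clear its marker entry as well.
            if (prev_was_marker) (*slots)[i - 1] = kRemoved;
          }
          prev_was_marker = false;
        } else {
          prev_was_marker = true;
        }
      }
    }
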
+
+
void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
@@ -4629,9 +4504,10 @@ void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
if (!IsTypedSlot(slot)) {
Object* object = *slot;
if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
CHECK(!heap->InNewSpace(object));
- CHECK(heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot)));
+ heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
+ reinterpret_cast<Address>(slot), heap_object);
}
} else {
++slot_idx;
@@ -4666,9 +4542,20 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+ Address addr = rinfo->pc();
+ SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = SlotsBuffer::OBJECT_SLOT;
+ }
+ }
bool success = SlotsBuffer::AddTo(
&slots_buffer_allocator_, target_page->slots_buffer_address(),
- SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
+ slot_type, addr, SlotsBuffer::FAIL_ON_OVERFLOW);
if (!success) {
EvictPopularEvacuationCandidate(target_page);
}
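
[Editor's note] The addition above routes constant-pool-resident targets to the pool entry's address, typed as a code-entry slot for code targets and as the new OBJECT_SLOT otherwise. The same selection, factored into a hypothetical helper for clarity (it only restates the logic shown in the hunk; the helper itself is not part of the patch):

    static SlotsBuffer::SlotType SlotTypeFor(RelocInfo* rinfo,
                                             RelocInfo::Mode rmode,
                                             Address* addr_out) {
      if (!rinfo->IsInConstantPool()) {
        *addr_out = rinfo->pc();
        return SlotTypeForRMode(rmode);
      }
      *addr_out = rinfo->constant_pool_entry_address();
      return RelocInfo::IsCodeTarget(rmode) ? SlotsBuffer::CODE_ENTRY_SLOT
                                            : SlotsBuffer::OBJECT_SLOT;
    }
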
@@ -4751,28 +4638,6 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
}
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < idx_);
- Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
- if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
- }
-}
-
-
SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
return new SlotsBuffer(next_buffer);
}
@@ -4792,5 +4657,5 @@ void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
}
*buffer_address = NULL;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 322965decd..9892e0e42c 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -116,10 +116,6 @@ class Marking {
markbit.Next().Set();
}
- static void SetAllMarkBitsInRange(MarkBit start, MarkBit end);
- static void ClearAllMarkBitsOfCellsContainedInRange(MarkBit start,
- MarkBit end);
-
void TransferMark(Address old_start, Address new_start);
#ifdef DEBUG
@@ -182,18 +178,15 @@ class Marking {
class MarkingDeque {
public:
MarkingDeque()
- : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
+ : array_(NULL),
+ top_(0),
+ bottom_(0),
+ mask_(0),
+ overflowed_(false),
+ in_use_(false) {}
- void Initialize(Address low, Address high) {
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
- array_ = obj_low;
- mask_ = base::bits::RoundDownToPowerOfTwo32(
- static_cast<uint32_t>(obj_high - obj_low)) -
- 1;
- top_ = bottom_ = 0;
- overflowed_ = false;
- }
+ void Initialize(Address low, Address high);
+ void Uninitialize(bool aborting = false);
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
@@ -201,6 +194,8 @@ class MarkingDeque {
bool overflowed() const { return overflowed_; }
+ bool in_use() const { return in_use_; }
+
void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; }
@@ -210,8 +205,6 @@ class MarkingDeque {
// heap.
INLINE(void PushBlack(HeapObject* object)) {
DCHECK(object->IsHeapObject());
- // TODO(jochen): Remove again before we branch for 4.2.
- CHECK(object->IsHeapObject() && object->map()->IsMap());
if (IsFull()) {
Marking::BlackToGrey(object);
MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
@@ -224,8 +217,6 @@ class MarkingDeque {
INLINE(void PushGrey(HeapObject* object)) {
DCHECK(object->IsHeapObject());
- // TODO(jochen): Remove again before we branch for 4.2.
- CHECK(object->IsHeapObject() && object->map()->IsMap());
if (IsFull()) {
SetOverflowed();
} else {
@@ -252,6 +243,19 @@ class MarkingDeque {
}
}
+ INLINE(void UnshiftBlack(HeapObject* object)) {
+ DCHECK(object->IsHeapObject());
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ if (IsFull()) {
+ Marking::BlackToGrey(object);
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+ SetOverflowed();
+ } else {
+ bottom_ = ((bottom_ - 1) & mask_);
+ array_[bottom_] = object;
+ }
+ }
+
HeapObject** array() { return array_; }
int bottom() { return bottom_; }
int top() { return top_; }
@@ -267,6 +271,7 @@ class MarkingDeque {
int bottom_;
int mask_;
bool overflowed_;
+ bool in_use_;
DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
@@ -316,8 +321,15 @@ class SlotsBuffer {
slots_[idx_++] = slot;
}
+ // Should be used for testing only.
+ ObjectSlot Get(intptr_t i) {
+ DCHECK(i >= 0 && i < kNumberOfElements);
+ return slots_[i];
+ }
+
enum SlotType {
EMBEDDED_OBJECT_SLOT,
+ OBJECT_SLOT,
RELOCATED_CODE_OBJECT,
CELL_TARGET_SLOT,
CODE_TARGET_SLOT,
@@ -331,6 +343,8 @@ class SlotsBuffer {
switch (type) {
case EMBEDDED_OBJECT_SLOT:
return "EMBEDDED_OBJECT_SLOT";
+ case OBJECT_SLOT:
+ return "OBJECT_SLOT";
case RELOCATED_CODE_OBJECT:
return "RELOCATED_CODE_OBJECT";
case CELL_TARGET_SLOT:
@@ -351,8 +365,6 @@ class SlotsBuffer {
void UpdateSlots(Heap* heap);
- void UpdateSlotsWithFilter(Heap* heap);
-
SlotsBuffer* next() { return next_; }
static int SizeOfChain(SlotsBuffer* buffer) {
@@ -365,14 +377,9 @@ class SlotsBuffer {
inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
- static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
- bool code_slots_filtering_required) {
+ static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer) {
while (buffer != NULL) {
- if (code_slots_filtering_required) {
- buffer->UpdateSlotsWithFilter(heap);
- } else {
- buffer->UpdateSlots(heap);
- }
+ buffer->UpdateSlots(heap);
buffer = buffer->next();
}
}
@@ -411,6 +418,10 @@ class SlotsBuffer {
// before sweeping when mark bits are still intact.
static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
+ // Eliminate all slots that are within the given address range.
+ static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+ Address start_slot, Address end_slot);
+
// Ensures that there are no invalid slots in the chain of slots buffers.
static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
@@ -504,7 +515,8 @@ class CodeFlusher {
static void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate);
+ candidate->set_next_function_link(next_candidate,
+ UPDATE_WEAK_WRITE_BARRIER);
}
static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
@@ -651,9 +663,11 @@ class MarkCompactCollector {
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space);
- bool TryPromoteObject(HeapObject* object, int object_size);
+ void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size);
+ void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size);
+ void MigrateObjectRaw(HeapObject* dst, HeapObject* src, int size);
- void InvalidateCode(Code* code);
+ bool TryPromoteObject(HeapObject* object, int object_size);
void ClearMarkbits();
@@ -704,11 +718,20 @@ class MarkCompactCollector {
MarkingDeque* marking_deque() { return &marking_deque_; }
- void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size = 4 * MB);
+ static const size_t kMaxMarkingDequeSize = 4 * MB;
+ static const size_t kMinMarkingDequeSize = 256 * KB;
- void InitializeMarkingDeque();
+ void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
+ if (!marking_deque_.in_use()) {
+ EnsureMarkingDequeIsCommitted(max_size);
+ InitializeMarkingDeque();
+ }
+ }
- void UncommitMarkingDeque();
+ void EnsureMarkingDequeIsCommitted(size_t max_size);
+ void EnsureMarkingDequeIsReserved();
+
+ void InitializeMarkingDeque();
// The following four methods can just be called after marking, when the
// whole transitive closure is known. They must be called before sweeping
@@ -718,16 +741,17 @@ class MarkCompactCollector {
bool IsSlotInLiveObject(Address slot);
void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
+ // Removes all the slots in the slot buffers that are within the given
+ // address range.
+ void RemoveObjectSlots(Address start_slot, Address end_slot);
+
private:
class SweeperTask;
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
- bool MarkInvalidatedCode();
bool WillBeDeoptimized(Code* code);
- void RemoveDeadInvalidatedCode();
- void ProcessInvalidatedCode(ObjectVisitor* visitor);
void EvictPopularEvacuationCandidate(Page* page);
void ClearInvalidSlotsBufferEntries(PagedSpace* space);
void ClearInvalidStoreAndSlotsBufferEntries();
@@ -938,13 +962,12 @@ class MarkCompactCollector {
Heap* heap_;
base::VirtualMemory* marking_deque_memory_;
- bool marking_deque_memory_committed_;
+ size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
- List<Code*> invalidated_code_;
SmartPointer<FreeList> free_list_old_space_;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
new file mode 100644
index 0000000000..e226f09a04
--- /dev/null
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -0,0 +1,241 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-reducer.h"
+
+#include "src/flags.h"
+#include "src/heap/heap.h"
+#include "src/utils.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+const int MemoryReducer::kLongDelayMs = 5000;
+const int MemoryReducer::kShortDelayMs = 500;
+const int MemoryReducer::kWatchdogDelayMs = 100000;
+const int MemoryReducer::kMaxNumberOfGCs = 3;
+
+
+void MemoryReducer::TimerTask::Run() {
+ if (heap_is_torn_down_) return;
+ Heap* heap = memory_reducer_->heap();
+ Event event;
+ double time_ms = heap->MonotonicallyIncreasingTimeInMs();
+ heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
+ heap->OldGenerationAllocationCounter());
+ event.type = kTimer;
+ event.time_ms = time_ms;
+ event.low_allocation_rate = heap->HasLowAllocationRate();
+ event.can_start_incremental_gc =
+ heap->incremental_marking()->IsStopped() &&
+ heap->incremental_marking()->CanBeActivated();
+ memory_reducer_->NotifyTimer(event);
+}
+
+
+void MemoryReducer::NotifyTimer(const Event& event) {
+ DCHECK(nullptr != pending_task_);
+ DCHECK_EQ(kTimer, event.type);
+ DCHECK_EQ(kWait, state_.action);
+ pending_task_ = nullptr;
+ state_ = Step(state_, event);
+ if (state_.action == kRun) {
+ DCHECK(heap()->incremental_marking()->IsStopped());
+ DCHECK(FLAG_incremental_marking);
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
+ state_.started_gcs);
+ }
+ if (heap()->ShouldOptimizeForMemoryUsage()) {
+ // Do full GC if memory usage has higher priority than latency. This is
+ // important for background tabs that do not send idle notifications.
+ heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
+ "memory reducer");
+ } else {
+ heap()->StartIdleIncrementalMarking();
+ }
+ } else if (state_.action == kWait) {
+ if (!heap()->incremental_marking()->IsStopped() &&
+ heap()->ShouldOptimizeForMemoryUsage()) {
+ // Make progress with pending incremental marking if memory usage has
+ // higher priority than latency. This is important for background tabs
+ // that do not send idle notifications.
+ const int kIncrementalMarkingDelayMs = 500;
+ double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
+ kIncrementalMarkingDelayMs;
+ heap()->AdvanceIncrementalMarking(
+ 0, deadline, i::IncrementalMarking::StepActions(
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_MARKING,
+ i::IncrementalMarking::FORCE_COMPLETION));
+ heap()->FinalizeIncrementalMarkingIfComplete(
+ "Memory reducer: finalize incremental marking");
+ }
+ // Re-schedule the timer.
+ ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
+ state_.next_gc_start_ms - event.time_ms);
+ }
+ }
+}
+
+
+void MemoryReducer::NotifyMarkCompact(const Event& event) {
+ DCHECK_EQ(kMarkCompact, event.type);
+ Action old_action = state_.action;
+ state_ = Step(state_, event);
+ if (old_action != kWait && state_.action == kWait) {
+ // If we are transitioning to the WAIT state, start the timer.
+ ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ }
+ if (old_action == kRun) {
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
+ state_.started_gcs,
+ state_.action == kWait ? "will do more" : "done");
+ }
+ }
+}
+
+
+void MemoryReducer::NotifyContextDisposed(const Event& event) {
+ DCHECK_EQ(kContextDisposed, event.type);
+ Action old_action = state_.action;
+ state_ = Step(state_, event);
+ if (old_action != kWait && state_.action == kWait) {
+ // If we are transitioning to the WAIT state, start the timer.
+ ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ }
+}
+
+
+void MemoryReducer::NotifyBackgroundIdleNotification(const Event& event) {
+ DCHECK_EQ(kBackgroundIdleNotification, event.type);
+ Action old_action = state_.action;
+ int old_started_gcs = state_.started_gcs;
+ state_ = Step(state_, event);
+ if (old_action == kWait && state_.action == kWait &&
+ old_started_gcs + 1 == state_.started_gcs) {
+ DCHECK(heap()->incremental_marking()->IsStopped());
+ // TODO(ulan): Replace it with incremental marking GC once
+ // chromium:490559 is fixed.
+ if (event.time_ms > state_.last_gc_time_ms + kLongDelayMs) {
+ heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
+ "memory reducer background GC");
+ } else {
+ DCHECK(FLAG_incremental_marking);
+ heap()->StartIdleIncrementalMarking();
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(),
+ "Memory reducer: started GC #%d"
+ " (background idle)\n",
+ state_.started_gcs);
+ }
+ }
+ }
+}
+
+
+bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
+ return state.last_gc_time_ms != 0 &&
+ event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
+}
+
+
+// For specification of this function see the comment for MemoryReducer class.
+MemoryReducer::State MemoryReducer::Step(const State& state,
+ const Event& event) {
+ if (!FLAG_incremental_marking) {
+ return State(kDone, 0, 0, state.last_gc_time_ms);
+ }
+ switch (state.action) {
+ case kDone:
+ if (event.type == kTimer || event.type == kBackgroundIdleNotification) {
+ return state;
+ } else {
+ DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
+ return State(
+ kWait, 0, event.time_ms + kLongDelayMs,
+ event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
+ }
+ case kWait:
+ switch (event.type) {
+ case kContextDisposed:
+ return state;
+ case kTimer:
+ if (state.started_gcs >= kMaxNumberOfGCs) {
+ return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
+ } else if (event.can_start_incremental_gc &&
+ (event.low_allocation_rate || WatchdogGC(state, event))) {
+ if (state.next_gc_start_ms <= event.time_ms) {
+ return State(kRun, state.started_gcs + 1, 0.0,
+ state.last_gc_time_ms);
+ } else {
+ return state;
+ }
+ } else {
+ return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
+ state.last_gc_time_ms);
+ }
+ case kBackgroundIdleNotification:
+ if (event.can_start_incremental_gc &&
+ state.started_gcs < kMaxNumberOfGCs) {
+ return State(kWait, state.started_gcs + 1,
+ event.time_ms + kLongDelayMs, state.last_gc_time_ms);
+ } else {
+ return state;
+ }
+ case kMarkCompact:
+ return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
+ event.time_ms);
+ }
+ case kRun:
+ if (event.type != kMarkCompact) {
+ return state;
+ } else {
+ if (state.started_gcs < kMaxNumberOfGCs &&
+ (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
+ return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
+ event.time_ms);
+ } else {
+ return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
+ }
+ }
+ }
+ UNREACHABLE();
+ return State(kDone, 0, 0, 0.0); // Make the compiler happy.
+}
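
[Editor's note] Because Step() is a pure function of the current state and the incoming event, the automaton can be traced by hand. An illustrative trace, not part of the patch, with field values chosen to satisfy the guards above and assuming FLAG_incremental_marking is enabled:

    MemoryReducer::State state(MemoryReducer::kDone, 0, 0.0, 0.0);

    MemoryReducer::Event gc = {};
    gc.type = MemoryReducer::kMarkCompact;
    gc.time_ms = 1000;
    state = MemoryReducer::Step(state, gc);
    // DONE -> WAIT: action == kWait, next_gc_start_ms == 1000 + kLongDelayMs.

    MemoryReducer::Event timer = {};
    timer.type = MemoryReducer::kTimer;
    timer.time_ms = 7000;  // past next_gc_start_ms (6000, kLongDelayMs = 5000)
    timer.low_allocation_rate = true;
    timer.can_start_incremental_gc = true;
    state = MemoryReducer::Step(state, timer);
    // WAIT -> RUN: action == kRun, started_gcs == 1.
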
+
+
+void MemoryReducer::ScheduleTimer(double delay_ms) {
+ DCHECK(delay_ms > 0);
+ // Leave some room for precision error in the task scheduler.
+ const double kSlackMs = 100;
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
+ DCHECK(nullptr == pending_task_);
+ pending_task_ = new MemoryReducer::TimerTask(this);
+ V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
+ isolate, pending_task_, (delay_ms + kSlackMs) / 1000.0);
+}
+
+
+void MemoryReducer::ClearTask(v8::Task* task) {
+ if (pending_task_ == task) {
+ pending_task_ = nullptr;
+ }
+}
+
+
+void MemoryReducer::TearDown() {
+ if (pending_task_ != nullptr) {
+ pending_task_->NotifyHeapTearDown();
+ pending_task_ = nullptr;
+ }
+ state_ = State(kDone, 0, 0, 0.0);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
new file mode 100644
index 0000000000..c387322172
--- /dev/null
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -0,0 +1,170 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_REDUCER_H_
+#define V8_HEAP_MEMORY_REDUCER_H_
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+
+// The goal of the MemoryReducer class is to detect the transition of the
+// mutator from a high-allocation phase to a low-allocation phase and to
+// collect potential garbage created in the high-allocation phase.
+//
+// The class implements an automaton with the following states and transitions.
+//
+// States:
+// - DONE <last_gc_time_ms>
+// - WAIT <started_gcs> <next_gc_start_ms> <last_gc_time_ms>
+// - RUN <started_gcs> <last_gc_time_ms>
+// The <started_gcs> is an integer in the range 0..kMaxNumberOfGCs that stores
+// the number of GCs initiated by the MemoryReducer since it left the DONE
+// state.
+// The <next_gc_start_ms> is a double that stores the earliest time the next GC
+// can be initiated by the MemoryReducer.
+// The <last_gc_time_ms> is a double that stores the time of the last full GC.
+// The DONE state means that the MemoryReducer is not active.
+// The WAIT state means that the MemoryReducer is waiting for the mutator
+// allocation rate to drop. The check for the allocation rate happens in the
+// timer task callback. If the allocation rate does not drop within
+// watchdog_delay_ms since the last GC, the transition to the RUN state is
+// forced.
+// The RUN state means that the MemoryReducer started incremental marking and is
+// waiting for it to finish. Incremental marking steps are performed as usual
+// in the idle notification and in the mutator.
+//
+// Transitions:
+// DONE t -> WAIT 0 (now_ms + long_delay_ms) t' happens:
+// - on context disposal.
+// - at the end of mark-compact GC initiated by the mutator.
+// This signals that there is potential garbage to be collected.
+//
+// WAIT n x t -> WAIT n (now_ms + long_delay_ms) t' happens:
+// - on mark-compact GC initiated by the mutator,
+// - in the timer callback if the mutator allocation rate is high or
+// incremental GC is in progress or (now_ms - t < watchdog_delay_ms)
+//
+// WAIT n x t -> WAIT (n+1) (now_ms + long_delay_ms) t happens:
+// - on background idle notification, which signals that we can start
+// incremental marking even if the allocation rate is high.
+// The MemoryReducer starts incremental marking on this transition but still
+// has a pending timer task.
+//
+// WAIT n x t -> DONE t happens:
+// - in the timer callback if n >= kMaxNumberOfGCs.
+//
+// WAIT n x t -> RUN (n+1) t happens:
+// - in the timer callback if the mutator allocation rate is low
+// and now_ms >= x and there is no incremental GC in progress.
+// - in the timer callback if (now_ms - t > watchdog_delay_ms) and
+// now_ms >= x and there is no incremental GC in progress.
+// The MemoryReducer starts incremental marking on this transition.
+//
+// RUN n t -> DONE now_ms happens:
+// - at end of the incremental GC initiated by the MemoryReducer if
+// (n > 1 and there is no more garbage to be collected) or
+// n == kMaxNumberOfGCs.
+// RUN n t -> WAIT n (now_ms + short_delay_ms) now_ms happens:
+// - at end of the incremental GC initiated by the MemoryReducer if
+// (n == 1 or there is more garbage to be collected) and
+// n < kMaxNumberOfGCs.
+//
+// now_ms is the current time,
+// t' is t if the current event is not a GC event and is now_ms otherwise,
+// long_delay_ms, short_delay_ms, and watchdog_delay_ms are constants.
+class MemoryReducer {
+ public:
+ enum Action { kDone, kWait, kRun };
+
+ struct State {
+ State(Action action, int started_gcs, double next_gc_start_ms,
+ double last_gc_time_ms)
+ : action(action),
+ started_gcs(started_gcs),
+ next_gc_start_ms(next_gc_start_ms),
+ last_gc_time_ms(last_gc_time_ms) {}
+ Action action;
+ int started_gcs;
+ double next_gc_start_ms;
+ double last_gc_time_ms;
+ };
+
+ enum EventType {
+ kTimer,
+ kMarkCompact,
+ kContextDisposed,
+ kBackgroundIdleNotification
+ };
+
+ struct Event {
+ EventType type;
+ double time_ms;
+ bool low_allocation_rate;
+ bool next_gc_likely_to_collect_more;
+ bool can_start_incremental_gc;
+ };
+
+ explicit MemoryReducer(Heap* heap)
+ : heap_(heap), state_(kDone, 0, 0.0, 0.0), pending_task_(nullptr) {}
+ // Callbacks.
+ void NotifyTimer(const Event& event);
+ void NotifyMarkCompact(const Event& event);
+ void NotifyContextDisposed(const Event& event);
+ void NotifyBackgroundIdleNotification(const Event& event);
+ // The step function that computes the next state from the current state and
+ // the incoming event.
+ static State Step(const State& state, const Event& event);
+ // Posts a timer task that will call NotifyTimer after the given delay.
+ void ScheduleTimer(double delay_ms);
+ void TearDown();
+ void ClearTask(v8::Task* task);
+
+ static bool WatchdogGC(const State& state, const Event& event);
+
+ static const int kLongDelayMs;
+ static const int kShortDelayMs;
+ static const int kWatchdogDelayMs;
+ static const int kMaxNumberOfGCs;
+
+ Heap* heap() { return heap_; }
+
+ bool ShouldGrowHeapSlowly() {
+ return state_.action == kDone && state_.started_gcs > 0;
+ }
+
+ private:
+ class TimerTask : public v8::Task {
+ public:
+ explicit TimerTask(MemoryReducer* memory_reducer)
+ : memory_reducer_(memory_reducer), heap_is_torn_down_(false) {}
+ virtual ~TimerTask() {
+ if (!heap_is_torn_down_) {
+ memory_reducer_->ClearTask(this);
+ }
+ }
+ void NotifyHeapTearDown() { heap_is_torn_down_ = true; }
+
+ private:
+ // v8::Task overrides.
+ void Run() override;
+ MemoryReducer* memory_reducer_;
+ bool heap_is_torn_down_;
+ DISALLOW_COPY_AND_ASSIGN(TimerTask);
+ };
+ Heap* heap_;
+ State state_;
+ TimerTask* pending_task_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_REDUCER_H_
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index c880eb7445..0103054822 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_VISITING_INL_H_
#define V8_OBJECTS_VISITING_INL_H_
+#include "src/heap/objects-visiting.h"
namespace v8 {
namespace internal {
@@ -140,8 +141,6 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
- table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
-
table_.Register(kVisitNativeContext, &VisitNativeContext);
table_.Register(kVisitAllocationSite, &VisitAllocationSite);
@@ -303,7 +302,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
// When map collection is enabled we have to mark through map's transitions
// and back pointers in a special way to make these links weak.
- if (FLAG_collect_maps && map_object->CanTransition()) {
+ if (map_object->CanTransition()) {
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(
@@ -330,12 +329,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
HeapObject* object) {
Heap* heap = map->GetHeap();
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(object);
- Object* undefined = heap->undefined_value();
// Enqueue weak cell in linked list of encountered weak collections.
// We can ignore weak cells with cleared values because they will always
// contain smi zero.
- if (weak_cell->next() == undefined && !weak_cell->cleared()) {
- weak_cell->set_next(heap->encountered_weak_cells());
+ if (weak_cell->next_cleared() && !weak_cell->cleared()) {
+ weak_cell->set_next(heap->encountered_weak_cells(),
+ UPDATE_WEAK_WRITE_BARRIER);
heap->set_encountered_weak_cells(weak_cell);
}
}
@@ -410,14 +409,15 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (FLAG_cleanup_code_caches_at_gc) {
shared->ClearTypeFeedbackInfoAtGCTime();
}
- if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache &&
+ if ((FLAG_flush_optimized_code_cache ||
+ heap->isolate()->serializer_enabled()) &&
!shared->optimized_code_map()->IsSmi()) {
// Always flush the optimized code map if requested by flag.
shared->ClearOptimizedCodeMap();
}
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
- if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+ if (!shared->optimized_code_map()->IsSmi()) {
// Add the shared function info holding an optimized code map to
// the code flusher for processing of code maps after marking.
collector->code_flusher()->AddOptimizedCodeMap(shared);
@@ -439,7 +439,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
return;
}
} else {
- if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+ if (!shared->optimized_code_map()->IsSmi()) {
// Flush optimized code map on major GCs without code flushing,
// needed because cached code doesn't contain breakpoints.
shared->ClearOptimizedCodeMap();
@@ -450,34 +450,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- ConstantPoolArray* array = ConstantPoolArray::cast(object);
- ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
- while (!code_iter.is_finished()) {
- Address code_entry = reinterpret_cast<Address>(
- array->RawFieldOfElementAt(code_iter.next_index()));
- StaticVisitor::VisitCodeEntry(heap, code_entry);
- }
-
- ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
- while (!heap_iter.is_finished()) {
- Object** slot = array->RawFieldOfElementAt(heap_iter.next_index());
- HeapObject* object = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, object);
- bool is_weak_object =
- (array->get_weak_object_state() ==
- ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
- Code::IsWeakObjectInOptimizedCode(object));
- if (!is_weak_object) {
- StaticVisitor::MarkObject(heap, object);
- }
- }
-}
-
-
-template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
HeapObject* object) {
Heap* heap = map->GetHeap();
@@ -571,22 +543,25 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
}
// Since descriptor arrays are potentially shared, ensure that only the
- // descriptors that belong to this map are marked. The first time a
- // non-empty descriptor array is marked, its header is also visited. The slot
- // holding the descriptor array will be implicitly recorded when the pointer
- // fields of this map are visited.
- DescriptorArray* descriptors = map->instance_descriptors();
- if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
- descriptors->length() > 0) {
- StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
- }
- int start = 0;
- int end = map->NumberOfOwnDescriptors();
- if (start < end) {
- StaticVisitor::VisitPointers(heap,
- descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
+ // descriptors that belong to this map are marked. The first time a non-empty
+ // descriptor array is marked, its header is also visited. The slot holding
+ // the descriptor array will be implicitly recorded when the pointer fields of
+ // this map are visited. Prototype maps don't keep track of transitions, so
+ // just mark the entire descriptor array.
+ if (!map->is_prototype_map()) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+ descriptors->length() > 0) {
+ StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(),
+ descriptors->GetDescriptorEndSlot(0));
+ }
+ int start = 0;
+ int end = map->NumberOfOwnDescriptors();
+ if (start < end) {
+ StaticVisitor::VisitPointers(heap,
+ descriptors->GetDescriptorStartSlot(start),
+ descriptors->GetDescriptorEndSlot(end));
+ }
}
// Mark the pointer fields of the Map. Since the transitions array has
@@ -604,13 +579,8 @@ void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
if (transitions->HasPrototypeTransitions()) {
- // Mark prototype transitions array but do not push it onto marking
- // stack, this will make references from it weak. We will clean dead
- // prototype transitions in ClearNonLiveReferences.
- Object** slot = transitions->GetPrototypeTransitionsSlot();
- HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ StaticVisitor::VisitPointer(heap,
+ transitions->GetPrototypeTransitionsSlot());
}
int num_transitions = TransitionArray::NumberOfTransitions(transitions);
@@ -623,19 +593,16 @@ void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
Code* code) {
- // Skip in absence of inlining.
- // TODO(turbofan): Revisit once we support inlining.
- if (code->is_turbofanned()) return;
// For optimized functions we should retain both the non-optimized version
// of the code and the non-optimized versions of all inlined functions.
// This is required to support bailing out from inlined code.
- DeoptimizationInputData* data =
+ DeoptimizationInputData* const data =
DeoptimizationInputData::cast(code->deoptimization_data());
- FixedArray* literals = data->LiteralArray();
- for (int i = 0, count = data->InlinedFunctionCount()->value(); i < count;
- i++) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- StaticVisitor::MarkObject(heap, inlined->shared()->code());
+ FixedArray* const literals = data->LiteralArray();
+ int const inlined_count = data->InlinedFunctionCount()->value();
+ for (int i = 0; i < inlined_count; ++i) {
+ StaticVisitor::MarkObject(
+ heap, SharedFunctionInfo::cast(literals->get(i))->code());
}
}
@@ -832,7 +799,6 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kDeoptimizationDataOffset);
IteratePointer(v, kTypeFeedbackInfoOffset);
IterateNextCodeLink(v, kNextCodeLinkOffset);
- IteratePointer(v, kConstantPoolOffset);
RelocIterator it(this, mode_mask);
Isolate* isolate = this->GetIsolate();
@@ -869,8 +835,6 @@ void Code::CodeIterateBody(Heap* heap) {
reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
StaticVisitor::VisitNextCodeLink(
heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
- StaticVisitor::VisitPointer(
- heap, reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
RelocIterator it(this, mode_mask);
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index de2cfddb28..49ce4f97ab 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -51,9 +51,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FIXED_DOUBLE_ARRAY_TYPE:
return kVisitFixedDoubleArray;
- case CONSTANT_POOL_ARRAY_TYPE:
- return kVisitConstantPoolArray;
-
case ODDBALL_TYPE:
return kVisitOddball;
@@ -138,6 +135,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
+ case FLOAT32X4_TYPE:
#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ARRAY_TYPE:
@@ -253,7 +251,7 @@ static void ClearWeakList(Heap* heap, Object* list) {
template <>
struct WeakListVisitor<JSFunction> {
static void SetWeakNext(JSFunction* function, Object* next) {
- function->set_next_function_link(next);
+ function->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object* WeakNext(JSFunction* function) {
@@ -271,7 +269,7 @@ struct WeakListVisitor<JSFunction> {
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code* code, Object* next) {
- code->set_next_code_link(next);
+ code->set_next_code_link(next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object* WeakNext(Code* code) { return code->next_code_link(); }
@@ -287,7 +285,7 @@ struct WeakListVisitor<Code> {
template <>
struct WeakListVisitor<Context> {
static void SetWeakNext(Context* context, Object* next) {
- context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WRITE_BARRIER);
+ context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object* WeakNext(Context* context) {
@@ -342,7 +340,7 @@ struct WeakListVisitor<Context> {
template <>
struct WeakListVisitor<AllocationSite> {
static void SetWeakNext(AllocationSite* obj, Object* next) {
- obj->set_weak_next(next);
+ obj->set_weak_next(next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); }
@@ -360,5 +358,5 @@ template Object* VisitWeakList<Context>(Heap* heap, Object* list,
template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
WeakObjectRetainer* retainer);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 87785e534d..1b788e893b 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -34,7 +34,6 @@ class StaticVisitorBase : public AllStatic {
V(FixedDoubleArray) \
V(FixedTypedArray) \
V(FixedFloat64Array) \
- V(ConstantPoolArray) \
V(NativeContext) \
V(AllocationSite) \
V(DataObject2) \
@@ -427,7 +426,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
- INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 78fda3c1e1..c2c4d12697 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -250,27 +250,27 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}
-HeapObject* PagedSpace::AllocateLinearlyDoubleAlign(int size_in_bytes) {
+HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
- int alignment_size = 0;
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
- if ((OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
- alignment_size = kPointerSize;
- size_in_bytes += alignment_size;
- }
- Address new_top = current_top + size_in_bytes;
+ Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return NULL;
allocation_info_.set_top(new_top);
- if (alignment_size > 0)
- return heap()->EnsureDoubleAligned(HeapObject::FromAddress(current_top),
- size_in_bytes);
+ if (filler_size > 0) {
+ *size_in_bytes += filler_size;
+ return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
return HeapObject::FromAddress(current_top);
}
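
[Editor's note] AllocateLinearlyAligned relies on Heap::GetFillToAlign to compute how many filler bytes bring the current top up to the requested alignment; the free-list path below instead reserves the worst case up front. A simplified model of that computation (byte-based for illustration, whereas the real API takes an AllocationAlignment enum):

    #include <cstdint>

    // Assumed simplification: alignment expressed in bytes (a power of two).
    static std::intptr_t FillToAlign(std::uintptr_t top,
                                     std::uintptr_t alignment) {
      std::uintptr_t misalignment = top & (alignment - 1);
      return misalignment == 0
                 ? 0
                 : static_cast<std::intptr_t>(alignment - misalignment);
    }

    // On a 32-bit heap, top == 0x1004 with 8-byte double alignment needs a
    // 4-byte (one word) filler, so the object proper starts at 0x1008; the
    // worst case is alignment - kPointerSize == 4 bytes.
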
// Raw allocation.
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object == NULL) {
@@ -293,23 +293,32 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
// Raw allocation.
-AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
- HeapObject* object = AllocateLinearlyDoubleAlign(size_in_bytes);
- int aligned_size_in_bytes = size_in_bytes + kPointerSize;
+ int allocation_size = size_in_bytes;
+ HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
if (object == NULL) {
- object = free_list_.Allocate(aligned_size_in_bytes);
+ // We don't know exactly how much filler we need for alignment until the
+ // space is allocated, so assume the worst case.
+ int filler_size = Heap::GetMaximumFillToAlign(alignment);
+ allocation_size += filler_size;
+ object = free_list_.Allocate(allocation_size);
if (object == NULL) {
- object = SlowAllocateRaw(aligned_size_in_bytes);
+ object = SlowAllocateRaw(allocation_size);
}
- if (object != NULL) {
- object = heap()->EnsureDoubleAligned(object, aligned_size_in_bytes);
+ if (object != NULL && filler_size != 0) {
+ object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
+ alignment);
+ // Filler objects are initialized, so mark only the aligned object memory
+ // as uninitialized.
+ allocation_size = size_in_bytes;
}
}
if (object != NULL) {
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
return object;
}
@@ -317,32 +326,38 @@ AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
}
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment == kDoubleAligned
+ ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+ : AllocateRawUnaligned(size_in_bytes);
+#else
+ return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
// -----------------------------------------------------------------------------
// NewSpace
-AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
- int alignment_size = 0;
- int aligned_size_in_bytes = 0;
-
- // If double alignment is required and top pointer is not aligned, we allocate
- // additional memory to take care of the alignment.
- if ((OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
- alignment_size += kPointerSize;
- }
- aligned_size_in_bytes = size_in_bytes + alignment_size;
+ int filler_size = Heap::GetFillToAlign(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes, true);
+ return SlowAllocateRaw(size_in_bytes, alignment);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- if (alignment_size > 0) {
- obj = heap()->EnsureDoubleAligned(obj, aligned_size_in_bytes);
+ if (filler_size > 0) {
+ obj = heap()->PrecedeWithFiller(obj, filler_size);
}
// The slow path above ultimately goes through AllocateRaw, so this suffices.
@@ -352,11 +367,11 @@ AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
}
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
Address old_top = allocation_info_.top();
if (allocation_info_.limit() - old_top < size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes, false);
+ return SlowAllocateRaw(size_in_bytes, kWordAligned);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
@@ -370,6 +385,18 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
}
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment == kDoubleAligned
+ ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+ : AllocateRawUnaligned(size_in_bytes);
+#else
+ return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
return static_cast<LargePage*>(chunk);
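
The fill computation threaded through both fast paths above reduces to one pointer test. A standalone sketch, assuming 4-byte pointers and 8-byte double alignment (the real Heap::GetFillToAlign may differ in detail):

    #include <cstdint>

    enum AllocationAlignment { kWordAligned, kDoubleAligned };

    // Bytes of filler needed so that `top` becomes 8-byte aligned; zero when
    // the request is word-aligned or `top` is already suitably aligned.
    static int GetFillToAlign(std::uintptr_t top, AllocationAlignment alignment) {
      const std::uintptr_t kDoubleAlignmentMask = 8 - 1;  // assumed alignment - 1
      const int kPointerSize = 4;                         // assumed 32-bit heap
      if (alignment == kDoubleAligned && (top & kDoubleAlignmentMask) != 0)
        return kPointerSize;
      return 0;
    }

On 64-bit hosts the AllocateRaw dispatch above collapses to the unaligned path, since doubles are already naturally aligned there.
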
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index f0e2cf6314..0806b2565d 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -114,7 +114,14 @@ bool CodeRange::SetUp(size_t requested) {
}
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+#ifdef V8_TARGET_ARCH_MIPS64
+  // To use pseudo-relative jumps such as the j/jal instructions, which take a
+  // 28-bit encoded immediate, all addresses must lie within a single
+  // 256MB-aligned region.
+ code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
+#else
code_range_ = new base::VirtualMemory(requested);
+#endif
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
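
The 256MB figure in the mips64 comment falls straight out of the j/jal encoding; illustrative arithmetic only:

    #include <cstdint>

    // MIPS j/jal encode a 26-bit instruction index that the hardware shifts
    // left by 2, giving a 28-bit byte offset within the current 256MB-aligned
    // region -- hence the aligned reservation above.
    constexpr std::uint64_t kJalIndexBits = 26;
    constexpr std::uint64_t kJalByteRange = std::uint64_t{1} << (kJalIndexBits + 2);
    static_assert(kJalByteRange == std::uint64_t{256} * 1024 * 1024,
                  "j/jal reach exactly one 256MB region");
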
@@ -355,7 +362,8 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
!isolate_->code_range()->contains(
static_cast<Address>(reservation->address())));
DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid());
+ !isolate_->code_range()->valid() || size <= Page::kPageSize);
+
reservation->Release();
}
@@ -645,7 +653,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
base::OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
+#ifdef V8_TARGET_ARCH_MIPS64
+  // On mips64, use the code range only for the large object space, to keep
+  // the address range within a 256MB memory region.
+ if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
+ reserve_area_size > CodePageAreaSize()) {
+#else
if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+#endif
base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
&chunk_size);
DCHECK(
@@ -1055,14 +1070,6 @@ int PagedSpace::CountTotalPages() {
}
-void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
- sizes->huge_size_ = page->available_in_huge_free_list();
- sizes->small_size_ = page->available_in_small_free_list();
- sizes->medium_size_ = page->available_in_medium_free_list();
- sizes->large_size_ = page->available_in_large_free_list();
-}
-
-
void PagedSpace::ResetFreeListStatistics() {
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
@@ -1459,31 +1466,32 @@ bool NewSpace::AddFreshPage() {
AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
- bool double_aligned) {
+ AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
+ int alignment_size = Heap::GetFillToAlign(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + alignment_size;
+
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
- int aligned_size = size_in_bytes;
- aligned_size += (double_aligned ? kPointerSize : 0);
- Address new_top = old_top + aligned_size;
+ Address new_top = old_top + aligned_size_in_bytes;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
- UpdateInlineAllocationLimit(aligned_size);
+ UpdateInlineAllocationLimit(aligned_size_in_bytes);
top_on_previous_step_ = new_top;
- if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
- return AllocateRaw(size_in_bytes);
+ if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
+ return AllocateRawAligned(size_in_bytes, alignment);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
- if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
- return AllocateRaw(size_in_bytes);
+ if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
+ return AllocateRawAligned(size_in_bytes, alignment);
} else {
return AllocationResult::Retry();
}
@@ -1570,7 +1578,7 @@ void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
- age_mark_ = start_;
+ age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
}
@@ -3044,8 +3052,7 @@ void LargeObjectSpace::Verify() {
// large object space.
CHECK(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
- object->IsFixedDoubleArray() || object->IsByteArray() ||
- object->IsConstantPoolArray());
+ object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
object->ObjectVerify();
@@ -3132,5 +3139,5 @@ void Page::Print() {
}
#endif // DEBUG
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 494d05c9c7..3461de3ef0 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -678,11 +678,11 @@ class MemoryChunk {
base::AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
- intptr_t available_in_small_free_list_;
- intptr_t available_in_medium_free_list_;
- intptr_t available_in_large_free_list_;
- intptr_t available_in_huge_free_list_;
- intptr_t non_available_small_blocks_;
+ int available_in_small_free_list_;
+ int available_in_medium_free_list_;
+ int available_in_large_free_list_;
+ int available_in_huge_free_list_;
+ int non_available_small_blocks_;
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -776,16 +776,22 @@ class Page : public MemoryChunk {
void ResetFreeListStatistics();
+ int LiveBytesFromFreeList() {
+ return area_size() - non_available_small_blocks_ -
+ available_in_small_free_list_ - available_in_medium_free_list_ -
+ available_in_large_free_list_ - available_in_huge_free_list_;
+ }
+
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
type name() { return name##_; } \
void set_##name(type name) { name##_ = name; } \
void add_##name(type name) { name##_ += name; }
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
+ FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
#undef FRAGMENTATION_STATS_ACCESSORS
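
For one field, the macro above expands to three one-line accessors; with the new int stat type the exact expansion for the first invocation is:

    int non_available_small_blocks() { return non_available_small_blocks_; }
    void set_non_available_small_blocks(int non_available_small_blocks) {
      non_available_small_blocks_ = non_available_small_blocks;
    }
    void add_non_available_small_blocks(int non_available_small_blocks) {
      non_available_small_blocks_ += non_available_small_blocks;
    }
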
@@ -1700,18 +1706,6 @@ class PagedSpace : public Space {
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
- struct SizeStats {
- intptr_t Total() {
- return small_size_ + medium_size_ + large_size_ + huge_size_;
- }
-
- intptr_t small_size_;
- intptr_t medium_size_;
- intptr_t large_size_;
- intptr_t huge_size_;
- };
-
- void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
void ResetFreeListStatistics();
// Sets the capacity, the available space and the wasted space to zero.
@@ -1764,12 +1758,18 @@ class PagedSpace : public Space {
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+ int size_in_bytes);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRawDoubleAligned(
- int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment);
+
+  // Allocate the requested number of bytes in the space, honoring the
+  // requested allocation alignment if needed.
+ MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment);
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
@@ -1931,9 +1931,11 @@ class PagedSpace : public Space {
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
- // Generic fast case allocation function that tries double aligned linear
- // allocation at the address denoted by top in allocation_info_.
- inline HeapObject* AllocateLinearlyDoubleAlign(int size_in_bytes);
+ // Generic fast case allocation function that tries aligned linear allocation
+ // at the address denoted by top in allocation_info_. Writes the aligned
+ // allocation size, which includes the filler size, to size_in_bytes.
+ inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
@@ -2252,9 +2254,6 @@ class SemiSpace : public Space {
friend class SemiSpaceIterator;
friend class NewSpacePageIterator;
-
- public:
- TRACK_MEMORY("SemiSpace")
};
@@ -2435,6 +2434,25 @@ class NewSpace : public Space {
// Return the available bytes without growing.
intptr_t Available() override { return Capacity() - Size(); }
+ intptr_t PagesFromStart(Address addr) {
+ return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
+ }
+
+ size_t AllocatedSinceLastGC() {
+ intptr_t allocated = top() - to_space_.age_mark();
+ if (allocated < 0) {
+ // Runtime has lowered the top below the age mark.
+ return 0;
+ }
+ // Correctly account for non-allocatable regions at the beginning of
+ // each page from the age_mark() to the top().
+ intptr_t pages =
+ PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
+ allocated -= pages * (NewSpacePage::kObjectStartOffset);
+ DCHECK(0 <= allocated && allocated <= Size());
+ return static_cast<size_t>(allocated);
+ }
+
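
A worked instance of the correction above, with illustrative sizes (the constants are assumptions for the example, not V8's actual values):

    #include <cstdint>

    constexpr std::intptr_t kPageSize = std::intptr_t{1} << 20;          // assumed 1MB pages
    constexpr std::intptr_t kObjectStartOffset = std::intptr_t{1} << 12; // assumed 4KB header
    constexpr std::intptr_t bottom = 0;
    constexpr std::intptr_t age_mark = bottom + kObjectStartOffset;      // on page 0
    constexpr std::intptr_t top = bottom + 2 * kPageSize + kObjectStartOffset + 64;  // on page 2
    constexpr std::intptr_t pages = top / kPageSize - age_mark / kPageSize;
    static_assert(pages == 2, "top lies two page boundaries past the age mark");
    // The raw distance over-counts by one unusable page header per boundary:
    constexpr std::intptr_t allocated = (top - age_mark) - pages * kObjectStartOffset;
    static_assert(allocated == 2 * (kPageSize - kObjectStartOffset) + 64,
                  "two full usable pages plus 64 bytes on the third");
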
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
DCHECK(to_space_.MaximumTotalCapacity() ==
@@ -2500,10 +2518,14 @@ class NewSpace : public Space {
return allocation_info_.limit_address();
}
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment));
+
MUST_USE_RESULT INLINE(
- AllocationResult AllocateRawDoubleAligned(int size_in_bytes));
+ AllocationResult AllocateRawUnaligned(int size_in_bytes));
- MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
@@ -2580,6 +2602,8 @@ class NewSpace : public Space {
return from_space_.Uncommit();
}
+ bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
+
inline intptr_t inline_allocation_limit_step() {
return inline_allocation_limit_step_;
}
@@ -2621,12 +2645,9 @@ class NewSpace : public Space {
HistogramInfo* promoted_histogram_;
MUST_USE_RESULT AllocationResult
- SlowAllocateRaw(int size_in_bytes, bool double_aligned);
+ SlowAllocateRaw(int size_in_bytes, AllocationAlignment alignment);
friend class SemiSpaceIterator;
-
- public:
- TRACK_MEMORY("NewSpace")
};
@@ -2640,9 +2661,6 @@ class OldSpace : public PagedSpace {
OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Executability executable)
: PagedSpace(heap, max_capacity, id, executable) {}
-
- public:
- TRACK_MEMORY("OldSpace")
};
@@ -2688,9 +2706,6 @@ class MapSpace : public PagedSpace {
}
const int max_map_space_pages_;
-
- public:
- TRACK_MEMORY("MapSpace")
};
@@ -2784,9 +2799,6 @@ class LargeObjectSpace : public Space {
HashMap chunk_map_;
friend class LargeObjectIterator;
-
- public:
- TRACK_MEMORY("LargeObjectSpace")
};
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 52c2333bc2..03f587f215 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -483,36 +483,47 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We iterate over objects that contain new space pointers only.
- bool may_contain_raw_values = heap_object->MayContainRawValues();
- if (!may_contain_raw_values) {
- Address obj_address = heap_object->address();
- const int start_offset = HeapObject::kHeaderSize;
- const int end_offset = heap_object->Size();
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(heap_object->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
-
- if (!has_only_tagged_fields) {
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset,
- &end_of_region_offset)) {
- FindPointersToNewSpaceInRegion(
- obj_address + offset,
- obj_address + end_of_region_offset, slot_callback);
- }
- offset = end_of_region_offset;
- }
- } else {
-#endif
+ Address obj_address = heap_object->address();
+ const int start_offset = HeapObject::kHeaderSize;
+ const int end_offset = heap_object->Size();
+
+ switch (heap_object->ContentType()) {
+ case HeapObjectContents::kTaggedValues: {
Address start_address = obj_address + start_offset;
Address end_address = obj_address + end_offset;
// Object has only tagged fields.
FindPointersToNewSpaceInRegion(start_address, end_address,
slot_callback);
-#if V8_DOUBLE_FIELDS_UNBOXING
+ break;
}
-#endif
+
+ case HeapObjectContents::kMixedValues: {
+ if (heap_object->IsFixedTypedArrayBase()) {
+ FindPointersToNewSpaceInRegion(
+ obj_address + FixedTypedArrayBase::kBasePointerOffset,
+ obj_address + FixedTypedArrayBase::kHeaderSize,
+ slot_callback);
+ } else if (FLAG_unbox_double_fields) {
+ LayoutDescriptorHelper helper(heap_object->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset,
+ &end_of_region_offset)) {
+ FindPointersToNewSpaceInRegion(
+ obj_address + offset,
+ obj_address + end_of_region_offset, slot_callback);
+ }
+ offset = end_of_region_offset;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case HeapObjectContents::kRawValues:
+ break;
}
}
}
@@ -579,5 +590,5 @@ void StoreBuffer::Compact() {
}
heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
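
The kMixedValues arm above exists because a few object kinds interleave a single tagged slot with raw payload. An illustrative layout for the FixedTypedArrayBase case (field order follows the diff; the concrete layout is an assumption for illustration):

    // Only the one tagged base_pointer slot can hold a heap pointer, which is
    // why the scan above covers just [kBasePointerOffset, kHeaderSize).
    struct FixedTypedArrayBaseSketch {
      void* map;           // tagged map word (object header)
      void* base_pointer;  // tagged -> the single slot worth visiting
      // ... untagged element data follows; it can hold no new-space pointers ...
    };
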
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index 729317eec6..30c218f82a 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -231,12 +231,15 @@ class BoundsCheckBbData: public ZoneObject {
HArithmeticBinaryOperation::cast(index_raw);
HValue* left_input = index->left();
HValue* right_input = index->right();
+ HValue* context = index->context();
bool must_move_index = false;
bool must_move_left_input = false;
bool must_move_right_input = false;
+ bool must_move_context = false;
for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
if (cursor == left_input) must_move_left_input = true;
if (cursor == right_input) must_move_right_input = true;
+ if (cursor == context) must_move_context = true;
if (cursor == index) must_move_index = true;
if (cursor->previous() == NULL) {
cursor = cursor->block()->dominator()->end();
@@ -258,6 +261,11 @@ class BoundsCheckBbData: public ZoneObject {
HConstant::cast(right_input)->Unlink();
HConstant::cast(right_input)->InsertBefore(index);
}
+ if (must_move_context) {
+ // Contexts are always constants.
+ HConstant::cast(context)->Unlink();
+ HConstant::cast(context)->InsertBefore(index);
+ }
} else if (index_raw->IsConstant()) {
HConstant* index = HConstant::cast(index_raw);
bool must_move = false;
@@ -465,4 +473,5 @@ void HBoundsCheckEliminationPhase::PostProcessBlock(
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-bch.cc b/deps/v8/src/hydrogen-bch.cc
index 875c18c3f9..a4c0ae4e25 100644
--- a/deps/v8/src/hydrogen-bch.cc
+++ b/deps/v8/src/hydrogen-bch.cc
@@ -375,4 +375,5 @@ void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc
index c15b8d99c0..25911eb353 100644
--- a/deps/v8/src/hydrogen-canonicalize.cc
+++ b/deps/v8/src/hydrogen-canonicalize.cc
@@ -52,4 +52,5 @@ void HCanonicalizePhase::Run() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index cdfedb4e51..74be2e42f4 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -909,4 +909,5 @@ void HCheckEliminationPhase::PrintStats() {
PRINT_STAT(transitions);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
index c653fc1b60..1d5bb7841a 100644
--- a/deps/v8/src/hydrogen-dce.cc
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -102,4 +102,5 @@ void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-dehoist.cc b/deps/v8/src/hydrogen-dehoist.cc
index 51dda37e8d..e521c25cda 100644
--- a/deps/v8/src/hydrogen-dehoist.cc
+++ b/deps/v8/src/hydrogen-dehoist.cc
@@ -67,4 +67,5 @@ void HDehoistIndexComputationsPhase::Run() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-environment-liveness.cc b/deps/v8/src/hydrogen-environment-liveness.cc
index 8e9018fb4e..7cc4dc04a8 100644
--- a/deps/v8/src/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/hydrogen-environment-liveness.cc
@@ -228,4 +228,5 @@ bool HEnvironmentLivenessAnalysisPhase::VerifyClosures(
}
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
index e22dd88831..3613737192 100644
--- a/deps/v8/src/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -325,4 +325,5 @@ void HEscapeAnalysisPhase::Run() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index 3836e32941..31a2cd68a5 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -894,4 +894,5 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-infer-representation.cc b/deps/v8/src/hydrogen-infer-representation.cc
index 3815ba514e..6687aefed8 100644
--- a/deps/v8/src/hydrogen-infer-representation.cc
+++ b/deps/v8/src/hydrogen-infer-representation.cc
@@ -158,4 +158,5 @@ void HInferRepresentationPhase::Run() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-infer-types.cc b/deps/v8/src/hydrogen-infer-types.cc
index e69b4fad20..ea69662b40 100644
--- a/deps/v8/src/hydrogen-infer-types.cc
+++ b/deps/v8/src/hydrogen-infer-types.cc
@@ -51,4 +51,5 @@ void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 2db3c43c68..2843195e86 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -6,6 +6,7 @@
#include "src/base/bits.h"
#include "src/double.h"
+#include "src/elements.h"
#include "src/factory.h"
#include "src/hydrogen-infer-representation.h"
@@ -837,7 +838,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kStoreNamedGeneric:
case HValue::kStringCharCodeAt:
case HValue::kStringCharFromCode:
- case HValue::kTailCallThroughMegamorphicCache:
case HValue::kThisFunction:
case HValue::kTypeofIsAndBranch:
case HValue::kUnknownOSRValue:
@@ -874,6 +874,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kLoadKeyed:
case HValue::kLoadKeyedGeneric:
case HValue::kMathFloorOfDiv:
+ case HValue::kMaybeGrowElements:
case HValue::kMod:
case HValue::kMul:
case HValue::kOsrEntry:
@@ -1626,6 +1627,9 @@ void HCheckInstanceType::GetCheckInterval(InstanceType* first,
case IS_JS_ARRAY:
*first = *last = JS_ARRAY_TYPE;
return;
+ case IS_JS_DATE:
+ *first = *last = JS_DATE_TYPE;
+ return;
default:
UNREACHABLE();
}
@@ -1695,6 +1699,8 @@ const char* HCheckInstanceType::GetCheckName() const {
switch (check_) {
case IS_SPEC_OBJECT: return "object";
case IS_JS_ARRAY: return "array";
+ case IS_JS_DATE:
+ return "date";
case IS_STRING: return "string";
case IS_INTERNALIZED_STRING: return "internalized_string";
}
@@ -1716,22 +1722,6 @@ std::ostream& HCallStub::PrintDataTo(std::ostream& os) const { // NOLINT
}
-Code::Flags HTailCallThroughMegamorphicCache::flags() const {
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- return code_flags;
-}
-
-
-std::ostream& HTailCallThroughMegamorphicCache::PrintDataTo(
- std::ostream& os) const { // NOLINT
- for (int i = 0; i < OperandCount(); i++) {
- os << NameOf(OperandAt(i)) << " ";
- }
- return os << "flags: " << flags();
-}
-
-
std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const { // NOLINT
const char* type = "expression";
if (environment_->is_local_index(index_)) type = "local";
@@ -2975,7 +2965,7 @@ std::ostream& HConstant::PrintDataTo(std::ostream& os) const { // NOLINT
os << reinterpret_cast<void*>(external_reference_value_.address()) << " ";
} else {
// The handle() method is silently and lazily mutating the object.
- Handle<Object> h = const_cast<HConstant*>(this)->handle(Isolate::Current());
+ Handle<Object> h = const_cast<HConstant*>(this)->handle(isolate());
os << Brief(*h) << " ";
if (HasStableMapValue()) os << "[stable-map] ";
if (HasObjectMap()) os << "[map " << *ObjectMap().handle() << "] ";
@@ -3414,7 +3404,7 @@ void HCompareNumericAndBranch::InferRepresentation(
// (false). Therefore, any comparisons other than ordered relational
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
- if (Token::IsOrderedRelationalCompareOp(token_)) {
+ if (Token::IsOrderedRelationalCompareOp(token_) && !is_strong(strength())) {
SetFlag(kAllowUndefinedAsNaN);
}
}
@@ -3982,22 +3972,21 @@ bool HStoreKeyed::NeedsCanonicalization() {
#define H_CONSTANT_DOUBLE(val) \
HConstant::New(isolate, zone, context, static_cast<double>(val))
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
- HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
- HValue* left, HValue* right, \
- LanguageMode language_mode) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (IsInt32Double(double_res)) { \
- return H_CONSTANT_INT(double_res); \
- } \
- return H_CONSTANT_DOUBLE(double_res); \
- } \
- } \
- return new (zone) HInstr(context, left, right, language_mode); \
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
+ HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
+ HValue* left, HValue* right, Strength strength) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
+ if (IsInt32Double(double_res)) { \
+ return H_CONSTANT_INT(double_res); \
+ } \
+ return H_CONSTANT_DOUBLE(double_res); \
+ } \
+ } \
+ return new (zone) HInstr(context, left, right, strength); \
}
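
Expanded for a single instruction, the rewritten macro reads as below (HAdd with op '+'; Strength merely threads through to the created node and does not affect the folding):

    HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context,
                            HValue* left, HValue* right, Strength strength) {
      if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
        HConstant* c_left = HConstant::cast(left);
        HConstant* c_right = HConstant::cast(right);
        if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
          double double_res = c_left->DoubleValue() + c_right->DoubleValue();
          if (IsInt32Double(double_res)) return H_CONSTANT_INT(double_res);
          return H_CONSTANT_DOUBLE(double_res);
        }
      }
      return new (zone) HAdd(context, left, right, strength);
    }
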
@@ -4009,8 +3998,7 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- LanguageMode language_mode,
+ HValue* left, HValue* right, Strength strength,
PretenureFlag pretenure_flag,
StringAddFlags flags,
Handle<AllocationSite> allocation_site) {
@@ -4028,9 +4016,8 @@ HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
}
}
}
- return new(zone) HStringAdd(
- context, left, right, language_mode, pretenure_flag, flags,
- allocation_site);
+ return new (zone) HStringAdd(context, left, right, strength, pretenure_flag,
+ flags, allocation_site);
}
@@ -4228,8 +4215,7 @@ HInstruction* HMathMinMax::New(Isolate* isolate, Zone* zone, HValue* context,
HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- LanguageMode language_mode) {
+ HValue* left, HValue* right, Strength strength) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4248,13 +4234,12 @@ HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
}
}
}
- return new(zone) HMod(context, left, right, language_mode);
+ return new (zone) HMod(context, left, right, strength);
}
HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- LanguageMode language_mode) {
+ HValue* left, HValue* right, Strength strength) {
// If left and right are constant values, try to return a constant value.
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
@@ -4273,13 +4258,13 @@ HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
}
}
}
- return new(zone) HDiv(context, left, right, language_mode);
+ return new (zone) HDiv(context, left, right, strength);
}
HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
Token::Value op, HValue* left, HValue* right,
- LanguageMode language_mode) {
+ Strength strength) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4304,22 +4289,21 @@ HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
return H_CONSTANT_INT(result);
}
}
- return new(zone) HBitwise(context, op, left, right, language_mode);
+ return new (zone) HBitwise(context, op, left, right, strength);
}
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
- HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
- HValue* left, HValue* right, \
- LanguageMode language_mode) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT(result); \
- } \
- } \
- return new (zone) HInstr(context, left, right, language_mode); \
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
+ HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
+ HValue* left, HValue* right, Strength strength) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ return H_CONSTANT_INT(result); \
+ } \
+ } \
+ return new (zone) HInstr(context, left, right, strength); \
}
@@ -4332,8 +4316,7 @@ c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- LanguageMode language_mode) {
+ HValue* left, HValue* right, Strength strength) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4346,7 +4329,7 @@ HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
}
}
- return new(zone) HShr(context, left, right, language_mode);
+ return new (zone) HShr(context, left, right, strength);
}
@@ -4730,4 +4713,5 @@ std::ostream& operator<<(std::ostream& os, const HObjectAccess& access) {
return os << "@" << access.offset();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index fcd7c2c24b..2cac0eb460 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -127,6 +127,7 @@ class LChunkBuilder;
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
+ V(MaybeGrowElements) \
V(Mod) \
V(Mul) \
V(OsrEntry) \
@@ -155,7 +156,6 @@ class LChunkBuilder;
V(StringCharFromCode) \
V(StringCompareAndBranch) \
V(Sub) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -965,6 +965,12 @@ std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
return new (zone) I(context, p1, p2, p3, p4, p5); \
}
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4, P5 p5, P6 p6) { \
+ return new (zone) I(context, p1, p2, p3, p4, p5, p6); \
+ }
+
// A helper class to represent per-operand position information attached to
// the HInstruction in the compact form. Uses tagging to distinguish between
@@ -1952,6 +1958,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
Handle<JSFunction> closure() const { return closure_; }
HConstant* closure_context() const { return closure_context_; }
int arguments_count() const { return arguments_count_; }
@@ -1979,6 +1986,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
Variable* arguments_var, HArgumentsObject* arguments_object,
Zone* zone)
: return_id_(return_id),
+ shared_(handle(closure->shared())),
closure_(closure),
closure_context_(closure_context),
arguments_count_(arguments_count),
@@ -1991,6 +1999,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
return_targets_(2, zone) {}
BailoutId return_id_;
+ Handle<SharedFunctionInfo> shared_;
Handle<JSFunction> closure_;
HConstant* closure_context_;
int arguments_count_;
@@ -2252,9 +2261,9 @@ class HCallWithDescriptor final : public HInstruction {
CallInterfaceDescriptor descriptor,
const Vector<HValue*>& operands,
CallMode call_mode = NORMAL_CALL) {
- DCHECK(operands.length() == descriptor.GetEnvironmentLength());
HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
target, argument_count, descriptor, operands, call_mode, zone);
+ DCHECK(operands.length() == res->GetParameterCount());
return res;
}
@@ -2262,12 +2271,13 @@ class HCallWithDescriptor final : public HInstruction {
HValue* OperandAt(int index) const final { return values_[index]; }
Representation RequiredInputRepresentation(int index) final {
- if (index == 0) {
+ if (index == 0 || index == 1) {
+ // Target + context
return Representation::Tagged();
} else {
- int par_index = index - 1;
- DCHECK(par_index < descriptor_.GetEnvironmentLength());
- return descriptor_.GetParameterRepresentation(par_index);
+ int par_index = index - 2;
+ DCHECK(par_index < GetParameterCount());
+ return RepresentationFromType(descriptor_.GetParameterType(par_index));
}
}
@@ -2298,7 +2308,7 @@ class HCallWithDescriptor final : public HInstruction {
const Vector<HValue*>& operands, CallMode call_mode,
Zone* zone)
: descriptor_(descriptor),
- values_(descriptor.GetEnvironmentLength() + 1, zone),
+ values_(GetParameterCount() + 1, zone),
argument_count_(argument_count),
call_mode_(call_mode) {
// We can only tail call without any stack arguments.
@@ -2316,6 +2326,10 @@ class HCallWithDescriptor final : public HInstruction {
SetOperandAt(values_.length() - 1, v);
}
+ int GetParameterCount() const {
+ return descriptor_.GetRegisterParameterCount() + 1;
+ }
+
void InternalSetOperandAt(int index, HValue* value) final {
values_[index] = value;
}
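
The new index math in RequiredInputRepresentation and the values_ sizing assume the operand layout sketched here (slot roles follow the diff; the helper is illustrative):

    // OperandAt(0)     -> call target          (tagged)
    // OperandAt(1)     -> context              (tagged)
    // OperandAt(2 + i) -> register parameter i (representation from descriptor)
    int GetParameterCountSketch(int register_parameter_count) {
      return register_parameter_count + 1;  // context is counted as a parameter
    }
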
@@ -2866,9 +2880,10 @@ class HCheckInstanceType final : public HUnaryOperation {
enum Check {
IS_SPEC_OBJECT,
IS_JS_ARRAY,
+ IS_JS_DATE,
IS_STRING,
IS_INTERNALIZED_STRING,
- LAST_INTERVAL_CHECK = IS_JS_ARRAY
+ LAST_INTERVAL_CHECK = IS_JS_DATE
};
DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
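
GetCheckInterval in hydrogen-instructions.cc above maps each interval check to a [first, last] instance-type pair, and IS_JS_DATE collapses to the single JS_DATE_TYPE. A minimal sketch of how such an interval can be tested, assuming contiguous 8-bit instance types (how the generated code actually tests it is not shown in this diff):

    #include <cstdint>

    // One unsigned comparison after biasing by `first`; a one-type interval
    // such as [JS_DATE_TYPE, JS_DATE_TYPE] degenerates to an equality test.
    static bool InInterval(std::uint8_t type, std::uint8_t first, std::uint8_t last) {
      return static_cast<std::uint8_t>(type - first) <=
             static_cast<std::uint8_t>(last - first);
    }
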
@@ -2883,6 +2898,8 @@ class HCheckInstanceType final : public HUnaryOperation {
switch (check_) {
case IS_SPEC_OBJECT: return HType::JSObject();
case IS_JS_ARRAY: return HType::JSArray();
+ case IS_JS_DATE:
+ return HType::JSObject();
case IS_STRING: return HType::String();
case IS_INTERNALIZED_STRING: return HType::String();
}
@@ -3768,8 +3785,9 @@ class HConstant final : public HTemplateInstruction<0> {
class HBinaryOperation : public HTemplateInstruction<3> {
public:
HBinaryOperation(HValue* context, HValue* left, HValue* right,
- LanguageMode language_mode, HType type = HType::Tagged())
- : HTemplateInstruction<3>(type), language_mode_(language_mode),
+ Strength strength, HType type = HType::Tagged())
+ : HTemplateInstruction<3>(type),
+ strength_(strength),
observed_output_representation_(Representation::None()) {
DCHECK(left != NULL && right != NULL);
SetOperandAt(0, context);
@@ -3782,7 +3800,7 @@ class HBinaryOperation : public HTemplateInstruction<3> {
HValue* context() const { return OperandAt(0); }
HValue* left() const { return OperandAt(1); }
HValue* right() const { return OperandAt(2); }
- LanguageMode language_mode() const { return language_mode_; }
+ Strength strength() const { return strength_; }
// True if switching left and right operands likely generates better code.
bool AreOperandsBetterSwitched() {
@@ -3858,15 +3876,13 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
}
- LanguageMode language_mode() {
- return language_mode_;
- }
+ Strength strength() { return strength_; }
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
bool IgnoreObservedOutputRepresentation(Representation current_rep);
- LanguageMode language_mode_;
+ Strength strength_;
Representation observed_input_representation_[2];
Representation observed_output_representation_;
@@ -4136,12 +4152,11 @@ class HBoundsCheckBaseIndexInformation final : public HTemplateInstruction<2> {
class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
- LanguageMode language_mode,
- HType type = HType::TaggedNumber())
- : HBinaryOperation(context, left, right, language_mode, type) {
+ Strength strength, HType type = HType::TaggedNumber())
+ : HBinaryOperation(context, left, right, strength, type) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
- SetFlag(kAllowUndefinedAsNaN);
+ if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
SetAllSideEffects();
}
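
The is_strong gate above (and the matching one in HArithmeticBinaryOperation below) hinges on a small strength enum; a sketch of the assumed definitions:

    // Weak (classic-mode) operations may silently treat undefined as NaN;
    // strong-mode operations must not, so kAllowUndefinedAsNaN stays unset and
    // an undefined operand forces a deoptimization instead.
    enum class Strength { WEAK, STRONG };
    inline bool is_strong(Strength strength) { return strength == Strength::STRONG; }
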
@@ -4197,7 +4212,7 @@ class HMathFloorOfDiv final : public HBinaryOperation {
private:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, SLOPPY) {
+ : HBinaryOperation(context, left, right, Strength::WEAK) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
@@ -4217,12 +4232,12 @@ class HMathFloorOfDiv final : public HBinaryOperation {
class HArithmeticBinaryOperation : public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
- LanguageMode language_mode)
- : HBinaryOperation(context, left, right, language_mode,
+ Strength strength)
+ : HBinaryOperation(context, left, right, strength,
HType::TaggedNumber()) {
SetAllSideEffects();
SetFlag(kFlexibleRepresentation);
- SetFlag(kAllowUndefinedAsNaN);
+ if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
}
void RepresentationChanged(Representation to) override {
@@ -4247,10 +4262,9 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
class HCompareGeneric final : public HBinaryOperation {
public:
static HCompareGeneric* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Token::Value token,
- LanguageMode language_mode = SLOPPY) {
- return new(zone) HCompareGeneric(context, left, right, token,
- language_mode);
+ HValue* left, HValue* right, Token::Value token,
+ Strength strength = Strength::WEAK) {
+ return new (zone) HCompareGeneric(context, left, right, token, strength);
}
Representation RequiredInputRepresentation(int index) override {
@@ -4265,12 +4279,9 @@ class HCompareGeneric final : public HBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
private:
- HCompareGeneric(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token,
- LanguageMode language_mode)
- : HBinaryOperation(context, left, right, language_mode, HType::Boolean()),
+ HCompareGeneric(HValue* context, HValue* left, HValue* right,
+ Token::Value token, Strength strength)
+ : HBinaryOperation(context, left, right, strength, HType::Boolean()),
token_(token) {
DCHECK(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
@@ -4283,11 +4294,22 @@ class HCompareGeneric final : public HBinaryOperation {
class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- DECLARE_INSTRUCTION_FACTORY_P3(HCompareNumericAndBranch,
- HValue*, HValue*, Token::Value);
- DECLARE_INSTRUCTION_FACTORY_P5(HCompareNumericAndBranch,
- HValue*, HValue*, Token::Value,
- HBasicBlock*, HBasicBlock*);
+ static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* left,
+ HValue* right, Token::Value token,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL,
+ Strength strength = Strength::WEAK) {
+ return new (zone) HCompareNumericAndBranch(left, right, token, true_target,
+ false_target, strength);
+ }
+ static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* left,
+ HValue* right, Token::Value token,
+ Strength strength) {
+ return new (zone)
+ HCompareNumericAndBranch(left, right, token, NULL, NULL, strength);
+ }
HValue* left() const { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
@@ -4310,6 +4332,8 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
bool KnownSuccessorBlock(HBasicBlock** block) override;
+ Strength strength() const { return strength_; }
+
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
void SetOperandPositions(Zone* zone, SourcePosition left_pos,
@@ -4321,12 +4345,10 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
- HCompareNumericAndBranch(HValue* left,
- HValue* right,
- Token::Value token,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : token_(token) {
+ HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token,
+ HBasicBlock* true_target, HBasicBlock* false_target,
+ Strength strength)
+ : token_(token), strength_(strength) {
SetFlag(kFlexibleRepresentation);
DCHECK(Token::IsCompareOp(token));
SetOperandAt(0, left);
@@ -4337,6 +4359,7 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
Representation observed_input_representation_[2];
Token::Value token_;
+ Strength strength_;
};
@@ -4729,7 +4752,8 @@ class HInstanceOf final : public HBinaryOperation {
private:
HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, SLOPPY, HType::Boolean()) {
+ : HBinaryOperation(context, left, right, Strength::WEAK,
+ HType::Boolean()) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
@@ -4808,7 +4832,7 @@ class HAdd final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
@@ -4859,8 +4883,8 @@ class HAdd final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HAdd(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HArithmeticBinaryOperation(context, left, right, language_mode) {
+ HAdd(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HArithmeticBinaryOperation(context, left, right, strength) {
SetFlag(kCanOverflow);
}
};
@@ -4870,7 +4894,7 @@ class HSub final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
HValue* Canonicalize() override;
@@ -4891,8 +4915,8 @@ class HSub final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HSub(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HArithmeticBinaryOperation(context, left, right, language_mode) {
+ HSub(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HArithmeticBinaryOperation(context, left, right, strength) {
SetFlag(kCanOverflow);
}
};
@@ -4902,13 +4926,13 @@ class HMul final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY) {
- HInstruction* instr = HMul::New(isolate, zone, context, left, right,
- language_mode);
+ Strength strength = Strength::WEAK) {
+ HInstruction* instr =
+ HMul::New(isolate, zone, context, left, right, strength);
if (!instr->IsMul()) return instr;
HMul* mul = HMul::cast(instr);
// TODO(mstarzinger): Prevent bailout on minus zero for imul.
@@ -4938,8 +4962,8 @@ class HMul final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HMul(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HArithmeticBinaryOperation(context, left, right, language_mode) {
+ HMul(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HArithmeticBinaryOperation(context, left, right, strength) {
SetFlag(kCanOverflow);
}
};
@@ -4949,7 +4973,7 @@ class HMod final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
HValue* Canonicalize() override;
@@ -4968,12 +4992,8 @@ class HMod final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HMod(HValue* context,
- HValue* left,
- HValue* right,
- LanguageMode language_mode) : HArithmeticBinaryOperation(context, left,
- right,
- language_mode) {
+ HMod(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HArithmeticBinaryOperation(context, left, right, strength) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
SetFlag(kLeftCanBeNegative);
@@ -4985,7 +5005,7 @@ class HDiv final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
HValue* Canonicalize() override;
@@ -5004,8 +5024,8 @@ class HDiv final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HDiv(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HArithmeticBinaryOperation(context, left, right, language_mode) {
+ HDiv(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HArithmeticBinaryOperation(context, left, right, strength) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
@@ -5051,8 +5071,8 @@ class HMathMinMax final : public HArithmeticBinaryOperation {
private:
HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
- : HArithmeticBinaryOperation(context, left, right, SLOPPY),
- operation_(op) { }
+ : HArithmeticBinaryOperation(context, left, right, Strength::WEAK),
+ operation_(op) {}
Operation operation_;
};
@@ -5062,7 +5082,7 @@ class HBitwise final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
Token::Value op, HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
Token::Value op() const { return op_; }
@@ -5082,13 +5102,9 @@ class HBitwise final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HBitwise(HValue* context,
- Token::Value op,
- HValue* left,
- HValue* right,
- LanguageMode language_mode)
- : HBitwiseBinaryOperation(context, left, right, language_mode),
- op_(op) {
+ HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right,
+ Strength strength)
+ : HBitwiseBinaryOperation(context, left, right, strength), op_(op) {
DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
// BIT_AND with a smi-range positive value will always unset the
// entire sign-extension of the smi-sign.
@@ -5123,7 +5139,7 @@ class HShl final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
Range* InferRange(Zone* zone) override;
@@ -5144,8 +5160,8 @@ class HShl final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HShl(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HBitwiseBinaryOperation(context, left, right, language_mode) { }
+ HShl(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HBitwiseBinaryOperation(context, left, right, strength) {}
};
@@ -5153,7 +5169,7 @@ class HShr final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
bool TryDecompose(DecompositionResult* decomposition) override {
if (right()->IsInteger32Constant()) {
@@ -5182,8 +5198,8 @@ class HShr final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HShr(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HBitwiseBinaryOperation(context, left, right, language_mode) { }
+ HShr(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HBitwiseBinaryOperation(context, left, right, strength) {}
};
@@ -5191,7 +5207,7 @@ class HSar final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY);
+ Strength strength = Strength::WEAK);
bool TryDecompose(DecompositionResult* decomposition) override {
if (right()->IsInteger32Constant()) {
@@ -5220,8 +5236,8 @@ class HSar final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HSar(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HBitwiseBinaryOperation(context, left, right, language_mode) { }
+ HSar(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HBitwiseBinaryOperation(context, left, right, strength) {}
};
@@ -5229,8 +5245,8 @@ class HRor final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
- LanguageMode language_mode = SLOPPY) {
- return new(zone) HRor(context, left, right, language_mode);
+ Strength strength = Strength::WEAK) {
+ return new (zone) HRor(context, left, right, strength);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -5246,8 +5262,8 @@ class HRor final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HRor(HValue* context, HValue* left, HValue* right, LanguageMode language_mode)
- : HBitwiseBinaryOperation(context, left, right, language_mode) {
+ HRor(HValue* context, HValue* left, HValue* right, Strength strength)
+ : HBitwiseBinaryOperation(context, left, right, strength) {
ChangeRepresentation(Representation::Integer32());
}
};
@@ -5346,44 +5362,6 @@ class HCallStub final : public HUnaryCall {
};
-class HTailCallThroughMegamorphicCache final : public HInstruction {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HTailCallThroughMegamorphicCache,
- HValue*, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- virtual int OperandCount() const final override { return 3; }
- virtual HValue* OperandAt(int i) const final override { return inputs_[i]; }
-
- HValue* context() const { return OperandAt(0); }
- HValue* receiver() const { return OperandAt(1); }
- HValue* name() const { return OperandAt(2); }
- Code::Flags flags() const;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache)
-
- protected:
- virtual void InternalSetOperandAt(int i, HValue* value) final override {
- inputs_[i] = value;
- }
-
- private:
- HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
- HValue* name) {
- SetOperandAt(0, context);
- SetOperandAt(1, receiver);
- SetOperandAt(2, name);
- }
-
- EmbeddedContainer<HValue*, 3> inputs_;
-};
-
-
class HUnknownOSRValue final : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
@@ -5433,10 +5411,9 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return FLAG_vector_ics; }
+ bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_ics);
feedback_vector_ = vector;
slot_ = slot;
}
@@ -5968,6 +5945,11 @@ class HObjectAccess final {
Representation::Smi());
}
+ static HObjectAccess ForFixedTypedArrayBaseBasePointer() {
+ return HObjectAccess(kInobject, FixedTypedArrayBase::kBasePointerOffset,
+ Representation::Tagged());
+ }
+
static HObjectAccess ForStringHashField() {
return HObjectAccess(kInobject,
String::kHashFieldOffset,
@@ -6015,6 +5997,11 @@ class HObjectAccess final {
SharedFunctionInfo::kOptimizedCodeMapOffset);
}
+ static HObjectAccess ForOptimizedCodeMapSharedCode() {
+ return HObjectAccess(kInobject, FixedArray::OffsetOfElementAt(
+ SharedFunctionInfo::kSharedCodeIndex));
+ }
+
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}
@@ -6427,12 +6414,13 @@ class HLoadNamedField final : public HTemplateInstruction<2> {
class HLoadNamedGeneric final : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadNamedGeneric, HValue*,
- Handle<Object>, InlineCacheState);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
+ Handle<Name>, LanguageMode,
+ InlineCacheState);
HValue* context() const { return OperandAt(0); }
HValue* object() const { return OperandAt(1); }
- Handle<Object> name() const { return name_; }
+ Handle<Name> name() const { return name_; }
InlineCacheState initialization_state() const {
return initialization_state_;
@@ -6441,10 +6429,9 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return FLAG_vector_ics; }
+ bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_ics);
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6457,11 +6444,15 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
+ LanguageMode language_mode() const { return language_mode_; }
+
private:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name,
+ HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
+ LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
slot_(FeedbackVectorICSlot::Invalid()),
+ language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, context);
SetOperandAt(1, object);
@@ -6469,9 +6460,10 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
SetAllSideEffects();
}
- Handle<Object> name_;
+ Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorICSlot slot_;
+ LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -6711,8 +6703,9 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadKeyedGeneric, HValue*,
- HValue*, InlineCacheState);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
+ HValue*, LanguageMode,
+ InlineCacheState);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
@@ -6724,13 +6717,11 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
return feedback_vector_;
}
bool HasVectorAndSlot() const {
- DCHECK(!FLAG_vector_ics || initialization_state_ == MEGAMORPHIC ||
- !feedback_vector_.is_null());
+ DCHECK(initialization_state_ == MEGAMORPHIC || !feedback_vector_.is_null());
return !feedback_vector_.is_null();
}
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_ics);
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6746,11 +6737,15 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
+ LanguageMode language_mode() const { return language_mode_; }
+
private:
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
+ LanguageMode language_mode,
InlineCacheState initialization_state)
: slot_(FeedbackVectorICSlot::Invalid()),
- initialization_state_(initialization_state) {
+ initialization_state_(initialization_state),
+ language_mode_(language_mode) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
SetOperandAt(1, key);
@@ -6761,6 +6756,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorICSlot slot_;
InlineCacheState initialization_state_;
+ LanguageMode language_mode_;
};
@@ -6836,7 +6832,7 @@ class HStoreNamedField final : public HTemplateInstruction<3> {
Handle<Map> transition_map() const {
if (has_transition()) {
return Handle<Map>::cast(
- HConstant::cast(transition())->handle(Isolate::Current()));
+ HConstant::cast(transition())->handle(isolate()));
} else {
return Handle<Map>();
}
@@ -6926,12 +6922,12 @@ class HStoreNamedField final : public HTemplateInstruction<3> {
class HStoreNamedGeneric final : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreNamedGeneric, HValue*,
- Handle<String>, HValue*,
+ Handle<Name>, HValue*,
LanguageMode, InlineCacheState);
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
- Handle<String> name() const { return name_; }
+ Handle<Name> name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
InlineCacheState initialization_state() const {
return initialization_state_;
@@ -6943,13 +6939,25 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
return Representation::Tagged();
}
+ FeedbackVectorICSlot slot() const { return slot_; }
+ Handle<TypeFeedbackVector> feedback_vector() const {
+ return feedback_vector_;
+ }
+ bool HasVectorAndSlot() const { return FLAG_vector_stores; }
+ void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+ FeedbackVectorICSlot slot) {
+ feedback_vector_ = vector;
+ slot_ = slot;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
- HStoreNamedGeneric(HValue* context, HValue* object, Handle<String> name,
+ HStoreNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
HValue* value, LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
+ slot_(FeedbackVectorICSlot::Invalid()),
language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, object);
@@ -6958,7 +6966,9 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
SetAllSideEffects();
}
- Handle<String> name_;
+ Handle<Name> name_;
+ Handle<TypeFeedbackVector> feedback_vector_;
+ FeedbackVectorICSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
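Stores gain the same vector/slot plumbing, but unlike the loads above they stay gated on FLAG_vector_stores while vector-based store ICs are rolled out. The asymmetry in one standalone sketch, with stand-in names:

struct Flags { bool vector_stores = false; };  // stand-in for FLAG_vector_stores

bool LoadHasVectorAndSlot(const Flags&) { return true; }  // now unconditional
bool StoreHasVectorAndSlot(const Flags& f) { return f.vector_stores; }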
@@ -7168,6 +7178,21 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
return Representation::Tagged();
}
+ FeedbackVectorICSlot slot() const { return slot_; }
+ Handle<TypeFeedbackVector> feedback_vector() const {
+ return feedback_vector_;
+ }
+ bool HasVectorAndSlot() const {
+ DCHECK(!(FLAG_vector_stores && initialization_state_ != MEGAMORPHIC) ||
+ !feedback_vector_.is_null());
+ return !feedback_vector_.is_null();
+ }
+ void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+ FeedbackVectorICSlot slot) {
+ feedback_vector_ = vector;
+ slot_ = slot;
+ }
+
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
@@ -7176,7 +7201,8 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
HValue* value, LanguageMode language_mode,
InlineCacheState initialization_state)
- : language_mode_(language_mode),
+ : slot_(FeedbackVectorICSlot::Invalid()),
+ language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, object);
SetOperandAt(1, key);
@@ -7185,6 +7211,8 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
SetAllSideEffects();
}
+ Handle<TypeFeedbackVector> feedback_vector_;
+ FeedbackVectorICSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -7264,7 +7292,7 @@ class HStringAdd final : public HBinaryOperation {
public:
static HInstruction* New(
Isolate* isolate, Zone* zone, HValue* context, HValue* left,
- HValue* right, LanguageMode language_mode = SLOPPY,
+ HValue* right, Strength strength = Strength::WEAK,
PretenureFlag pretenure_flag = NOT_TENURED,
StringAddFlags flags = STRING_ADD_CHECK_BOTH,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
@@ -7287,15 +7315,12 @@ class HStringAdd final : public HBinaryOperation {
}
private:
- HStringAdd(HValue* context,
- HValue* left,
- HValue* right,
- LanguageMode language_mode,
- PretenureFlag pretenure_flag,
- StringAddFlags flags,
+ HStringAdd(HValue* context, HValue* left, HValue* right, Strength strength,
+ PretenureFlag pretenure_flag, StringAddFlags flags,
Handle<AllocationSite> allocation_site)
- : HBinaryOperation(context, left, right, language_mode, HType::String()),
- flags_(flags), pretenure_flag_(pretenure_flag) {
+ : HBinaryOperation(context, left, right, strength, HType::String()),
+ flags_(flags),
+ pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetDependsOnFlag(kMaps);
@@ -7558,6 +7583,58 @@ class HTrapAllocationMemento final : public HTemplateInstruction<1> {
};
+class HMaybeGrowElements final : public HTemplateInstruction<5> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HMaybeGrowElements, HValue*,
+ HValue*, HValue*, HValue*, bool,
+ ElementsKind);
+
+ Representation RequiredInputRepresentation(int index) override {
+ if (index < 3) {
+ return Representation::Tagged();
+ }
+ DCHECK(index == 3 || index == 4);
+ return Representation::Integer32();
+ }
+
+ HValue* context() const { return OperandAt(0); }
+ HValue* object() const { return OperandAt(1); }
+ HValue* elements() const { return OperandAt(2); }
+ HValue* key() const { return OperandAt(3); }
+ HValue* current_capacity() const { return OperandAt(4); }
+
+ bool is_js_array() const { return is_js_array_; }
+ ElementsKind kind() const { return kind_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements)
+
+ protected:
+ bool DataEquals(HValue* other) override { return true; }
+
+ private:
+ explicit HMaybeGrowElements(HValue* context, HValue* object, HValue* elements,
+ HValue* key, HValue* current_capacity,
+ bool is_js_array, ElementsKind kind) {
+ is_js_array_ = is_js_array;
+ kind_ = kind;
+
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
+ SetOperandAt(2, elements);
+ SetOperandAt(3, key);
+ SetOperandAt(4, current_capacity);
+
+ SetFlag(kUseGVN);
+ SetChangesFlag(kElementsPointer);
+ SetChangesFlag(kNewSpacePromotion);
+ set_representation(Representation::Tagged());
+ }
+
+ bool is_js_array_;
+ ElementsKind kind_;
+};
+
+
class HToFastProperties final : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
index c5fd88b396..a4536fd750 100644
--- a/deps/v8/src/hydrogen-load-elimination.cc
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -508,4 +508,5 @@ void HLoadEliminationPhase::Run() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
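The same mechanical cleanup repeats across the hydrogen-*.cc files below: the fused closer "} } // namespace v8::internal" becomes one commented brace per namespace, the form style checkers can verify. The target shape:

namespace v8 {
namespace internal {

// ... file contents ...

}  // namespace internal
}  // namespace v8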
diff --git a/deps/v8/src/hydrogen-mark-deoptimize.cc b/deps/v8/src/hydrogen-mark-deoptimize.cc
index 47642e45cd..fe7a88614c 100644
--- a/deps/v8/src/hydrogen-mark-deoptimize.cc
+++ b/deps/v8/src/hydrogen-mark-deoptimize.cc
@@ -58,4 +58,5 @@ void HComputeChangeUndefinedToNaN::Run() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-mark-unreachable.cc b/deps/v8/src/hydrogen-mark-unreachable.cc
index 05779ca524..affe7ce205 100644
--- a/deps/v8/src/hydrogen-mark-unreachable.cc
+++ b/deps/v8/src/hydrogen-mark-unreachable.cc
@@ -51,4 +51,5 @@ void HMarkUnreachableBlocksPhase::Run() {
MarkUnreachableBlocks();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
index 89c28acdab..8a4780c3d7 100644
--- a/deps/v8/src/hydrogen-osr.cc
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -99,4 +99,5 @@ void HOsrBuilder::FinishOsrValues() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/hydrogen-range-analysis.cc
index f5c5a9f1be..c81dc1d365 100644
--- a/deps/v8/src/hydrogen-range-analysis.cc
+++ b/deps/v8/src/hydrogen-range-analysis.cc
@@ -287,4 +287,5 @@ void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-redundant-phi.cc b/deps/v8/src/hydrogen-redundant-phi.cc
index 0b9b0aaf1d..1b3c94a3db 100644
--- a/deps/v8/src/hydrogen-redundant-phi.cc
+++ b/deps/v8/src/hydrogen-redundant-phi.cc
@@ -62,4 +62,5 @@ void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-removable-simulates.cc b/deps/v8/src/hydrogen-removable-simulates.cc
index 73d7a8e058..eb13cb28bd 100644
--- a/deps/v8/src/hydrogen-removable-simulates.cc
+++ b/deps/v8/src/hydrogen-removable-simulates.cc
@@ -184,4 +184,5 @@ void HMergeRemovableSimulatesPhase::Run() {
engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index 33adf5aa9a..4af4e01a5b 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -210,4 +210,5 @@ void HRepresentationChangesPhase::Run() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-sce.cc b/deps/v8/src/hydrogen-sce.cc
index b7ab9fd7db..6944f7090e 100644
--- a/deps/v8/src/hydrogen-sce.cc
+++ b/deps/v8/src/hydrogen-sce.cc
@@ -36,4 +36,5 @@ void HStackCheckEliminationPhase::Run() {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-store-elimination.cc b/deps/v8/src/hydrogen-store-elimination.cc
index ee718e6407..f04ec44e44 100644
--- a/deps/v8/src/hydrogen-store-elimination.cc
+++ b/deps/v8/src/hydrogen-store-elimination.cc
@@ -118,4 +118,5 @@ void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-types.cc b/deps/v8/src/hydrogen-types.cc
index 8813b1fb06..7c50a1d887 100644
--- a/deps/v8/src/hydrogen-types.cc
+++ b/deps/v8/src/hydrogen-types.cc
@@ -70,4 +70,5 @@ std::ostream& operator<<(std::ostream& os, const HType& t) {
return os;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc
index 8507584863..37f19ebdaf 100644
--- a/deps/v8/src/hydrogen-uint32-analysis.cc
+++ b/deps/v8/src/hydrogen-uint32-analysis.cc
@@ -237,4 +237,5 @@ void HUint32AnalysisPhase::Run() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 455af79bb6..2a0e2c3919 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -683,6 +683,11 @@ HConstant* HGraph::GetConstantMinus1() {
}
+HConstant* HGraph::GetConstantBool(bool value) {
+ return value ? GetConstantTrue() : GetConstantFalse();
+}
+
+
#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value) \
HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
@@ -1331,38 +1336,23 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(
HValue* current_capacity = AddLoadFixedArrayLength(elements);
- IfBuilder capacity_checker(this);
-
- capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
- Token::GTE);
- capacity_checker.Then();
-
- // BuildCheckAndGrowElementsCapacity could de-opt without profitable feedback
- // therefore we defer calling it to a stub in optimized functions. It is
- // okay to call directly in a code stub though, because a bailout to the
- // runtime is tolerable in the corner cases.
if (top_info()->IsStub()) {
+ IfBuilder capacity_checker(this);
+ capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
+ Token::GTE);
+ capacity_checker.Then();
HValue* new_elements = BuildCheckAndGrowElementsCapacity(
object, elements, kind, length, current_capacity, key);
environment()->Push(new_elements);
+ capacity_checker.Else();
+ environment()->Push(elements);
+ capacity_checker.End();
} else {
- GrowArrayElementsStub stub(isolate(), is_js_array, kind);
- GrowArrayElementsDescriptor descriptor(isolate());
- HConstant* target = Add<HConstant>(stub.GetCode());
- HValue* op_vals[] = {context(), object, key, current_capacity};
- HValue* new_elements = Add<HCallWithDescriptor>(
- target, 0, descriptor, Vector<HValue*>(op_vals, 4));
- // If the object changed to a dictionary, GrowArrayElements will return a
- // smi to signal that deopt is required.
- Add<HCheckHeapObject>(new_elements);
- environment()->Push(new_elements);
+ HValue* result = Add<HMaybeGrowElements>(
+ object, elements, key, current_capacity, is_js_array, kind);
+ environment()->Push(result);
}
- capacity_checker.Else();
-
- environment()->Push(elements);
- capacity_checker.End();
-
if (is_js_array) {
HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
new_length->ClearFlag(HValue::kCanOverflow);
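The rewrite keeps the inline capacity check only for code stubs, where a bailout to the runtime is tolerable; optimized code now funnels through a single HMaybeGrowElements instruction so a failed grow can deoptimize cleanly. Both paths implement the same grow-on-demand policy, sketched standalone below; the doubling heuristic is an assumption for illustration, not V8's exact growth formula.

#include <algorithm>
#include <vector>

// Grow the backing store when a keyed write lands at or past the current
// capacity; otherwise keep the existing store untouched.
std::vector<int>& MaybeGrowElements(std::vector<int>& elements, size_t key) {
  if (key >= elements.size()) {
    size_t new_capacity = std::max(key + 1, elements.size() * 2);
    elements.resize(new_capacity);
  }
  return elements;
}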
@@ -1682,10 +1672,9 @@ HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
}
-HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
- HValue* elements,
- HValue* key,
- HValue* hash) {
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
+ HValue* receiver, HValue* elements, HValue* key, HValue* hash,
+ LanguageMode language_mode) {
HValue* capacity =
Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
nullptr, FAST_ELEMENTS);
@@ -1728,9 +1717,12 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
// element == undefined means "not found". Call the runtime.
// TODO(jkummerow): walk the prototype chain instead.
Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kKeyedGetProperty),
- 2));
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(is_strong(language_mode)
+ ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty),
+ 2));
}
if_undefined.Else();
{
@@ -1788,9 +1780,12 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
Push(Add<HLoadKeyed>(elements, result_index, nullptr, FAST_ELEMENTS));
details_compare.Else();
Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kKeyedGetProperty),
- 2));
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(is_strong(language_mode)
+ ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty),
+ 2));
details_compare.End();
found_key_match.Else();
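The decision threaded through this function is binary: strong-mode code must throw on a missing property instead of quietly producing undefined, so the builder selects the strong runtime entry when the language mode says so. Reduced to the dispatch, with enumerators named after the Runtime ids in the hunk:

enum class RuntimeId { kKeyedGetProperty, kKeyedGetPropertyStrong };

RuntimeId KeyedGetPropertyId(bool strong_mode) {
  return strong_mode ? RuntimeId::kKeyedGetPropertyStrong
                     : RuntimeId::kKeyedGetProperty;
}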
@@ -3505,8 +3500,8 @@ HGraph::HGraph(CompilationInfo* info)
if (info->IsStub()) {
CallInterfaceDescriptor descriptor =
info->code_stub()->GetCallInterfaceDescriptor();
- start_environment_ = new (zone_)
- HEnvironment(zone_, descriptor.GetEnvironmentParameterCount());
+ start_environment_ =
+ new (zone_) HEnvironment(zone_, descriptor.GetRegisterParameterCount());
} else {
if (info->is_tracking_positions()) {
info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
@@ -4528,6 +4523,11 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
if (rest) {
return Bailout(kRestParameter);
}
+
+ if (scope->this_function_var() != nullptr ||
+ scope->new_target_var() != nullptr) {
+ return Bailout(kSuperReference);
+ }
}
@@ -5216,14 +5216,19 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
Add<HCheckMapValue>(enumerable, environment()->ExpressionStackAt(3));
Bind(each_var, key);
} else {
- HValue* function = AddLoadJSBuiltin(Builtins::FILTER_KEY);
Add<HPushArguments>(enumerable, key);
- key = Add<HInvokeFunction>(function, 2);
+ Runtime::FunctionId function_id = Runtime::kForInFilter;
+ key = Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(function_id), 2);
Push(key);
Add<HSimulate>(stmt->FilterId());
key = Pop();
Bind(each_var, key);
- Add<HCheckHeapObject>(key);
+ IfBuilder if_undefined(this);
+ if_undefined.If<HCompareObjectEqAndBranch>(key,
+ graph()->GetConstantUndefined());
+ if_undefined.ThenDeopt(Deoptimizer::kUndefined);
+ if_undefined.End();
Add<HSimulate>(stmt->AssignmentId());
}
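Runtime::kForInFilter yields undefined for a key that no longer passes the for-in filter, for example one deleted from the object mid-iteration, and the optimized code now deoptimizes on that result instead of carrying a branch. A standalone sketch of the decision the runtime call makes; this is illustrative, not the runtime function's actual signature:

#include <optional>
#include <set>
#include <string>

std::optional<std::string> ForInFilter(const std::set<std::string>& live_keys,
                                       const std::string& key) {
  if (live_keys.count(key) == 0) return std::nullopt;  // surfaces as undefined
  return key;
}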
@@ -5296,11 +5301,8 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- Handle<SharedFunctionInfo> shared_info = expr->shared_info();
- if (shared_info.is_null()) {
- shared_info =
- Compiler::BuildFunctionInfo(expr, current_info()->script(), top_info());
- }
+ Handle<SharedFunctionInfo> shared_info = Compiler::GetSharedFunctionInfo(
+ expr, current_info()->script(), top_info());
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
HFunctionLiteral* instr =
@@ -5409,7 +5411,8 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
DCHECK(current_block()->HasPredecessor());
Variable* variable = expr->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
if (IsLexicalVariableMode(variable->mode())) {
// TODO(rossberg): should this be a DCHECK?
return Bailout(kReferenceToGlobalLexicalVariable);
@@ -5509,16 +5512,14 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
New<HLoadGlobalGeneric>(global_object,
variable->name(),
ast_context()->is_for_typeof());
- if (FLAG_vector_ics) {
- instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
- expr->VariableFeedbackSlot());
- }
+ instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
+ expr->VariableFeedbackSlot());
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
HValue* value = LookupAndMakeLive(variable);
if (value == graph()->GetConstantHole()) {
DCHECK(IsDeclaredVariableMode(variable->mode()) &&
@@ -5528,7 +5529,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnValue(value);
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
HValue* context = BuildContextChainWalk(variable);
HLoadContextSlot::Mode mode;
switch (variable->mode()) {
@@ -5548,7 +5549,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
}
}
@@ -5659,7 +5660,6 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- expr->BuildConstantProperties(isolate());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
HInstruction* literal;
@@ -5704,8 +5704,6 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// of the property values and is the value of the entire expression.
Push(literal);
- expr->CalculateEmitStore(zone());
-
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) return Bailout(kComputedPropertyName);
@@ -5810,7 +5808,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate(), raw_boilerplate,
Runtime::CreateArrayLiteralBoilerplate(
- isolate(), literals, expr->constant_elements()),
+ isolate(), literals, expr->constant_elements(),
+ is_strong(function_language_mode())),
Bailout(kArrayBoilerplateCreationFailed));
boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
@@ -5880,6 +5879,10 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
+ if (subexpr->IsSpread()) {
+ return Bailout(kSpread);
+ }
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -5935,7 +5938,7 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
if (object->IsJSObject()) {
LookupIterator it(object, info->name(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
- Handle<Object> value = JSObject::GetDataProperty(&it);
+ Handle<Object> value = JSReceiver::GetDataProperty(&it);
if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
return New<HConstant>(value);
}
@@ -6256,7 +6259,6 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
if (!CanAccessMonomorphic()) return false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
if (maps->length() > kMaxLoadPolymorphism) return false;
-
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (GetJSObjectFieldAccess(&access)) {
for (int i = 1; i < maps->length(); ++i) {
@@ -6267,7 +6269,6 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
}
return true;
}
-
if (GetJSArrayBufferViewFieldAccess(&access)) {
for (int i = 1; i < maps->length(); ++i) {
PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
@@ -6349,7 +6350,14 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (!info->IsFound()) {
DCHECK(info->IsLoad());
- return graph()->GetConstantUndefined();
+ if (is_strong(function_language_mode())) {
+ return New<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
+ 0);
+ } else {
+ return graph()->GetConstantUndefined();
+ }
}
if (info->IsData()) {
@@ -6752,14 +6760,15 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(operation));
switch (var->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
HandleGlobalVariableAssignment(var,
Top(),
expr->AssignmentId());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedConstCompoundAssignment);
}
@@ -6769,7 +6778,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
BindIfLive(var, Top());
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@@ -6808,7 +6817,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
break;
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
return Bailout(kCompoundAssignmentToLookupSlot);
}
return ast_context()->ReturnValue(Pop());
@@ -6878,15 +6887,16 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
// Handle the assignment.
switch (var->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var,
Top(),
expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
// Perform an initialization check for let declared variables
// or parameters.
if (var->mode() == LET && expr->op() == Token::ASSIGN) {
@@ -6904,7 +6914,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
return ast_context()->ReturnValue(value);
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@@ -6954,7 +6964,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
return ast_context()->ReturnValue(Pop());
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
return Bailout(kAssignmentToLOOKUPVariable);
}
} else {
@@ -7036,29 +7046,26 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
Deoptimizer::SOFT);
}
if (access_type == LOAD) {
- if (FLAG_vector_ics) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
-
- if (!expr->AsProperty()->key()->IsPropertyName()) {
- // It's possible that a keyed load of a constant string was converted
- // to a named load. Here, at the last minute, we need to make sure to
- // use a generic Keyed Load if we are using the type vector, because
- // it has to share information with full code.
- HConstant* key = Add<HConstant>(name);
- HLoadKeyedGeneric* result =
- New<HLoadKeyedGeneric>(object, key, PREMONOMORPHIC);
- result->SetVectorAndSlot(vector, slot);
- return result;
- }
-
- HLoadNamedGeneric* result =
- New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
+
+ if (!expr->AsProperty()->key()->IsPropertyName()) {
+ // It's possible that a keyed load of a constant string was converted
+ // to a named load. Here, at the last minute, we need to make sure to
+ // use a generic Keyed Load if we are using the type vector, because
+ // it has to share information with full code.
+ HConstant* key = Add<HConstant>(name);
+ HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
+ object, key, function_language_mode(), PREMONOMORPHIC);
result->SetVectorAndSlot(vector, slot);
return result;
}
- return New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
+
+ HLoadNamedGeneric* result = New<HLoadNamedGeneric>(
+ object, name, function_language_mode(), PREMONOMORPHIC);
+ result->SetVectorAndSlot(vector, slot);
+ return result;
} else {
return New<HStoreNamedGeneric>(object, name, value,
function_language_mode(), PREMONOMORPHIC);
@@ -7074,14 +7081,12 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
HValue* key,
HValue* value) {
if (access_type == LOAD) {
- InlineCacheState initial_state =
- FLAG_vector_ics ? expr->AsProperty()->GetInlineCacheState()
- : PREMONOMORPHIC;
- HLoadKeyedGeneric* result =
- New<HLoadKeyedGeneric>(object, key, initial_state);
+ InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
+ HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
+ object, key, function_language_mode(), initial_state);
// HLoadKeyedGeneric with vector ics benefits from being encoded as
// MEGAMORPHIC because the vector/slot combo becomes unnecessary.
- if (FLAG_vector_ics && initial_state != MEGAMORPHIC) {
+ if (initial_state != MEGAMORPHIC) {
// We need to pass vector information.
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
@@ -7155,7 +7160,8 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
static bool CanInlineElementAccess(Handle<Map> map) {
- return map->IsJSObjectMap() && !map->has_slow_elements_kind() &&
+ return map->IsJSObjectMap() && !map->has_dictionary_elements() &&
+ !map->has_sloppy_arguments_elements() &&
!map->has_indexed_interceptor() && !map->is_access_check_needed();
}
@@ -7261,7 +7267,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
- if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ if (IsSloppyArgumentsElements(elements_kind)) {
HInstruction* result = BuildKeyedGeneric(access_type, expr, object, key,
val);
*has_side_effects = result->HasObservableSideEffects();
@@ -7272,7 +7278,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
Handle<Map> transitioned_map =
- map->FindTransitionedMap(&possible_transitioned_maps);
+ Map::FindTransitionedMap(map, &possible_transitioned_maps);
transition_target.Add(transitioned_map);
}
@@ -7705,9 +7711,8 @@ HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
isolate()->builtins()->ArgumentsAdaptorTrampoline();
HConstant* adaptor_value = Add<HConstant>(adaptor);
- return New<HCallWithDescriptor>(
- adaptor_value, argument_count, descriptor,
- Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ return New<HCallWithDescriptor>(adaptor_value, argument_count, descriptor,
+ Vector<HValue*>(op_vals, arraysize(op_vals)));
}
@@ -7975,9 +7980,12 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
- // Always inline builtins marked for inlining.
+ // Always inline functions that force inlining.
+ if (target_shared->force_inline()) {
+ return 0;
+ }
if (target->IsBuiltin()) {
- return target_shared->inline_builtin() ? 0 : kNotInlinable;
+ return kNotInlinable;
}
if (target_shared->IsApiFunction()) {
@@ -8124,6 +8132,15 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
return false;
}
+ // In strong mode it is an error to call a function with too few arguments.
+ // In that case do not inline because then the arity check would be skipped.
+ if (is_strong(function->language_mode()) &&
+ arguments_count < function->parameter_count()) {
+ TraceInline(target, caller,
+ "too few arguments passed to a strong function");
+ return false;
+ }
+
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
@@ -8951,19 +8968,17 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
ApiAccessorDescriptor descriptor(isolate());
- DCHECK(arraysize(op_vals) - 1 == descriptor.GetEnvironmentLength());
call = New<HCallWithDescriptor>(
code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
} else if (argc <= CallApiFunctionWithFixedArgsStub::kMaxFixedArgs) {
CallApiFunctionWithFixedArgsStub stub(isolate(), argc, call_data_undefined);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
ApiFunctionWithFixedArgsDescriptor descriptor(isolate());
- DCHECK(arraysize(op_vals) - 1 == descriptor.GetEnvironmentLength());
call = New<HCallWithDescriptor>(
code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
Drop(1); // Drop function.
} else {
op_vals[arraysize(op_vals) - 1] = Add<HConstant>(argc);
@@ -8971,10 +8986,9 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
ApiFunctionDescriptor descriptor(isolate());
- DCHECK(arraysize(op_vals) == descriptor.GetEnvironmentLength());
- call = New<HCallWithDescriptor>(
- code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ call =
+ New<HCallWithDescriptor>(code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, arraysize(op_vals)));
Drop(1); // Drop function.
}
@@ -9357,7 +9371,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return;
}
}
-
HValue* key = NULL;
if (!prop->key()->IsPropertyName()) {
CHECK_ALIVE(VisitForValue(prop->key()));
@@ -9723,6 +9736,12 @@ void HOptimizedGraphBuilder::BuildInitializeInobjectProperties(
HValue* HGraphBuilder::BuildAllocateEmptyArrayBuffer(HValue* byte_length) {
+ // We use HForceRepresentation here to avoid allocations during an *-to-tagged
+ // HChange that could cause GC while the array buffer object is not fully
+ // initialized.
+ HObjectAccess byte_length_access(HObjectAccess::ForJSArrayBufferByteLength());
+ byte_length = AddUncasted<HForceRepresentation>(
+ byte_length, byte_length_access.representation());
HAllocate* result =
BuildAllocate(Add<HConstant>(JSArrayBuffer::kSizeWithInternalFields),
HType::JSObject(), JS_ARRAY_BUFFER_TYPE, HAllocationMode());
@@ -9738,10 +9757,19 @@ HValue* HGraphBuilder::BuildAllocateEmptyArrayBuffer(HValue* byte_length) {
native_context, nullptr,
HObjectAccess::ForContextSlot(Context::ARRAY_BUFFER_MAP_INDEX)));
- Add<HStoreNamedField>(result, HObjectAccess::ForJSArrayBufferBackingStore(),
- Add<HConstant>(ExternalReference()));
- Add<HStoreNamedField>(result, HObjectAccess::ForJSArrayBufferByteLength(),
- byte_length);
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
+ empty_fixed_array);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+ empty_fixed_array);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForJSArrayBufferBackingStore().WithRepresentation(
+ Representation::Smi()),
+ graph()->GetConstant0());
+ Add<HStoreNamedField>(result, byte_length_access, byte_length);
Add<HStoreNamedField>(result, HObjectAccess::ForJSArrayBufferBitFieldSlot(),
graph()->GetConstant0());
Add<HStoreNamedField>(
@@ -9876,8 +9904,8 @@ HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
ExternalArrayType array_type, size_t element_size,
- ElementsKind fixed_elements_kind,
- HValue* byte_length, HValue* length) {
+ ElementsKind fixed_elements_kind, HValue* byte_length, HValue* length,
+ bool initialize) {
STATIC_ASSERT(
(FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask) == 0);
HValue* total_size;
@@ -9913,10 +9941,12 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
Add<HStoreNamedField>(elements,
HObjectAccess::ForFixedArrayLength(),
length);
+ Add<HStoreNamedField>(
+ elements, HObjectAccess::ForFixedTypedArrayBaseBasePointer(), elements);
HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
- {
+ if (initialize) {
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
HValue* key = builder.BeginBody(
@@ -9939,14 +9969,15 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
static const int kBufferArg = 2;
static const int kByteOffsetArg = 3;
static const int kByteLengthArg = 4;
- static const int kArgsLength = 5;
+ static const int kInitializeArg = 5;
+ static const int kArgsLength = 6;
DCHECK(arguments->length() == kArgsLength);
CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
HValue* obj = Pop();
- if (arguments->at(kArrayIdArg)->IsLiteral()) {
+ if (!arguments->at(kArrayIdArg)->IsLiteral()) {
// This should never happen in real use, but can happen when fuzzing.
// Just bail out.
Bailout(kNeedSmiLiteral);
@@ -9988,6 +10019,11 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
HValue* byte_length = Pop();
+ CHECK(arguments->at(kInitializeArg)->IsLiteral());
+ bool initialize = static_cast<Literal*>(arguments->at(kInitializeArg))
+ ->value()
+ ->BooleanValue();
+
NoObservableSideEffectsScope scope(this);
IfBuilder byte_offset_smi(this);
@@ -10035,9 +10071,9 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
AddStoreMapConstant(obj, obj_map);
} else {
DCHECK(is_zero_byte_offset);
- elements = BuildAllocateFixedTypedArray(
- array_type, element_size, fixed_elements_kind,
- byte_length, length);
+ elements = BuildAllocateFixedTypedArray(array_type, element_size,
+ fixed_elements_kind, byte_length,
+ length, initialize);
}
Add<HStoreNamedField>(
obj, HObjectAccess::ForElementsPointer(), elements);
@@ -10051,6 +10087,7 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
Push(buffer);
Push(byte_offset);
Push(byte_length);
+ CHECK_ALIVE(VisitForValue(arguments->at(kInitializeArg)));
PushArgumentsFromEnvironment(kArgsLength);
Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
}
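The new kInitializeArg lets callers that will immediately overwrite a typed array's contents skip the zero-fill loop. The effect of the flag against a plain allocation, with illustrative names:

#include <cstddef>
#include <cstring>
#include <new>

unsigned char* AllocateTypedArrayStorage(std::size_t byte_length,
                                         bool initialize) {
  unsigned char* data =
      static_cast<unsigned char*>(::operator new(byte_length));
  if (initialize) std::memset(data, 0, byte_length);  // the LoopBuilder fill
  return data;
}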
@@ -10184,15 +10221,14 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->IsUnallocated()) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
Bailout(kDeleteWithGlobalVariable);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is not
- // really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- HValue* value = var->is_this()
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
+ // Result of deleting non-global variables is false. 'this' is not really
+ // a variable, though we implement it as one. The subexpression does not
+ // have side effects.
+ HValue* value = var->HasThisName(isolate()) ? graph()->GetConstantTrue()
+ : graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
Bailout(kDeleteWithNonGlobalVariable);
@@ -10283,7 +10319,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
rep = Representation::Smi();
}
- if (returns_original_input) {
+ if (returns_original_input && !is_strong(function_language_mode())) {
// We need an explicit HValue representing ToNumber(input). The
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
@@ -10302,15 +10338,19 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
HConstant* delta = (expr->op() == Token::INC)
? graph()->GetConstant1()
: graph()->GetConstantMinus1();
- HInstruction* instr = AddUncasted<HAdd>(Top(), delta,
- function_language_mode());
+ HInstruction* instr =
+ AddUncasted<HAdd>(Top(), delta, strength(function_language_mode()));
if (instr->IsAdd()) {
HAdd* add = HAdd::cast(instr);
add->set_observed_input_representation(1, rep);
add->set_observed_input_representation(2, Representation::Smi());
}
+ if (!is_strong(function_language_mode())) {
+ instr->ClearAllSideEffects();
+ } else {
+ Add<HSimulate>(expr->ToNumberId(), REMOVABLE_SIMULATE);
+ }
instr->SetFlag(HInstruction::kCannotBeTagged);
- instr->ClearAllSideEffects();
return instr;
}
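From here on, the patch migrates arithmetic from carrying a full LanguageMode to the two-valued Strength: the only thing a binary operation needs to know is whether strong-mode semantics apply. The projection performed by the strength(function_language_mode()) calls, sketched standalone:

enum class LanguageMode { kSloppy, kStrict, kStrong };
enum class Strength { kWeak, kStrong };

Strength strength(LanguageMode mode) {
  return mode == LanguageMode::kStrong ? Strength::kStrong : Strength::kWeak;
}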
@@ -10367,18 +10407,19 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
Push(after);
switch (var->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
HandleGlobalVariableAssignment(var,
after,
expr->AssignmentId());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
BindIfLive(var, after);
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Bail out if we try to mutate a parameter value in a function
// using the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@@ -10405,7 +10446,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
break;
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
return Bailout(kLookupVariableInCountOperation);
}
@@ -10594,10 +10635,10 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
if (FLAG_allocation_site_pretenuring && !allocation_site.is_null()) {
allocation_mode = HAllocationMode(allocation_site);
}
-
HValue* result = HGraphBuilder::BuildBinaryOperation(
expr->op(), left, right, left_type, right_type, result_type,
- fixed_right_arg, allocation_mode, function_language_mode());
+ fixed_right_arg, allocation_mode, strength(function_language_mode()),
+ expr->id());
// Add a simulate after instructions with observable side effects, and
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
@@ -10615,15 +10656,9 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
HValue* HGraphBuilder::BuildBinaryOperation(
- Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* result_type,
- Maybe<int> fixed_right_arg,
- HAllocationMode allocation_mode,
- LanguageMode language_mode) {
+ Token::Value op, HValue* left, HValue* right, Type* left_type,
+ Type* right_type, Type* result_type, Maybe<int> fixed_right_arg,
+ HAllocationMode allocation_mode, Strength strength, BailoutId opt_id) {
bool maybe_string_add = false;
if (op == Token::ADD) {
// If we are adding constant string with something for which we don't have
@@ -10666,7 +10701,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
maybe_string_add = op == Token::ADD;
}
- if (!maybe_string_add) {
+ if (!maybe_string_add && !is_strong(strength)) {
left = TruncateToNumber(left, &left_type);
right = TruncateToNumber(right, &right_type);
}
@@ -10685,27 +10720,27 @@ HValue* HGraphBuilder::BuildBinaryOperation(
}
// Convert left argument as necessary.
- if (left_type->Is(Type::Number()) && !is_strong(language_mode)) {
+ if (left_type->Is(Type::Number()) && !is_strong(strength)) {
DCHECK(right_type->Is(Type::String()));
left = BuildNumberToString(left, left_type);
} else if (!left_type->Is(Type::String())) {
DCHECK(right_type->Is(Type::String()));
- HValue* function = AddLoadJSBuiltin(is_strong(language_mode) ?
- Builtins::STRING_ADD_RIGHT_STRONG :
- Builtins::STRING_ADD_RIGHT);
+ HValue* function = AddLoadJSBuiltin(
+ is_strong(strength) ? Builtins::STRING_ADD_RIGHT_STRONG
+ : Builtins::STRING_ADD_RIGHT);
Add<HPushArguments>(left, right);
return AddUncasted<HInvokeFunction>(function, 2);
}
// Convert right argument as necessary.
- if (right_type->Is(Type::Number()) && !is_strong(language_mode)) {
+ if (right_type->Is(Type::Number()) && !is_strong(strength)) {
DCHECK(left_type->Is(Type::String()));
right = BuildNumberToString(right, right_type);
} else if (!right_type->Is(Type::String())) {
DCHECK(left_type->Is(Type::String()));
- HValue* function = AddLoadJSBuiltin(is_strong(language_mode) ?
- Builtins::STRING_ADD_LEFT_STRONG :
- Builtins::STRING_ADD_LEFT);
+ HValue* function = AddLoadJSBuiltin(is_strong(strength)
+ ? Builtins::STRING_ADD_LEFT_STRONG
+ : Builtins::STRING_ADD_LEFT);
Add<HPushArguments>(left, right);
return AddUncasted<HInvokeFunction>(function, 2);
}
@@ -10723,7 +10758,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
if (!right_string.is_null() && right_string->length() == 0) return left;
if (!left_string.is_null() && !right_string.is_null()) {
return AddUncasted<HStringAdd>(
- left, right, language_mode, allocation_mode.GetPretenureMode(),
+ left, right, strength, allocation_mode.GetPretenureMode(),
STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
@@ -10752,7 +10787,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// Fallback to using the string add stub.
return AddUncasted<HStringAdd>(
- left, right, language_mode, allocation_mode.GetPretenureMode(),
+ left, right, strength, allocation_mode.GetPretenureMode(),
STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
@@ -10771,20 +10806,37 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive, than a stub call.
if (graph()->info()->IsStub() && is_non_primitive) {
- HValue* function = AddLoadJSBuiltin(
- BinaryOpIC::TokenToJSBuiltin(op, language_mode));
+ HValue* function =
+ AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op, strength));
Add<HPushArguments>(left, right);
instr = AddUncasted<HInvokeFunction>(function, 2);
} else {
+ if (is_strong(strength) && Token::IsBitOp(op)) {
+ // TODO(conradw): This is not efficient, but is necessary to prevent
+ // conversion of oddball values to numbers in strong mode. It would be
+ // better to prevent the conversion rather than adding a runtime check.
+ IfBuilder if_builder(this);
+ if_builder.If<HHasInstanceTypeAndBranch>(left, ODDBALL_TYPE);
+ if_builder.OrIf<HHasInstanceTypeAndBranch>(right, ODDBALL_TYPE);
+ if_builder.Then();
+ Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
+ 0);
+ if (!graph()->info()->IsStub()) {
+ Add<HSimulate>(opt_id, REMOVABLE_SIMULATE);
+ }
+ if_builder.End();
+ }
switch (op) {
case Token::ADD:
- instr = AddUncasted<HAdd>(left, right, language_mode);
+ instr = AddUncasted<HAdd>(left, right, strength);
break;
case Token::SUB:
- instr = AddUncasted<HSub>(left, right, language_mode);
+ instr = AddUncasted<HSub>(left, right, strength);
break;
case Token::MUL:
- instr = AddUncasted<HMul>(left, right, language_mode);
+ instr = AddUncasted<HMul>(left, right, strength);
break;
case Token::MOD: {
if (fixed_right_arg.IsJust() &&
@@ -10797,38 +10849,38 @@ HValue* HGraphBuilder::BuildBinaryOperation(
if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
right = fixed_right;
}
- instr = AddUncasted<HMod>(left, right, language_mode);
+ instr = AddUncasted<HMod>(left, right, strength);
break;
}
case Token::DIV:
- instr = AddUncasted<HDiv>(left, right, language_mode);
+ instr = AddUncasted<HDiv>(left, right, strength);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- instr = AddUncasted<HBitwise>(op, left, right, language_mode);
+ instr = AddUncasted<HBitwise>(op, left, right, strength);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = AddUncasted<HRor>(operand, shift_amount, language_mode);
+ instr = AddUncasted<HRor>(operand, shift_amount, strength);
} else {
- instr = AddUncasted<HBitwise>(op, left, right, language_mode);
+ instr = AddUncasted<HBitwise>(op, left, right, strength);
}
break;
}
case Token::SAR:
- instr = AddUncasted<HSar>(left, right, language_mode);
+ instr = AddUncasted<HSar>(left, right, strength);
break;
case Token::SHR:
- instr = AddUncasted<HShr>(left, right, language_mode);
+ instr = AddUncasted<HShr>(left, right, strength);
if (instr->IsShr() && CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
- instr = AddUncasted<HShl>(left, right, language_mode);
+ instr = AddUncasted<HShl>(left, right, strength);
break;
default:
UNREACHABLE();
@@ -11216,7 +11268,8 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
return result;
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
+ HCompareGeneric* result = Add<HCompareGeneric>(
+ left, right, op, strength(function_language_mode()));
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
if (result->HasObservableSideEffects()) {
@@ -11232,8 +11285,8 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HBranch* branch = New<HBranch>(result);
return branch;
} else {
- HCompareNumericAndBranch* result =
- New<HCompareNumericAndBranch>(left, right, op);
+ HCompareNumericAndBranch* result = New<HCompareNumericAndBranch>(
+ left, right, op, strength(function_language_mode()));
result->set_observed_input_representation(left_rep, right_rep);
if (top_info()->is_tracking_positions()) {
result->SetOperandPositions(zone(), left_position, right_position);
@@ -11572,7 +11625,16 @@ void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
-void HOptimizedGraphBuilder::VisitSuperReference(SuperReference* expr) {
+void HOptimizedGraphBuilder::VisitSuperPropertyReference(
+ SuperPropertyReference* expr) {
+ DCHECK(!HasStackOverflow());
+ DCHECK(current_block() != NULL);
+ DCHECK(current_block()->HasPredecessor());
+ return Bailout(kSuperReference);
+}
+
+
+void HOptimizedGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
@@ -11605,20 +11667,21 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_.Add(variable->name(), zone());
globals_.Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
return;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
HValue* value = graph()->GetConstantHole();
environment()->Bind(variable, value);
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
HValue* value = graph()->GetConstantHole();
HValue* context = environment()->context();
@@ -11629,7 +11692,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
}
}
break;
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
@@ -11640,23 +11703,24 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_.Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
+ Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
declaration->fun(), current_info()->script(), top_info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
return;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
BindIfLive(variable, value);
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
HValue* context = environment()->context();
@@ -11667,7 +11731,7 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
}
break;
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
@@ -11747,6 +11811,16 @@ void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateIsTypedArray(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HHasInstanceTypeAndBranch* result =
+ New<HHasInstanceTypeAndBranch>(value, JS_TYPED_ARRAY_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -11924,6 +11998,24 @@ void HOptimizedGraphBuilder::GenerateJSValueGetValue(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateIsDate(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HHasInstanceTypeAndBranch* result =
+ New<HHasInstanceTypeAndBranch>(value, JS_DATE_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateThrowNotDateError(CallRuntime* call) {
+ DCHECK_EQ(0, call->arguments()->length());
+ Add<HDeoptimize>(Deoptimizer::kNotADateObject, Deoptimizer::EAGER);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral());
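GenerateIsDate, like GenerateIsTypedArray earlier in the file, compiles the intrinsic down to a single instance-type branch on the argument's map, the cheapest type test Crankshaft can emit. What HHasInstanceTypeAndBranch checks, in a standalone sketch:

enum InstanceType { JS_DATE_TYPE, JS_TYPED_ARRAY_TYPE, JS_OBJECT_TYPE };

struct HeapObject { InstanceType instance_type; };

bool HasInstanceType(const HeapObject& obj, InstanceType type) {
  return obj.instance_type == type;
}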
@@ -12060,8 +12152,8 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* result = NewUncasted<HStringAdd>(left, right,
- function_language_mode());
+ HInstruction* result =
+ NewUncasted<HStringAdd>(left, right, strength(function_language_mode()));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -13157,4 +13249,5 @@ HPhase::~HPhase() {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index eabcff4b7d..65e54e652b 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -331,6 +331,7 @@ class HGraph final : public ZoneObject {
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
HConstant* GetConstantFalse();
+ HConstant* GetConstantBool(bool value);
HConstant* GetConstantHole();
HConstant* GetConstantNull();
HConstant* GetInvalidContext();
@@ -1352,9 +1353,9 @@ class HGraphBuilder {
HValue* key);
HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
- HValue* elements,
- HValue* key,
- HValue* hash);
+ HValue* elements, HValue* key,
+ HValue* hash,
+ LanguageMode language_mode);
HValue* BuildRegExpConstructResult(HValue* length,
HValue* index,
@@ -1429,15 +1430,12 @@ class HGraphBuilder {
HValue** operand,
HValue** shift_amount);
- HValue* BuildBinaryOperation(Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* result_type,
- Maybe<int> fixed_right_arg,
+ HValue* BuildBinaryOperation(Token::Value op, HValue* left, HValue* right,
+ Type* left_type, Type* right_type,
+ Type* result_type, Maybe<int> fixed_right_arg,
HAllocationMode allocation_mode,
- LanguageMode language_mode);
+ Strength strength,
+ BailoutId opt_id = BailoutId::None());
HLoadNamedField* AddLoadFixedArrayLength(HValue *object,
HValue *dependency = NULL);
@@ -2175,6 +2173,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
#define FOR_EACH_HYDROGEN_INTRINSIC(F) \
F(IsSmi) \
F(IsArray) \
+ F(IsTypedArray) \
F(IsRegExp) \
F(IsJSProxy) \
F(IsConstructCall) \
@@ -2183,7 +2182,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(Arguments) \
F(ValueOf) \
F(SetValueOf) \
+ F(IsDate) \
F(DateField) \
+ F(ThrowNotDateError) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
@@ -2466,10 +2467,11 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
ExternalArrayType array_type,
bool is_zero_byte_offset,
HValue* buffer, HValue* byte_offset, HValue* length);
- HValue* BuildAllocateFixedTypedArray(
- ExternalArrayType array_type, size_t element_size,
- ElementsKind fixed_elements_kind,
- HValue* byte_length, HValue* length);
+ HValue* BuildAllocateFixedTypedArray(ExternalArrayType array_type,
+ size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length,
+ bool initialize);
// TODO(adamk): Move all OrderedHashTable functions to their own class.
HValue* BuildOrderedHashTableHashToBucket(HValue* hash, HValue* num_buckets);
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 9dfdd63f5b..1d735c97f1 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -738,7 +738,7 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
template<class T>
void DeleteNativeObjectAt(const v8::WeakCallbackData<v8::Value, void>& data,
int index) {
- v8::Local<v8::Object> obj = v8::Handle<v8::Object>::Cast(data.GetValue());
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(data.GetValue());
delete reinterpret_cast<T*>(obj->GetAlignedPointerFromInternalField(index));
}
@@ -947,4 +947,5 @@ void BreakIterator::DeleteBreakIterator(
DestroyGlobalHandle(data);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
index 59e95afd85..79e988062e 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/i18n.js
@@ -8,56 +8,82 @@
* Intl object is a single object that has some named properties,
* all of which are constructors.
*/
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalBoolean = global.Boolean;
var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
-
-var undefined = global.undefined;
+var ObjectDefineProperties = utils.ObjectDefineProperties;
+var ObjectDefineProperty = utils.ObjectDefineProperty;
+var SetFunctionName = utils.SetFunctionName;
+
+var ArrayIndexOf;
+var ArrayJoin;
+var IsFinite;
+var IsNaN;
+var MathFloor;
+var RegExpTest;
+var StringIndexOf;
+var StringLastIndexOf;
+var StringMatch;
+var StringReplace;
+var StringSplit;
+var StringSubstr;
+var StringSubstring;
+
+utils.Import(function(from) {
+ ArrayIndexOf = from.ArrayIndexOf;
+ ArrayJoin = from.ArrayJoin;
+ IsFinite = from.IsFinite;
+ IsNaN = from.IsNaN;
+ MathFloor = from.MathFloor;
+ RegExpTest = from.RegExpTest;
+ StringIndexOf = from.StringIndexOf;
+ StringLastIndexOf = from.StringLastIndexOf;
+ StringMatch = from.StringMatch;
+ StringReplace = from.StringReplace;
+ StringSplit = from.StringSplit;
+ StringSubstr = from.StringSubstr;
+ StringSubstring = from.StringSubstring;
+});
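The module wrapper now receives a utils object and copies the builtins it needs into module-locals at bootstrap time, so user code that later replaces, say, String.prototype.replace cannot affect Intl internals. A minimal sketch of the same pattern for an invented native module (utils.Export is assumed to mirror utils.Import):

(function(global, utils) {
"use strict";
%CheckIsBootstrapping();

// Module-local copies taken during bootstrapping are immune to later
// monkey-patching of the global builtins.
var MathAbs;
utils.Import(function(from) {
  MathAbs = from.MathAbs;
});

function Distance(a, b) { return MathAbs(a - b); }

utils.Export(function(to) {  // assumed counterpart of utils.Import
  to.Distance = Distance;
});
})
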
+
+// -------------------------------------------------------------------
var Intl = {};
%AddNamedProperty(global, "Intl", Intl, DONT_ENUM);
-var AVAILABLE_SERVICES = ['collator',
- 'numberformat',
- 'dateformat',
- 'breakiterator'];
-
-var NORMALIZATION_FORMS = ['NFC',
- 'NFD',
- 'NFKC',
- 'NFKD'];
-
/**
* Caches available locales for each service.
*/
var AVAILABLE_LOCALES = {
- 'collator': undefined,
- 'numberformat': undefined,
- 'dateformat': undefined,
- 'breakiterator': undefined
+ 'collator': UNDEFINED,
+ 'numberformat': UNDEFINED,
+ 'dateformat': UNDEFINED,
+ 'breakiterator': UNDEFINED
};
/**
* Caches default ICU locale.
*/
-var DEFAULT_ICU_LOCALE = undefined;
+var DEFAULT_ICU_LOCALE = UNDEFINED;
/**
* Unicode extension regular expression.
*/
-var UNICODE_EXTENSION_RE = undefined;
+var UNICODE_EXTENSION_RE = UNDEFINED;
function GetUnicodeExtensionRE() {
- if (UNICODE_EXTENSION_RE === undefined) {
+  if (IS_UNDEFINED(UNICODE_EXTENSION_RE)) {
UNICODE_EXTENSION_RE = new GlobalRegExp('-u(-[a-z0-9]{2,8})+', 'g');
}
return UNICODE_EXTENSION_RE;
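All of the regexps below follow this same lazy-getter pattern — build on first use, then cache — which keeps RegExp allocation out of the bootstrap path. A standalone restatement in plain JS, with illustrative names:

var CACHED_RE = undefined;
function GetCachedRE() {
  if (CACHED_RE === undefined) {   // allocate only on the first call
    CACHED_RE = new RegExp('-u(-[a-z0-9]{2,8})+', 'g');
  }
  return CACHED_RE;
}
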
@@ -66,10 +92,10 @@ function GetUnicodeExtensionRE() {
/**
* Matches any Unicode extension.
*/
-var ANY_EXTENSION_RE = undefined;
+var ANY_EXTENSION_RE = UNDEFINED;
function GetAnyExtensionRE() {
- if (ANY_EXTENSION_RE === undefined) {
+ if (IS_UNDEFINED(ANY_EXTENSION_RE)) {
ANY_EXTENSION_RE = new GlobalRegExp('-[a-z0-9]{1}-.*', 'g');
}
return ANY_EXTENSION_RE;
@@ -78,10 +104,10 @@ function GetAnyExtensionRE() {
/**
* Replace quoted text (single quote, anything but the quote and quote again).
*/
-var QUOTED_STRING_RE = undefined;
+var QUOTED_STRING_RE = UNDEFINED;
function GetQuotedStringRE() {
- if (QUOTED_STRING_RE === undefined) {
+ if (IS_UNDEFINED(QUOTED_STRING_RE)) {
QUOTED_STRING_RE = new GlobalRegExp("'[^']+'", 'g');
}
return QUOTED_STRING_RE;
@@ -90,10 +116,10 @@ function GetQuotedStringRE() {
/**
* Matches valid service name.
*/
-var SERVICE_RE = undefined;
+var SERVICE_RE = UNDEFINED;
function GetServiceRE() {
- if (SERVICE_RE === undefined) {
+ if (IS_UNDEFINED(SERVICE_RE)) {
SERVICE_RE =
new GlobalRegExp('^(collator|numberformat|dateformat|breakiterator)$');
}
@@ -104,10 +130,10 @@ function GetServiceRE() {
* Validates a language tag against bcp47 spec.
* Actual value is assigned on first run.
*/
-var LANGUAGE_TAG_RE = undefined;
+var LANGUAGE_TAG_RE = UNDEFINED;
function GetLanguageTagRE() {
- if (LANGUAGE_TAG_RE === undefined) {
+ if (IS_UNDEFINED(LANGUAGE_TAG_RE)) {
BuildLanguageTagREs();
}
return LANGUAGE_TAG_RE;
@@ -116,10 +142,10 @@ function GetLanguageTagRE() {
/**
* Helps find duplicate variants in the language tag.
*/
-var LANGUAGE_VARIANT_RE = undefined;
+var LANGUAGE_VARIANT_RE = UNDEFINED;
function GetLanguageVariantRE() {
- if (LANGUAGE_VARIANT_RE === undefined) {
+ if (IS_UNDEFINED(LANGUAGE_VARIANT_RE)) {
BuildLanguageTagREs();
}
return LANGUAGE_VARIANT_RE;
@@ -128,10 +154,10 @@ function GetLanguageVariantRE() {
/**
* Helps find duplicate singletons in the language tag.
*/
-var LANGUAGE_SINGLETON_RE = undefined;
+var LANGUAGE_SINGLETON_RE = UNDEFINED;
function GetLanguageSingletonRE() {
- if (LANGUAGE_SINGLETON_RE === undefined) {
+ if (IS_UNDEFINED(LANGUAGE_SINGLETON_RE)) {
BuildLanguageTagREs();
}
return LANGUAGE_SINGLETON_RE;
@@ -140,10 +166,10 @@ function GetLanguageSingletonRE() {
/**
* Matches valid IANA time zone names.
*/
-var TIMEZONE_NAME_CHECK_RE = undefined;
+var TIMEZONE_NAME_CHECK_RE = UNDEFINED;
function GetTimezoneNameCheckRE() {
- if (TIMEZONE_NAME_CHECK_RE === undefined) {
+ if (IS_UNDEFINED(TIMEZONE_NAME_CHECK_RE)) {
TIMEZONE_NAME_CHECK_RE =
new GlobalRegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
}
@@ -151,81 +177,19 @@ function GetTimezoneNameCheckRE() {
}
/**
- * Maps ICU calendar names into LDML type.
- */
-var ICU_CALENDAR_MAP = {
- 'gregorian': 'gregory',
- 'japanese': 'japanese',
- 'buddhist': 'buddhist',
- 'roc': 'roc',
- 'persian': 'persian',
- 'islamic-civil': 'islamicc',
- 'islamic': 'islamic',
- 'hebrew': 'hebrew',
- 'chinese': 'chinese',
- 'indian': 'indian',
- 'coptic': 'coptic',
- 'ethiopic': 'ethiopic',
- 'ethiopic-amete-alem': 'ethioaa'
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a collator.
- */
-var COLLATOR_KEY_MAP = {
- 'kn': {'property': 'numeric', 'type': 'boolean'},
- 'kf': {'property': 'caseFirst', 'type': 'string',
- 'values': ['false', 'lower', 'upper']}
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a number format.
- */
-var NUMBER_FORMAT_KEY_MAP = {
- 'nu': {'property': undefined, 'type': 'string'}
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a date/time format.
- */
-var DATETIME_FORMAT_KEY_MAP = {
- 'ca': {'property': undefined, 'type': 'string'},
- 'nu': {'property': undefined, 'type': 'string'}
-};
-
-/**
- * Allowed -u-co- values. List taken from:
- * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
- */
-var ALLOWED_CO_VALUES = [
- 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
- 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
-];
-
-/**
- * Error message for when function object is created with new and it's not
- * a constructor.
- */
-var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
- 'Function object that\'s not a constructor was created with new';
-
-
-/**
* Adds bound method to the prototype of the given object.
*/
function addBoundMethod(obj, methodName, implementation, length) {
+ %CheckIsBootstrapping();
function getter() {
if (!%IsInitializedIntlObject(this)) {
throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
}
var internalName = '__bound' + methodName + '__';
- if (this[internalName] === undefined) {
+ if (IS_UNDEFINED(this[internalName])) {
var that = this;
var boundMethod;
- if (length === undefined || length === 2) {
+ if (IS_UNDEFINED(length) || length === 2) {
boundMethod = function(x, y) {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
@@ -254,7 +218,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
}
}
}
- $setFunctionName(boundMethod, internalName);
+ SetFunctionName(boundMethod, internalName);
%FunctionRemovePrototype(boundMethod);
%SetNativeFlag(boundMethod);
this[internalName] = boundMethod;
@@ -262,11 +226,11 @@ function addBoundMethod(obj, methodName, implementation, length) {
return this[internalName];
}
- $setFunctionName(getter, methodName);
+ SetFunctionName(getter, methodName);
%FunctionRemovePrototype(getter);
%SetNativeFlag(getter);
- $objectDefineProperty(obj.prototype, methodName, {
+ ObjectDefineProperty(obj.prototype, methodName, {
get: getter,
enumerable: false,
configurable: true
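addBoundMethod installs an accessor whose getter creates the bound method on first read and caches it per instance. The trick, distilled into plain JS with illustrative names:

// Sketch: lazily create and cache a per-instance bound method.
function defineLazyBound(proto, name, impl) {
  Object.defineProperty(proto, name, {
    get: function() {
      var cacheKey = '__bound' + name + '__';
      if (this[cacheKey] === undefined) {
        var that = this;
        this[cacheKey] = function(x, y) { return impl(that, x, y); };
      }
      return this[cacheKey];
    },
    enumerable: false,
    configurable: true
  });
}
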
@@ -279,19 +243,19 @@ function addBoundMethod(obj, methodName, implementation, length) {
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (IS_NULL(service.match(GetServiceRE()))) {
+ if (IS_NULL(%_CallFunction(service, GetServiceRE(), StringMatch))) {
throw MakeError(kWrongServiceType, service);
}
// Provide defaults if matcher was not specified.
- if (options === undefined) {
+ if (IS_UNDEFINED(options)) {
options = {};
} else {
options = $toObject(options);
}
var matcher = options.localeMatcher;
- if (matcher !== undefined) {
+ if (!IS_UNDEFINED(matcher)) {
matcher = GlobalString(matcher);
if (matcher !== 'lookup' && matcher !== 'best fit') {
throw MakeRangeError(kLocaleMatcher, matcher);
@@ -303,7 +267,7 @@ function supportedLocalesOf(service, locales, options) {
var requestedLocales = initializeLocaleList(locales);
// Cache these, they don't ever change per service.
- if (AVAILABLE_LOCALES[service] === undefined) {
+ if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
}
@@ -327,19 +291,20 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
var matchedLocales = [];
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
- var locale = requestedLocales[i].replace(GetUnicodeExtensionRE(), '');
+ var locale = %_CallFunction(requestedLocales[i], GetUnicodeExtensionRE(),
+ '', StringReplace);
do {
- if (availableLocales[locale] !== undefined) {
+ if (!IS_UNDEFINED(availableLocales[locale])) {
// Push requested locale not the resolved one.
- matchedLocales.push(requestedLocales[i]);
+ %_CallFunction(matchedLocales, requestedLocales[i], $arrayPush);
break;
}
// Truncate locale if possible, if not break.
- var pos = locale.lastIndexOf('-');
+ var pos = %_CallFunction(locale, '-', StringLastIndexOf);
if (pos === -1) {
break;
}
- locale = locale.substring(0, pos);
+ locale = %_CallFunction(locale, 0, pos, StringSubstring);
} while (true);
}
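The loop above is the BCP 47 "lookup" fallback: drop the -u- extension, then repeatedly truncate the tag at its last '-' until a supported prefix is found, reporting the locale as originally requested. A self-contained sketch (the available set is invented):

function lookupFallback(tag, available) {
  var locale = tag.replace(/-u(-[a-z0-9]{2,8})+/g, '');
  while (true) {
    if (available[locale] !== undefined) return tag;  // requested, not resolved
    var pos = locale.lastIndexOf('-');
    if (pos === -1) return undefined;
    locale = locale.substring(0, pos);  // de-DE-u-co-phonebk -> de-DE -> de
  }
}
// lookupFallback('de-DE-u-co-phonebk', {'de': true})
//   -> 'de-DE-u-co-phonebk'
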
@@ -365,10 +330,10 @@ function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
* is out of range for that property it throws RangeError.
*/
function getGetOption(options, caller) {
- if (options === undefined) throw MakeError(kDefaultOptionsMissing, caller);
+ if (IS_UNDEFINED(options)) throw MakeError(kDefaultOptionsMissing, caller);
var getOption = function getOption(property, type, values, defaultValue) {
- if (options[property] !== undefined) {
+ if (!IS_UNDEFINED(options[property])) {
var value = options[property];
switch (type) {
case 'boolean':
@@ -383,7 +348,9 @@ function getGetOption(options, caller) {
default:
throw MakeError(kWrongValueType);
}
- if (values !== undefined && values.indexOf(value) === -1) {
+
+ if (!IS_UNDEFINED(values) &&
+ %_CallFunction(values, value, ArrayIndexOf) === -1) {
throw MakeRangeError(kValueOutOfRange, value, caller, property);
}
@@ -432,36 +399,39 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (IS_NULL(service.match(GetServiceRE()))) {
+ if (IS_NULL(%_CallFunction(service, GetServiceRE(), StringMatch))) {
throw MakeError(kWrongServiceType, service);
}
// Cache these, they don't ever change per service.
- if (AVAILABLE_LOCALES[service] === undefined) {
+ if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
}
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
- var locale = requestedLocales[i].replace(GetAnyExtensionRE(), '');
+ var locale = %_CallFunction(requestedLocales[i], GetAnyExtensionRE(), '',
+ StringReplace);
do {
- if (AVAILABLE_LOCALES[service][locale] !== undefined) {
+ if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
// Return the resolved locale and extension.
- var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE());
+ var extensionMatch =
+ %_CallFunction(requestedLocales[i], GetUnicodeExtensionRE(),
+ StringMatch);
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
- var pos = locale.lastIndexOf('-');
+ var pos = %_CallFunction(locale, '-', StringLastIndexOf);
if (pos === -1) {
break;
}
- locale = locale.substring(0, pos);
+ locale = %_CallFunction(locale, 0, pos, StringSubstring);
} while (true);
}
// Didn't find a match, return default.
- if (DEFAULT_ICU_LOCALE === undefined) {
+ if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
}
@@ -485,7 +455,7 @@ function bestFitMatcher(service, requestedLocales) {
* We are not concerned with the validity of the values at this point.
*/
function parseExtension(extension) {
- var extensionSplit = extension.split('-');
+ var extensionSplit = %_CallFunction(extension, '-', StringSplit);
// Assume ['', 'u', ...] input, but don't throw.
if (extensionSplit.length <= 2 ||
@@ -496,16 +466,16 @@ function parseExtension(extension) {
// Key is {2}alphanum, value is {3,8}alphanum.
// Some keys may not have explicit values (booleans).
var extensionMap = {};
- var previousKey = undefined;
+ var previousKey = UNDEFINED;
for (var i = 2; i < extensionSplit.length; ++i) {
var length = extensionSplit[i].length;
var element = extensionSplit[i];
if (length === 2) {
- extensionMap[element] = undefined;
+ extensionMap[element] = UNDEFINED;
previousKey = element;
- } else if (length >= 3 && length <=8 && previousKey !== undefined) {
+    } else if (length >= 3 && length <= 8 && !IS_UNDEFINED(previousKey)) {
extensionMap[previousKey] = element;
- previousKey = undefined;
+ previousKey = UNDEFINED;
} else {
// There is a value that's too long, or that doesn't have a key.
return {};
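parseExtension turns the raw '-u-…' extension into a key/value map: two-character keys, 3-8 character values, and bare keys standing for booleans. A plain-JS restatement of those rules, for illustration only:

function parseExtensionSketch(extension) {
  var parts = extension.split('-');    // e.g. ['', 'u', 'kn', 'co', 'phonebk']
  if (parts.length <= 2 || parts[1] !== 'u') return {};
  var map = {};
  var key = undefined;
  for (var i = 2; i < parts.length; i++) {
    if (parts[i].length === 2) {
      key = parts[i];
      map[key] = undefined;            // bare key: boolean-style
    } else if (parts[i].length >= 3 && parts[i].length <= 8 &&
               key !== undefined) {
      map[key] = parts[i];
      key = undefined;
    } else {
      return {};                       // malformed: reject everything
    }
  }
  return map;
}
// parseExtensionSketch('-u-kn-co-phonebk') -> {kn: undefined, co: 'phonebk'}
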
@@ -538,21 +508,21 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
value = (value === 'true') ? true : false;
}
- if (property !== undefined) {
+ if (!IS_UNDEFINED(property)) {
defineWEProperty(outOptions, property, value);
}
}
for (var key in keyValues) {
- if (keyValues.hasOwnProperty(key)) {
- var value = undefined;
+ if (%HasOwnProperty(keyValues, key)) {
+ var value = UNDEFINED;
var map = keyValues[key];
- if (map.property !== undefined) {
+ if (!IS_UNDEFINED(map.property)) {
// This may return true if user specifies numeric: 'false', since
// Boolean('nonempty') === true.
value = getOption(map.property, map.type, map.values);
}
- if (value !== undefined) {
+ if (!IS_UNDEFINED(value)) {
updateProperty(map.property, map.type, value);
extension += updateExtension(key, value);
continue;
@@ -560,9 +530,9 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
// User options didn't have it, check Unicode extension.
// Here we want to convert strings 'true', 'false' into proper Boolean
// values (not a user error).
- if (extensionMap.hasOwnProperty(key)) {
+ if (%HasOwnProperty(extensionMap, key)) {
value = extensionMap[key];
- if (value !== undefined) {
+ if (!IS_UNDEFINED(value)) {
updateProperty(map.property, map.type, value);
extension += updateExtension(key, value);
} else if (map.type === 'boolean') {
@@ -584,15 +554,17 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
* configurable: false, writable: false, enumerable: true.
*/
function freezeArray(array) {
- array.forEach(function(element, index) {
- $objectDefineProperty(array, index, {value: element,
- configurable: false,
- writable: false,
- enumerable: true});
- });
+ var l = array.length;
+ for (var i = 0; i < l; i++) {
+ if (i in array) {
+ ObjectDefineProperty(array, i, {value: array[i],
+ configurable: false,
+ writable: false,
+ enumerable: true});
+ }
+ }
- $objectDefineProperty(array, 'length', {value: array.length,
- writable: false});
+ ObjectDefineProperty(array, 'length', {value: l, writable: false});
return array;
}
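Replacing array.forEach with an explicit index loop removes a call that user code could observe by patching Array.prototype.forEach; the observable result is unchanged. Assuming the function above:

var a = freezeArray(['en', 'de']);
a[0] = 'fr';    // throws in strict mode (writable: false); ignored otherwise
a.length = 0;   // length is non-writable, so this throws in strict mode too
// a remains ['en', 'de']
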
@@ -620,7 +592,7 @@ function getOptimalLanguageTag(original, resolved) {
// Preserve extensions of resolved locale, but swap base tags with original.
var resolvedBase = new GlobalRegExp('^' + locales[1].base);
- return resolved.replace(resolvedBase, locales[0].base);
+ return %_CallFunction(resolved, resolvedBase, locales[0].base, StringReplace);
}
@@ -634,8 +606,9 @@ function getAvailableLocalesOf(service) {
var available = %AvailableLocalesOf(service);
for (var i in available) {
- if (available.hasOwnProperty(i)) {
- var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
+ if (%HasOwnProperty(available, i)) {
+ var parts = %_CallFunction(i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/,
+ StringMatch);
if (parts !== null) {
// Build xx-ZZ. We don't care about the actual value,
// as long it's not undefined.
@@ -653,8 +626,8 @@ function getAvailableLocalesOf(service) {
* Configurable is false by default.
*/
function defineWEProperty(object, property, value) {
- $objectDefineProperty(object, property,
- {value: value, writable: true, enumerable: true});
+ ObjectDefineProperty(object, property,
+ {value: value, writable: true, enumerable: true});
}
@@ -663,7 +636,7 @@ function defineWEProperty(object, property, value) {
* Sets configurable descriptor to false.
*/
function addWEPropertyIfDefined(object, property, value) {
- if (value !== undefined) {
+ if (!IS_UNDEFINED(value)) {
defineWEProperty(object, property, value);
}
}
@@ -673,10 +646,10 @@ function addWEPropertyIfDefined(object, property, value) {
* Defines a property and sets writable, enumerable and configurable to true.
*/
function defineWECProperty(object, property, value) {
- $objectDefineProperty(object, property, {value: value,
- writable: true,
- enumerable: true,
- configurable: true});
+ ObjectDefineProperty(object, property, {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
}
@@ -685,7 +658,7 @@ function defineWECProperty(object, property, value) {
* Sets all descriptors to true.
*/
function addWECPropertyIfDefined(object, property, value) {
- if (value !== undefined) {
+ if (!IS_UNDEFINED(value)) {
defineWECProperty(object, property, value);
}
}
@@ -695,7 +668,8 @@ function addWECPropertyIfDefined(object, property, value) {
 * Returns titlecased word, aMeRicA -> America.
*/
function toTitleCaseWord(word) {
- return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
+ return %StringToUpperCase(%_CallFunction(word, 0, 1, StringSubstr)) +
+ %StringToLowerCase(%_CallFunction(word, 1, StringSubstr));
}
/**
@@ -733,19 +707,18 @@ function canonicalizeLanguageTag(localeID) {
*/
function initializeLocaleList(locales) {
var seen = [];
- if (locales === undefined) {
+ if (IS_UNDEFINED(locales)) {
// Constructor is called without arguments.
seen = [];
} else {
// We allow single string localeID.
if (typeof locales === 'string') {
- seen.push(canonicalizeLanguageTag(locales));
+ %_CallFunction(seen, canonicalizeLanguageTag(locales), $arrayPush);
return freezeArray(seen);
}
var o = $toObject(locales);
- // Converts it to UInt32 (>>> is shr on 32bit integers).
- var len = o.length >>> 0;
+ var len = TO_UINT32(o.length);
for (var k = 0; k < len; k++) {
if (k in o) {
@@ -753,8 +726,8 @@ function initializeLocaleList(locales) {
var tag = canonicalizeLanguageTag(value);
- if (seen.indexOf(tag) === -1) {
- seen.push(tag);
+ if (%_CallFunction(seen, tag, ArrayIndexOf) === -1) {
+ %_CallFunction(seen, tag, $arrayPush);
}
}
}
@@ -775,39 +748,40 @@ function initializeLocaleList(locales) {
*/
function isValidLanguageTag(locale) {
   // Check if it's well-formed, including grandfathered tags.
- if (GetLanguageTagRE().test(locale) === false) {
+ if (!%_CallFunction(GetLanguageTagRE(), locale, RegExpTest)) {
return false;
}
// Just return if it's a x- form. It's all private.
- if (locale.indexOf('x-') === 0) {
+ if (%_CallFunction(locale, 'x-', StringIndexOf) === 0) {
return true;
}
// Check if there are any duplicate variants or singletons (extensions).
// Remove private use section.
- locale = locale.split(/-x-/)[0];
+ locale = %_CallFunction(locale, /-x-/, StringSplit)[0];
// Skip language since it can match variant regex, so we start from 1.
// We are matching i-klingon here, but that's ok, since i-klingon-klingon
// is not valid and would fail LANGUAGE_TAG_RE test.
var variants = [];
var extensions = [];
- var parts = locale.split(/-/);
+ var parts = %_CallFunction(locale, /-/, StringSplit);
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
- if (GetLanguageVariantRE().test(value) === true && extensions.length === 0) {
- if (variants.indexOf(value) === -1) {
- variants.push(value);
+ if (%_CallFunction(GetLanguageVariantRE(), value, RegExpTest) &&
+ extensions.length === 0) {
+ if (%_CallFunction(variants, value, ArrayIndexOf) === -1) {
+ %_CallFunction(variants, value, $arrayPush);
} else {
return false;
}
}
- if (GetLanguageSingletonRE().test(value) === true) {
- if (extensions.indexOf(value) === -1) {
- extensions.push(value);
+ if (%_CallFunction(GetLanguageSingletonRE(), value, RegExpTest)) {
+ if (%_CallFunction(extensions, value, ArrayIndexOf) === -1) {
+ %_CallFunction(extensions, value, $arrayPush);
} else {
return false;
}
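Expected outcomes of the duplicate-variant and duplicate-singleton checks, inferred from the algorithm above rather than from test output:

// 'de-DE-1996'                 valid: a single variant
// 'de-DE-1996-1996'            invalid: variant repeated
// 'en-u-ca-gregory-u-nu-latn'  invalid: singleton 'u' repeated
// 'x-whatever'                 valid: private-use tags return early
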
@@ -866,7 +840,7 @@ function initializeCollator(collator, locales, options) {
throw MakeTypeError(kReinitializeIntl, "Collator");
}
- if (options === undefined) {
+ if (IS_UNDEFINED(options)) {
options = {};
}
@@ -879,13 +853,13 @@ function initializeCollator(collator, locales, options) {
var sensitivity = getOption('sensitivity', 'string',
['base', 'accent', 'case', 'variant']);
- if (sensitivity === undefined && internalOptions.usage === 'sort') {
+ if (IS_UNDEFINED(sensitivity) && internalOptions.usage === 'sort') {
sensitivity = 'variant';
}
defineWEProperty(internalOptions, 'sensitivity', sensitivity);
defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
- 'ignorePunctuation', 'boolean', undefined, false));
+ 'ignorePunctuation', 'boolean', UNDEFINED, false));
var locale = resolveLocale('collator', locales, options);
@@ -894,13 +868,35 @@ function initializeCollator(collator, locales, options) {
// One exception is -co- which has to be part of the extension, but only for
// usage: sort, and its value can't be 'standard' or 'search'.
var extensionMap = parseExtension(locale.extension);
+
+ /**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a collator.
+ */
+ var COLLATOR_KEY_MAP = {
+ 'kn': {'property': 'numeric', 'type': 'boolean'},
+ 'kf': {'property': 'caseFirst', 'type': 'string',
+ 'values': ['false', 'lower', 'upper']}
+ };
+
setOptions(
options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
var collation = 'default';
var extension = '';
- if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
- if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
+ if (%HasOwnProperty(extensionMap, 'co') && internalOptions.usage === 'sort') {
+
+ /**
+ * Allowed -u-co- values. List taken from:
+ * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
+ */
+ var ALLOWED_CO_VALUES = [
+ 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
+ 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
+ ];
+
+ if (%_CallFunction(ALLOWED_CO_VALUES, extensionMap.co, ArrayIndexOf) !==
+ -1) {
extension = '-u-co-' + extensionMap.co;
// ICU can't tell us what the collation is, so save user's input.
collation = extensionMap.co;
@@ -916,7 +912,7 @@ function initializeCollator(collator, locales, options) {
// problems. If malicious user decides to redefine Object.prototype.locale
// we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
// ObjectDefineProperties will either succeed defining or throw an error.
- var resolved = $objectDefineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
caseFirst: {writable: true},
collation: {value: internalOptions.collation, writable: true},
ignorePunctuation: {writable: true},
@@ -934,7 +930,7 @@ function initializeCollator(collator, locales, options) {
// Writable, configurable and enumerable are set to false by default.
%MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
- $objectDefineProperty(collator, 'resolved', {value: resolved});
+ ObjectDefineProperty(collator, 'resolved', {value: resolved});
return collator;
}
@@ -989,7 +985,7 @@ function initializeCollator(collator, locales, options) {
},
DONT_ENUM
);
-$setFunctionName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
+SetFunctionName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
@@ -1009,7 +1005,7 @@ $setFunctionName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
},
DONT_ENUM
);
-$setFunctionName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
+SetFunctionName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
%SetNativeFlag(Intl.Collator.supportedLocalesOf);
@@ -1040,7 +1036,7 @@ addBoundMethod(Intl.Collator, 'compare', compare, 2);
function isWellFormedCurrencyCode(currency) {
return typeof currency == "string" &&
currency.length == 3 &&
- currency.match(/[^A-Za-z]/) == null;
+ %_CallFunction(currency, /[^A-Za-z]/, StringMatch) == null;
}
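Per ECMA-402 this is a purely syntactic check — three ASCII letters — not a lookup against the ISO 4217 registry:

isWellFormedCurrencyCode('EUR');   // true
isWellFormedCurrencyCode('eur');   // true; the caller uppercases it later
isWellFormedCurrencyCode('EURO');  // false: four letters
isWellFormedCurrencyCode('E1R');   // false: contains a non-letter
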
@@ -1050,12 +1046,12 @@ function isWellFormedCurrencyCode(currency) {
*/
function getNumberOption(options, property, min, max, fallback) {
var value = options[property];
- if (value !== undefined) {
+ if (!IS_UNDEFINED(value)) {
value = GlobalNumber(value);
- if ($isNaN(value) || value < min || value > max) {
+ if (IsNaN(value) || value < min || value > max) {
throw MakeRangeError(kPropertyValueOutOfRange, property);
}
- return $floor(value);
+ return MathFloor(value);
}
return fallback;
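getNumberOption coerces, range-checks and floors a numeric option. Usage sketch with invented option names:

getNumberOption({minimumFractionDigits: '2.7'},
                'minimumFractionDigits', 0, 20, 0);      // 2 (floored)
getNumberOption({}, 'maximumFractionDigits', 0, 20, 3);  // 3 (fallback)
getNumberOption({minimumFractionDigits: 99},
                'minimumFractionDigits', 0, 20, 0);      // throws RangeError
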
@@ -1071,7 +1067,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
throw MakeTypeError(kReinitializeIntl, "NumberFormat");
}
- if (options === undefined) {
+ if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1084,18 +1080,18 @@ function initializeNumberFormat(numberFormat, locales, options) {
'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
var currency = getOption('currency', 'string');
- if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
+ if (!IS_UNDEFINED(currency) && !isWellFormedCurrencyCode(currency)) {
throw MakeRangeError(kInvalidCurrencyCode, currency);
}
- if (internalOptions.style === 'currency' && currency === undefined) {
+ if (internalOptions.style === 'currency' && IS_UNDEFINED(currency)) {
throw MakeTypeError(kCurrencyCode);
}
var currencyDisplay = getOption(
'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
if (internalOptions.style === 'currency') {
- defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
+ defineWEProperty(internalOptions, 'currency', %StringToUpperCase(currency));
defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
}
@@ -1111,7 +1107,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
var mnsd = options['minimumSignificantDigits'];
var mxsd = options['maximumSignificantDigits'];
- if (mnsd !== undefined || mxsd !== undefined) {
+ if (!IS_UNDEFINED(mnsd) || !IS_UNDEFINED(mxsd)) {
mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
@@ -1121,16 +1117,25 @@ function initializeNumberFormat(numberFormat, locales, options) {
// Grouping.
defineWEProperty(internalOptions, 'useGrouping', getOption(
- 'useGrouping', 'boolean', undefined, true));
+ 'useGrouping', 'boolean', UNDEFINED, true));
// ICU prefers options to be passed using -u- extension key/values for
// number format, so we need to build that.
var extensionMap = parseExtension(locale.extension);
+
+ /**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a number format.
+ */
+ var NUMBER_FORMAT_KEY_MAP = {
+ 'nu': {'property': UNDEFINED, 'type': 'string'}
+ };
+
var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = $objectDefineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
currency: {writable: true},
currencyDisplay: {writable: true},
locale: {writable: true},
@@ -1142,11 +1147,11 @@ function initializeNumberFormat(numberFormat, locales, options) {
style: {value: internalOptions.style, writable: true},
useGrouping: {writable: true}
});
- if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
- defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
+ if (%HasOwnProperty(internalOptions, 'minimumSignificantDigits')) {
+ defineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
}
- if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
- defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
+ if (%HasOwnProperty(internalOptions, 'maximumSignificantDigits')) {
+ defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
}
var formatter = %CreateNumberFormat(requestedLocale,
internalOptions,
@@ -1155,12 +1160,12 @@ function initializeNumberFormat(numberFormat, locales, options) {
// We can't get information about number or currency style from ICU, so we
// assume user request was fulfilled.
if (internalOptions.style === 'currency') {
- $objectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
- writable: true});
+ ObjectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
+ writable: true});
}
%MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
- $objectDefineProperty(numberFormat, 'resolved', {value: resolved});
+ ObjectDefineProperty(numberFormat, 'resolved', {value: resolved});
return numberFormat;
}
@@ -1219,12 +1224,12 @@ function initializeNumberFormat(numberFormat, locales, options) {
format.resolved.currencyDisplay);
}
- if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
+ if (%HasOwnProperty(format.resolved, 'minimumSignificantDigits')) {
defineWECProperty(result, 'minimumSignificantDigits',
format.resolved.minimumSignificantDigits);
}
- if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
+ if (%HasOwnProperty(format.resolved, 'maximumSignificantDigits')) {
defineWECProperty(result, 'maximumSignificantDigits',
format.resolved.maximumSignificantDigits);
}
@@ -1233,8 +1238,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
},
DONT_ENUM
);
-$setFunctionName(Intl.NumberFormat.prototype.resolvedOptions,
- 'resolvedOptions');
+SetFunctionName(Intl.NumberFormat.prototype.resolvedOptions, 'resolvedOptions');
%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
@@ -1254,7 +1258,7 @@ $setFunctionName(Intl.NumberFormat.prototype.resolvedOptions,
},
DONT_ENUM
);
-$setFunctionName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
+SetFunctionName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
@@ -1315,7 +1319,7 @@ function toLDMLString(options) {
var hr12 = getOption('hour12', 'boolean');
option = getOption('hour', 'string', ['2-digit', 'numeric']);
- if (hr12 === undefined) {
+ if (IS_UNDEFINED(hr12)) {
ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
} else if (hr12 === true) {
ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
@@ -1340,7 +1344,7 @@ function toLDMLString(options) {
* Returns either LDML equivalent of the current option or empty string.
*/
function appendToLDMLString(option, pairs) {
- if (option !== undefined) {
+ if (!IS_UNDEFINED(option)) {
return pairs[option];
} else {
return '';
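Each option contributes its LDML symbol to the skeleton and absent options contribute nothing; ICU then expands the skeleton into a locale-appropriate pattern. Illustrative calls:

appendToLDMLString('2-digit', {'2-digit': 'MM', 'numeric': 'M'});  // 'MM'
appendToLDMLString(UNDEFINED, {'2-digit': 'MM', 'numeric': 'M'});  // ''
// {year: 'numeric', month: '2-digit', day: 'numeric'} accumulates to 'yMMd'.
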
@@ -1353,57 +1357,58 @@ function appendToLDMLString(option, pairs) {
*/
function fromLDMLString(ldmlString) {
// First remove '' quoted text, so we lose 'Uhr' strings.
- ldmlString = ldmlString.replace(GetQuotedStringRE(), '');
+ ldmlString = %_CallFunction(ldmlString, GetQuotedStringRE(), '',
+ StringReplace);
var options = {};
- var match = ldmlString.match(/E{3,5}/g);
+ var match = %_CallFunction(ldmlString, /E{3,5}/g, StringMatch);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
- match = ldmlString.match(/G{3,5}/g);
+ match = %_CallFunction(ldmlString, /G{3,5}/g, StringMatch);
options = appendToDateTimeObject(
options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
- match = ldmlString.match(/y{1,2}/g);
+ match = %_CallFunction(ldmlString, /y{1,2}/g, StringMatch);
options = appendToDateTimeObject(
options, 'year', match, {y: 'numeric', yy: '2-digit'});
- match = ldmlString.match(/M{1,5}/g);
+ match = %_CallFunction(ldmlString, /M{1,5}/g, StringMatch);
options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
// Sometimes we get L instead of M for month - standalone name.
- match = ldmlString.match(/L{1,5}/g);
+ match = %_CallFunction(ldmlString, /L{1,5}/g, StringMatch);
options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
- match = ldmlString.match(/d{1,2}/g);
+ match = %_CallFunction(ldmlString, /d{1,2}/g, StringMatch);
options = appendToDateTimeObject(
options, 'day', match, {d: 'numeric', dd: '2-digit'});
- match = ldmlString.match(/h{1,2}/g);
+ match = %_CallFunction(ldmlString, /h{1,2}/g, StringMatch);
if (match !== null) {
options['hour12'] = true;
}
options = appendToDateTimeObject(
options, 'hour', match, {h: 'numeric', hh: '2-digit'});
- match = ldmlString.match(/H{1,2}/g);
+ match = %_CallFunction(ldmlString, /H{1,2}/g, StringMatch);
if (match !== null) {
options['hour12'] = false;
}
options = appendToDateTimeObject(
options, 'hour', match, {H: 'numeric', HH: '2-digit'});
- match = ldmlString.match(/m{1,2}/g);
+ match = %_CallFunction(ldmlString, /m{1,2}/g, StringMatch);
options = appendToDateTimeObject(
options, 'minute', match, {m: 'numeric', mm: '2-digit'});
- match = ldmlString.match(/s{1,2}/g);
+ match = %_CallFunction(ldmlString, /s{1,2}/g, StringMatch);
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = ldmlString.match(/z|zzzz/g);
+ match = %_CallFunction(ldmlString, /z|zzzz/g, StringMatch);
options = appendToDateTimeObject(
options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
@@ -1413,8 +1418,8 @@ function fromLDMLString(ldmlString) {
function appendToDateTimeObject(options, option, match, pairs) {
if (IS_NULL(match)) {
- if (!options.hasOwnProperty(option)) {
- defineWEProperty(options, option, undefined);
+ if (!%HasOwnProperty(options, option)) {
+ defineWEProperty(options, option, UNDEFINED);
}
return options;
}
@@ -1430,7 +1435,7 @@ function appendToDateTimeObject(options, option, match, pairs) {
* Returns options with at least default values in it.
*/
function toDateTimeOptions(options, required, defaults) {
- if (options === undefined) {
+ if (IS_UNDEFINED(options)) {
options = {};
} else {
options = TO_OBJECT_INLINE(options);
@@ -1438,45 +1443,45 @@ function toDateTimeOptions(options, required, defaults) {
var needsDefault = true;
if ((required === 'date' || required === 'any') &&
- (options.weekday !== undefined || options.year !== undefined ||
- options.month !== undefined || options.day !== undefined)) {
+ (!IS_UNDEFINED(options.weekday) || !IS_UNDEFINED(options.year) ||
+ !IS_UNDEFINED(options.month) || !IS_UNDEFINED(options.day))) {
needsDefault = false;
}
if ((required === 'time' || required === 'any') &&
- (options.hour !== undefined || options.minute !== undefined ||
- options.second !== undefined)) {
+ (!IS_UNDEFINED(options.hour) || !IS_UNDEFINED(options.minute) ||
+ !IS_UNDEFINED(options.second))) {
needsDefault = false;
}
if (needsDefault && (defaults === 'date' || defaults === 'all')) {
- $objectDefineProperty(options, 'year', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- $objectDefineProperty(options, 'month', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- $objectDefineProperty(options, 'day', {value: 'numeric',
+ ObjectDefineProperty(options, 'year', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
- }
-
- if (needsDefault && (defaults === 'time' || defaults === 'all')) {
- $objectDefineProperty(options, 'hour', {value: 'numeric',
+ ObjectDefineProperty(options, 'month', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
- $objectDefineProperty(options, 'minute', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- $objectDefineProperty(options, 'second', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
+ ObjectDefineProperty(options, 'day', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ if (needsDefault && (defaults === 'time' || defaults === 'all')) {
+ ObjectDefineProperty(options, 'hour', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ ObjectDefineProperty(options, 'minute', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ ObjectDefineProperty(options, 'second', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
}
return options;
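toDateTimeOptions only injects the numeric year/month/day (or hour/minute/second) defaults when the caller supplied no component from the required group. Expected behavior, assuming the function above:

var o = toDateTimeOptions({hour: 'numeric'}, 'date', 'date');
// o.year, o.month and o.day are all 'numeric'; o.hour is untouched.
toDateTimeOptions({year: '2-digit'}, 'date', 'date');
// No injection: a date component was already present.
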
@@ -1493,7 +1498,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
throw MakeTypeError(kReinitializeIntl, "DateTimeFormat");
}
- if (options === undefined) {
+ if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1520,11 +1525,21 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
// we need to build that.
var internalOptions = {};
var extensionMap = parseExtension(locale.extension);
+
+ /**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a date/time format.
+ */
+ var DATETIME_FORMAT_KEY_MAP = {
+ 'ca': {'property': UNDEFINED, 'type': 'string'},
+ 'nu': {'property': UNDEFINED, 'type': 'string'}
+ };
+
var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = $objectDefineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
calendar: {writable: true},
day: {writable: true},
era: {writable: true},
@@ -1547,12 +1562,12 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
var formatter = %CreateDateTimeFormat(
requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
- if (tz !== undefined && tz !== resolved.timeZone) {
+ if (!IS_UNDEFINED(tz) && tz !== resolved.timeZone) {
throw MakeRangeError(kUnsupportedTimeZone, tz);
}
%MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
- $objectDefineProperty(dateFormat, 'resolved', {value: resolved});
+ ObjectDefineProperty(dateFormat, 'resolved', {value: resolved});
return dateFormat;
}
@@ -1591,10 +1606,29 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
throw MakeTypeError(kResolvedOptionsCalledOnNonObject, "DateTimeFormat");
}
+ /**
+ * Maps ICU calendar names into LDML type.
+ */
+ var ICU_CALENDAR_MAP = {
+ 'gregorian': 'gregory',
+ 'japanese': 'japanese',
+ 'buddhist': 'buddhist',
+ 'roc': 'roc',
+ 'persian': 'persian',
+ 'islamic-civil': 'islamicc',
+ 'islamic': 'islamic',
+ 'hebrew': 'hebrew',
+ 'chinese': 'chinese',
+ 'indian': 'indian',
+ 'coptic': 'coptic',
+ 'ethiopic': 'ethiopic',
+ 'ethiopic-amete-alem': 'ethioaa'
+ };
+
var format = this;
var fromPattern = fromLDMLString(format.resolved.pattern);
var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
- if (userCalendar === undefined) {
+ if (IS_UNDEFINED(userCalendar)) {
// Use ICU name if we don't have a match. It shouldn't happen, but
// it would be too strict to throw for this.
userCalendar = format.resolved.calendar;
@@ -1625,7 +1659,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
},
DONT_ENUM
);
-$setFunctionName(Intl.DateTimeFormat.prototype.resolvedOptions,
+SetFunctionName(Intl.DateTimeFormat.prototype.resolvedOptions,
'resolvedOptions');
%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
@@ -1646,7 +1680,7 @@ $setFunctionName(Intl.DateTimeFormat.prototype.resolvedOptions,
},
DONT_ENUM
);
-$setFunctionName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
+SetFunctionName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
@@ -1658,13 +1692,13 @@ $setFunctionName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
*/
function formatDate(formatter, dateValue) {
var dateMs;
- if (dateValue === undefined) {
- dateMs = GlobalDate.now();
+ if (IS_UNDEFINED(dateValue)) {
+ dateMs = %DateCurrentTime();
} else {
dateMs = $toNumber(dateValue);
}
- if (!$isFinite(dateMs)) throw MakeRangeError(kDateRange);
+ if (!IsFinite(dateMs)) throw MakeRangeError(kDateRange);
return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
new GlobalDate(dateMs));
@@ -1694,12 +1728,12 @@ addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
*/
function canonicalizeTimeZoneID(tzID) {
// Skip undefined zones.
- if (tzID === undefined) {
+ if (IS_UNDEFINED(tzID)) {
return tzID;
}
// Special case handling (UTC, GMT).
- var upperID = tzID.toUpperCase();
+ var upperID = %StringToUpperCase(tzID);
if (upperID === 'UTC' || upperID === 'GMT' ||
upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
return 'UTC';
@@ -1707,12 +1741,12 @@ function canonicalizeTimeZoneID(tzID) {
// We expect only _ and / beside ASCII letters.
// All inputs should conform to Area/Location from now on.
- var match = GetTimezoneNameCheckRE().exec(tzID);
+ var match = %_CallFunction(tzID, GetTimezoneNameCheckRE(), StringMatch);
if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, tzID);
var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
var i = 3;
- while (match[i] !== undefined && i < match.length) {
+ while (!IS_UNDEFINED(match[i]) && i < match.length) {
result = result + '_' + toTitleCaseWord(match[i]);
i++;
}
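Canonicalization special-cases the UTC/GMT aliases and title-cases each Area/Location word. Examples implied by the code above:

canonicalizeTimeZoneID('etc/gmt');           // 'UTC'
canonicalizeTimeZoneID('america/new_york');  // 'America/New_York'
canonicalizeTimeZoneID('Bogus!Zone');        // throws RangeError
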
@@ -1729,7 +1763,7 @@ function initializeBreakIterator(iterator, locales, options) {
throw MakeTypeError(kReinitializeIntl, "v8BreakIterator");
}
- if (options === undefined) {
+ if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1741,7 +1775,7 @@ function initializeBreakIterator(iterator, locales, options) {
'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
var locale = resolveLocale('breakiterator', locales, options);
- var resolved = $objectDefineProperties({}, {
+ var resolved = ObjectDefineProperties({}, {
requestedLocale: {value: locale.locale, writable: true},
type: {value: internalOptions.type, writable: true},
locale: {writable: true}
@@ -1753,7 +1787,7 @@ function initializeBreakIterator(iterator, locales, options) {
%MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
internalIterator);
- $objectDefineProperty(iterator, 'resolved', {value: resolved});
+ ObjectDefineProperty(iterator, 'resolved', {value: resolved});
return iterator;
}
@@ -1804,7 +1838,7 @@ function initializeBreakIterator(iterator, locales, options) {
},
DONT_ENUM
);
-$setFunctionName(Intl.v8BreakIterator.prototype.resolvedOptions,
+SetFunctionName(Intl.v8BreakIterator.prototype.resolvedOptions,
'resolvedOptions');
%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
@@ -1826,7 +1860,7 @@ $setFunctionName(Intl.v8BreakIterator.prototype.resolvedOptions,
},
DONT_ENUM
);
-$setFunctionName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
+SetFunctionName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
@@ -1892,11 +1926,11 @@ var savedObjects = {
// Default (created with undefined locales and options parameters) collator,
// number and date format instances. They'll be created as needed.
var defaultObjects = {
- 'collator': undefined,
- 'numberformat': undefined,
- 'dateformatall': undefined,
- 'dateformatdate': undefined,
- 'dateformattime': undefined,
+ 'collator': UNDEFINED,
+ 'numberformat': UNDEFINED,
+ 'dateformatall': UNDEFINED,
+ 'dateformatdate': UNDEFINED,
+ 'dateformattime': UNDEFINED,
};
@@ -1905,9 +1939,9 @@ var defaultObjects = {
* We cache only default instances (where no locales or options are provided).
*/
function cachedOrNewService(service, locales, options, defaults) {
- var useOptions = (defaults === undefined) ? options : defaults;
- if (locales === undefined && options === undefined) {
- if (defaultObjects[service] === undefined) {
+ var useOptions = (IS_UNDEFINED(defaults)) ? options : defaults;
+ if (IS_UNDEFINED(locales) && IS_UNDEFINED(options)) {
+ if (IS_UNDEFINED(defaultObjects[service])) {
defaultObjects[service] = new savedObjects[service](locales, useOptions);
}
return defaultObjects[service];
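The cache only applies when both locales and options are undefined, so argument-free toLocaleString-style calls share one formatter while any explicit argument builds a fresh instance. Sketch:

for (var i = 0; i < 1000; i++) {
  (1234.5).toLocaleString();      // one cached default 'numberformat'
}
(1234.5).toLocaleString('de');    // explicit locale: a new instance each time
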
@@ -1916,11 +1950,22 @@ function cachedOrNewService(service, locales, options, defaults) {
}
+function OverrideFunction(object, name, f) {
+ %CheckIsBootstrapping();
+ ObjectDefineProperty(object, name, { value: f,
+                                       writable: true,
+ configurable: true,
+ enumerable: false });
+ SetFunctionName(f, name);
+ %FunctionRemovePrototype(f);
+ %SetNativeFlag(f);
+}
+
/**
* Compares this and that, and returns less than 0, 0 or greater than 0 value.
* Overrides the built-in method.
*/
-$overrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
+OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1944,7 +1989,7 @@ $overrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
* If the form is not one of "NFC", "NFD", "NFKC", or "NFKD", then throw
* a RangeError Exception.
*/
-$overrideFunction(GlobalString.prototype, 'normalize', function(that) {
+OverrideFunction(GlobalString.prototype, 'normalize', function(that) {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1953,9 +1998,13 @@ $overrideFunction(GlobalString.prototype, 'normalize', function(that) {
var form = GlobalString(%_Arguments(0) || 'NFC');
- var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
+ var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
+
+ var normalizationForm =
+ %_CallFunction(NORMALIZATION_FORMS, form, ArrayIndexOf);
if (normalizationForm === -1) {
- throw MakeRangeError(kNormalizationForm, NORMALIZATION_FORMS.join(', '));
+ throw MakeRangeError(kNormalizationForm,
+ %_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
}
return %StringNormalize(this, normalizationForm);
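Behavior implied by the normalize override, for illustration:

'\u0041\u030A'.normalize('NFC') === '\u00C5';  // A + combining ring -> Å
'text'.normalize('NFX');  // throws RangeError listing NFC, NFD, NFKC, NFKD
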
@@ -1967,7 +2016,7 @@ $overrideFunction(GlobalString.prototype, 'normalize', function(that) {
* Formats a Number object (this) using locale and options values.
* If locale or options are omitted, defaults are used.
*/
-$overrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
+OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1992,9 +2041,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
throw MakeTypeError(kMethodInvokedOnWrongType, "Date");
}
- if ($isNaN(date)) {
- return 'Invalid Date';
- }
+ if (IsNaN(date)) return 'Invalid Date';
var internalOptions = toDateTimeOptions(options, required, defaults);
@@ -2010,7 +2057,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* If locale or options are omitted, defaults are used - both date and time are
* present in the output.
*/
-$overrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
+OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2028,7 +2075,7 @@ $overrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
* If locale or options are omitted, defaults are used - only date is present
* in the output.
*/
-$overrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
+OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2046,7 +2093,7 @@ $overrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
* If locale or options are omitted, defaults are used - only time is present
* in the output.
*/
-$overrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
+OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 64ff491979..5a4036627d 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -411,6 +411,12 @@ void Assembler::emit(uint32_t x) {
}
+void Assembler::emit_q(uint64_t x) {
+ *reinterpret_cast<uint64_t*>(pc_) = x;
+ pc_ += sizeof(uint64_t);
+}
+
+
void Assembler::emit(Handle<Object> handle) {
AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
@@ -475,14 +481,12 @@ void Assembler::emit_w(const Immediate& x) {
}
-Address Assembler::target_address_at(Address pc,
- ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-void Assembler::set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index a3c19af786..9066788b1f 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2921,6 +2921,12 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dq(uint64_t data) {
+ EnsureSpace ensure_space(this);
+ emit_q(data);
+}
+
+
void Assembler::dd(Label* label) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -2940,20 +2946,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
@@ -2979,6 +2971,7 @@ void LogGeneratedCodeCoverage(const char* file_line) {
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 48f0603752..e77ef28ce7 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -512,15 +512,12 @@ class Assembler : public AssemblerBase {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool);
- inline static void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ inline static Address target_address_at(Address pc, Address constant_pool);
+ inline static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
static inline void set_target_address_at(Address pc,
@@ -528,7 +525,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target);
}
@@ -610,6 +607,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -1451,6 +1451,8 @@ class Assembler : public AssemblerBase {
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
// Check if there is less than kGap bytes available in the buffer.
@@ -1477,11 +1479,12 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
protected:
void emit_sse_operand(XMMRegister reg, const Operand& adr);
@@ -1512,6 +1515,7 @@ class Assembler : public AssemblerBase {
TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
inline void emit_w(const Immediate& x);
+ inline void emit_q(uint64_t x);
// Emit the code-object-relative offset of the label's position
inline void emit_code_relative_offset(Label* label);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 499044360f..ef9f30d715 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -138,6 +138,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -158,12 +159,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
}
- // Store a smi-tagged arguments count on the stack.
+ // Preserve the incoming parameters on the stack.
__ SmiTag(eax);
__ push(eax);
-
- // Push the function to invoke on the stack.
__ push(edi);
+ if (use_new_target) {
+ __ push(edx);
+ }
__ cmp(edx, edi);
Label normal_new;
@@ -358,17 +360,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// ebx: JSObject
// edi: FixedArray
// ecx: start of next object
- { Label loop, entry;
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, ecx);
- __ j(below, &loop);
- }
+ __ mov(edx, factory->undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ InitializeFieldsWithFiller(eax, ecx, edx);
// Store the initialized FixedArray into the properties field of
// the JSObject
@@ -399,7 +393,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ mov(ecx, Operand(esp, kPointerSize * 2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ mov(ecx, Operand(esp, offset));
__ cmp(ecx, masm->isolate()->factory()->undefined_value());
__ j(equal, &count_incremented);
// ecx is an AllocationSite. We are creating a memento from it, so we
@@ -409,13 +404,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- // Retrieve the function from the stack.
- __ pop(edi);
+ // Restore the parameters.
+ if (use_new_target) {
+ __ pop(edx); // new.target
+ }
+ __ pop(edi); // Constructor function.
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
__ SmiUntag(eax);
+ // Push new.target onto the construct frame. This is stored just below the
+ // receiver on the stack.
+ if (use_new_target) {
+ __ push(edx);
+ }
+
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
@@ -448,7 +452,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -473,9 +479,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&use_receiver);
__ mov(eax, Operand(esp, 0));
- // Restore the arguments count and leave the construct frame.
+ // Restore the arguments count and leave the construct frame. The arguments
+ // count is stored below the receiver and the new.target.
__ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ mov(ebx, Operand(esp, offset));
// Leave construct frame.
}
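For reference, the stack layout this code assumes at &exit, derived from the offsets above (a sketch; byte offsets from esp):

    //   esp[0]             receiver / result
    //   esp[4]             new.target              (only when use_new_target)
    //   esp[4] or esp[8]   smi-tagged argc   <- loaded into ebx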
@@ -491,12 +499,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@@ -538,9 +551,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ dec(ecx);
__ j(greater_equal, &loop);
- __ inc(eax); // Pushed new.target.
-
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -564,7 +574,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(ebx, Operand(esp, 0));
+ // Get arguments count, skipping over new.target.
+ __ mov(ebx, Operand(esp, kPointerSize));
}
__ pop(ecx); // Return address.
@@ -1052,13 +1063,22 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
__ mov(key, Operand(ebp, indexOffset));
__ jmp(&entry);
__ bind(&loop);
__ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ mov(slot, Immediate(Smi::FromInt(index)));
+ __ mov(vector, Immediate(feedback_vector));
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
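The loop now routes element loads through a real feedback vector instead of the megamorphic IC. A sketch of what is materialized here (names as in the code above; the stub's dispatch is summarized, not verified):

    FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);     // one keyed-load IC slot
    Handle<TypeFeedbackVector> vec =
        isolate->factory()->NewTypeFeedbackVector(&spec);
    int index = vec->GetIndex(FeedbackVectorICSlot(0));  // slot -> array index
    // The stub reads vec[index] to pick a cached handler, the megamorphic
    // stub-cache path, or a miss into the runtime.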
@@ -1577,6 +1597,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
+
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
+ 1 << SharedFunctionInfo::kStrongModeBitWithinByte);
+ __ j(equal, &no_strong_error, Label::kNear);
+
+ // What we really care about is the required number of arguments.
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
+ __ SmiUntag(ecx);
+ __ cmp(eax, ecx);
+ __ j(greater_equal, &no_strong_error, Label::kNear);
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Copy receiver and all actual arguments.
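In effect the new block implements (a sketch; strong mode was an experimental ES extension at the time):

    // if (shared->is_strong_mode() && actual_argc < shared->length())
    //   -> Runtime::kThrowStrongModeTooFewArguments (inside a manual adaptor frame)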
@@ -1690,7 +1731,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 4d0a2953d1..7079dc9f77 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -106,15 +106,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ eax.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@@ -653,19 +653,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
- if (FLAG_vector_ics) {
- // With careful management, we won't have to save slot and vector on
- // the stack. Simply handle the possibly missing case first.
- // TODO(mvstanton): this code can be more efficient.
- __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(isolate()->factory()->the_hole_value()));
- __ j(equal, &miss);
- __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
- __ ret(0);
- } else {
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
- ebx, &miss);
- }
+ // With careful management, we won't have to save slot and vector on
+ // the stack. Simply handle the possibly missing case first.
+ // TODO(mvstanton): this code can be more efficient.
+ __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(isolate()->factory()->the_hole_value()));
+ __ j(equal, &miss);
+ __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+ __ ret(0);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -713,9 +708,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -739,7 +733,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The key is in edx and the parameter count is in eax.
DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
@@ -806,8 +799,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[8] : receiver displacement
// esp[12] : function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -836,8 +827,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ebx = parameter count (tagged)
__ mov(ebx, Operand(esp, 1 * kPointerSize));
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
// TODO(rossberg): Factor out some of the bits that are shared with the other
// Generate* functions.
@@ -910,9 +899,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ jmp(&instantiate, Label::kNear);
__ bind(&has_mapped_parameters);
- __ mov(
- edi,
- Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX)));
+ __ mov(edi, Operand(edi, Context::SlotOffset(
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
__ bind(&instantiate);
// eax = address of new object (tagged)
@@ -1078,18 +1066,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- // If the constructor was [[Call]]ed, the call will not push a new.target
- // onto the stack. In that case the arguments array we construct is bogus,
- // bu we do not care as the constructor throws immediately.
- __ cmp(ecx, Immediate(Smi::FromInt(0)));
- Label skip_decrement;
- __ j(equal, &skip_decrement);
- // Subtract 1 from smi-tagged arguments count.
- __ sub(ecx, Immediate(2));
- __ bind(&skip_decrement);
- }
-
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
@@ -1169,9 +1145,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// esp[0] : return address
- // esp[4] : index of rest parameter
- // esp[8] : number of parameters
- // esp[12] : receiver displacement
+ // esp[4] : language mode
+ // esp[8] : index of rest parameter
+ // esp[12] : number of parameters
+ // esp[16] : receiver displacement
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -1182,13 +1159,13 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 2 * kPointerSize), ecx);
+ __ mov(Operand(esp, 3 * kPointerSize), ecx);
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 3 * kPointerSize), edx);
+ __ mov(Operand(esp, 4 * kPointerSize), edx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
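The added language-mode argument shifts each incoming stack slot up by one word, which is why the patched slots move from 2 and 3 to 3 and 4 * kPointerSize and kNewRestParam now takes 4 arguments (a restatement of the change, for orientation).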
@@ -1666,7 +1643,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects;
+ Label runtime_call, check_unequal_objects;
Condition cc = GetCondition();
Label miss;
@@ -1700,12 +1677,17 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- Label check_for_nan;
__ cmp(edx, isolate()->factory()->undefined_value());
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
+ if (is_strong(strength())) {
+ // In strong mode, this comparison must throw, so call the runtime.
+ __ j(equal, &runtime_call, Label::kFar);
+ } else {
+ Label check_for_nan;
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
}
// Test for NaN. Compare heap numbers in a general way,
@@ -1714,12 +1696,20 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Immediate(isolate()->factory()->heap_number_map()));
__ j(equal, &generic_heap_number_comparison, Label::kNear);
if (cc != equal) {
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
+ __ cmpb(ecx, static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE));
+ __ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
- __ CmpObjectType(eax, SYMBOL_TYPE, ecx);
- __ j(equal, &not_identical);
+ __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+ __ j(equal, &runtime_call, Label::kFar);
+ if (is_strong(strength())) {
+ // We have already tested for smis and heap numbers, so if both
+ // arguments are not strings we must proceed to the slow case.
+ __ test(ecx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &runtime_call, Label::kFar);
+ }
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
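Summarizing the identical-operand fast path after this change (cc != equal; derived from the branches above):

    //   undefined OP undefined  -> strong: runtime_call (throws)
    //                              sloppy: NegativeComparisonResult(cc)
    //   spec object or symbol   -> runtime_call
    //   strong mode, non-string -> runtime_call
    //   anything else           -> falls through and returns EQUAL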
@@ -1864,7 +1854,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label not_both_objects;
Label return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
@@ -1873,11 +1862,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
+ __ j(not_zero, &runtime_call, Label::kNear);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects, Label::kNear);
+ __ j(below, &runtime_call, Label::kNear);
__ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects, Label::kNear);
+ __ j(below, &runtime_call, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
// The and of the undetectable flags is 1 if and only if they are equal.
@@ -1894,8 +1883,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
__ ret(0); // eax, edx were pushed
- __ bind(&not_both_objects);
}
+ __ bind(&runtime_call);
// Push arguments below the return address.
__ pop(ecx);
@@ -1907,7 +1896,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == equal) {
builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- builtin = Builtins::COMPARE;
+ builtin =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
__ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
}
@@ -2256,6 +2246,11 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
factory->allocation_site_map());
__ j(not_equal, &miss);
+ // Increment the call count for monomorphic function calls.
+ __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
__ mov(ebx, ecx);
__ mov(edx, edi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
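The operand arithmetic relies on ia32 Smi tagging (value << 1). A sketch of why times_half_pointer_size addresses the right word:

    // edx = slot index as a Smi = index * 2.
    // Scaling by times_half_pointer_size (2) gives index * 4 = index * kPointerSize,
    // so the operand is vector + FixedArray::kHeaderSize + index * kPointerSize
    //   + kPointerSize - kHeapObjectTag,
    // i.e. the call-count word stored one slot past the feedback cell.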
@@ -2315,6 +2310,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(edi, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2386,6 +2386,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Update stats.
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+ // Initialize the call counter.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
// Store the function. Use a stub since we need a frame for allocation.
// ebx - vector
// edx - slot
@@ -2962,9 +2967,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ push(VectorLoadICDescriptor::VectorRegister());
- __ push(VectorLoadICDescriptor::SlotRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ push(LoadWithVectorDescriptor::VectorRegister());
+ __ push(LoadDescriptor::SlotRegister());
}
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
@@ -2981,9 +2986,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ mov(index_, eax);
}
__ pop(object_);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ pop(VectorLoadICDescriptor::SlotRegister());
- __ pop(VectorLoadICDescriptor::VectorRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ pop(LoadDescriptor::SlotRegister());
+ __ pop(LoadWithVectorDescriptor::VectorRegister());
}
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -3638,7 +3643,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4414,15 +4419,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4539,21 +4544,19 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
- Register name = VectorLoadICDescriptor::NameRegister(); // ecx
- Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
- Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
+ Register name = LoadWithVectorDescriptor::NameRegister(); // ecx
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
Register scratch = edi;
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4588,21 +4591,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
- Register key = VectorLoadICDescriptor::NameRegister(); // ecx
- Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
- Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
+ Register key = LoadWithVectorDescriptor::NameRegister(); // ecx
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
Register feedback = edi;
__ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4628,7 +4631,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4646,6 +4649,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, ebx);
CallICStub stub(isolate(), state());
@@ -5423,6 +5478,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 083f5dba5b..1fc42e0a8e 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -1041,6 +1041,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 00c20437bf..0b5c47b548 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -39,6 +39,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 6d1c0f6384..5666cf4d22 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -179,53 +179,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-ia32.cc).
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0, false);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-ia32.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-ia32.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC store call (from ic-ia32.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- eax : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-ia32.cc).
// ----------- S t a t e -------------
@@ -286,8 +239,6 @@ void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
@@ -326,6 +277,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 5fbee322b6..46985793b8 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -423,7 +423,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
+ // No embedded constant pool support.
UNREACHABLE();
}
@@ -431,6 +431,7 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 80ac52864e..05cb1d5e25 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1641,7 +1641,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*data == 0x16) {
data++;
int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
+ get_modrm(*data, &mod, &rm, &regop);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
NameOfCPURegister(regop),
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 18f1960558..a9c47274d6 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index f9d804f667..fddace732e 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -79,36 +79,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -5 * kPointerSize;
- static const int kConstructorOffset = kMinInt;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 6ef215e16b..535f2c2c63 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -93,10 +93,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-ia32.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -184,17 +180,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
// Argument to NewContext is the function, which is still in edi.
if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -210,8 +206,9 @@ void FullCodeGenerator::Generate() {
// Copy parameters into context if necessary.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
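The indexing convention above treats the receiver as pseudo-parameter -1 (a restatement of the code, not new behavior):

    // i == -1      -> scope()->receiver()
    // 0 <= i < n   -> scope()->parameter(i)
    // source slot  -> ebp + kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize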
@@ -237,10 +234,48 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ // The write barrier clobbers the register again; keep it marked as such.
+ }
+ SetVar(this_function_var, edi, ebx, edx);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ Label non_adaptor_frame;
+ __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &non_adaptor_frame);
+ __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
+
+ __ bind(&non_adaptor_frame);
+ __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+
+ Label non_construct_frame, done;
+ __ j(not_equal, &non_construct_frame);
+
+ // Construct frame
+ __ mov(eax,
+ Operand(eax, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ jmp(&done);
+
+ // Non-construct frame
+ __ bind(&non_construct_frame);
+ __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
+
+ __ bind(&done);
+ SetVar(new_target_var, eax, ebx, edx);
+ }
+
// Possibly allocate RestParameters
int rest_index;
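The frame walk that computes new.target, restated as a sketch (slot names as used above):

    // fp = caller_fp;
    // if (fp->context == Smi(ARGUMENTS_ADAPTOR)) fp = fp->caller_fp;  // skip adaptor
    // new.target = (fp->marker == Smi(CONSTRUCT))
    //                  ? fp[ConstructFrameConstants::kOriginalConstructorOffset]
    //                  : undefined;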
@@ -250,16 +285,13 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ push(Immediate(Smi::FromInt(num_parameters)));
__ push(Immediate(Smi::FromInt(rest_index)));
+ __ push(Immediate(Smi::FromInt(language_mode())));
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -296,7 +328,7 @@ void FullCodeGenerator::Generate() {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, eax, ebx, edx);
@@ -321,7 +353,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -437,7 +469,7 @@ void FullCodeGenerator::EmitReturnSequence() {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- SetSourcePosition(function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
@@ -446,9 +478,6 @@ void FullCodeGenerator::EmitReturnSequence() {
__ pop(ebp);
int arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
// Check that the size of the code used for returning is large enough
@@ -794,15 +823,16 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(StackOperand(variable),
@@ -810,7 +840,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -821,7 +851,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
@@ -851,25 +881,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ mov(StackOperand(variable), result_register());
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -886,7 +917,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
@@ -903,20 +934,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -993,10 +1025,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ bind(&slow_case);
}
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1041,9 +1072,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ SetStatementPosition(stmt, SKIP_BREAK);
- SetStatementPosition(stmt);
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1051,7 +1082,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
@@ -1146,7 +1177,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
__ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
__ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
@@ -1178,9 +1209,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ test(eax, eax);
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, loop_statement.continue_label());
__ mov(ebx, eax);
@@ -1190,7 +1221,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), ebx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1250,39 +1281,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ mov(LoadDescriptor::ReceiverRegister(),
- Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- __ cmp(eax, isolate()->factory()->undefined_value());
- Label done;
- __ j(not_equal, &done);
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(),
Operand(esp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1336,20 +1344,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
-
- CallLoadIC(mode);
+ // All extension objects were empty and it is safe to use the normal global
+ // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1415,30 +1412,42 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
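An illustration of the CONTEXTUAL / NOT_CONTEXTUAL distinction above (the identifier is hypothetical):

    // typeof someUndeclaredGlobal   // "undefined" -- NOT_CONTEXTUAL load
    // someUndeclaredGlobal          // ReferenceError -- CONTEXTUAL load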
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(eax);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1506,16 +1515,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1590,7 +1603,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->ComputeFlags();
// If any of the keys would store to the elements array, then we shouldn't
@@ -1617,13 +1629,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in eax.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1650,7 +1661,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1658,6 +1674,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1669,7 +1688,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1703,9 +1723,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
+
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
+
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
@@ -1738,7 +1764,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1780,6 +1807,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(eax);
}
+
+ // Verify that compilation exactly consumed the number of store IC slots that
+ // the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1821,8 +1852,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1837,7 +1871,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_constant_fast_elements) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
@@ -1849,16 +1883,41 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ mov(ecx, Immediate(Smi::FromInt(i)));
+ __ mov(ecx, Immediate(Smi::FromInt(array_index)));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ // In case the array literal contains spread expressions it has two parts. The
+ // first part is the "static" array, which has a literal index and is handled
+ // above. The second part is the part after the first spread expression
+ // (inclusive); these elements get appended to the array. Note that the
+ // number of elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ Drop(1); // literal index
+ __ Pop(eax);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(eax);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ add(esp, Immediate(kPointerSize)); // literal index
+ __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
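Concretely, for a literal like [a, b, ...xs, c] (illustrative names):

    // [a, b]  -> indexed stores into the cloned elements array (first loop above);
    // ...xs   -> Builtins::CONCAT_ITERABLE_TO_ARRAY appends whatever xs yields;
    // c       -> Runtime::kAppendElement appends one more element.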
@@ -1870,9 +1929,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1880,8 +1940,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Nothing to do here.
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ push(result_register());
if (expr->is_compound()) {
__ push(MemOperand(esp, kPointerSize));
@@ -1898,9 +1960,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
@@ -1956,7 +2019,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
@@ -1972,14 +2034,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
break;
@@ -2003,6 +2064,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2087,7 +2150,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(eax); // result
- EnterTryBlock(expr->index(), &l_catch);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(eax); // result
__ jmp(&l_suspend);
@@ -2097,7 +2161,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ mov(eax, Operand(esp, generator_object_depth));
__ push(eax); // g
- __ push(Immediate(Smi::FromInt(expr->index()))); // handler-index
+ __ push(Immediate(Smi::FromInt(handler_index))); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(l_continuation.pos())));
@@ -2111,7 +2175,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(eax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in eax
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
@@ -2124,11 +2188,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result = receiver[f](arg);
__ bind(&l_call);
__ mov(load_receiver, Operand(esp, kPointerSize));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
@@ -2144,10 +2206,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Move(load_receiver, eax); // result
__ mov(load_name,
isolate()->factory()->done_string()); // "done"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2158,10 +2218,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(load_receiver); // result
__ mov(load_name,
isolate()->factory()->value_string()); // "value"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
context()->DropAndPlug(2, eax); // drop iter and g
break;
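// A heavily simplified sketch (plain C++, not V8 code) of the delegating-yield
// loop the labels l_next/l_call/l_loop above implement: call iter.next(arg),
// test result.done, and either continue iterating or unpack result.value. The
// real code also suspends and resumes the generator between iterations, which
// this sketch omits.
#include <functional>

struct IterResult {
  bool done;
  int value;
};

int RunDelegatedIteration(const std::function<IterResult(int)>& next,
                          int first_received) {
  int received = first_received;
  for (;;) {                             // l_loop
    IterResult result = next(received);  // l_call: result = receiver[f](arg)
    if (result.done) {
      return result.value;               // result.value ends up in eax
    }
    received = result.value;             // feed the next received value back
  }
}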
@@ -2293,52 +2351,45 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(!prop->IsSuperAccess());
__ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ push(Immediate(key->value()));
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ push(Immediate(Smi::FromInt(language_mode())));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ push(Immediate(Smi::FromInt(language_mode())));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
@@ -2357,8 +2408,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2437,7 +2488,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in eax.
DCHECK(lit != NULL);
__ push(eax);
@@ -2469,7 +2521,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2502,8 +2555,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(edx);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2511,17 +2564,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2531,13 +2585,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ push(eax);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; eax: home_object
Register scratch = ecx;
Register scratch2 = edx;
@@ -2552,9 +2608,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ push(eax);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = ecx;
Register scratch2 = edx;
@@ -2577,6 +2633,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Move(StoreDescriptor::NameRegister(), eax);
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2598,12 +2655,13 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
__ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2690,16 +2748,18 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
// eax : value
// esp[0] : receiver
-
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2743,11 +2803,14 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(StoreDescriptor::NameRegister()); // Key.
__ pop(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2756,6 +2819,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2764,9 +2829,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), result_register());
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2777,9 +2842,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2828,30 +2893,31 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ SetExpressionPosition(expr);
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
- SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ push(eax);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ push(eax);
__ push(eax);
__ push(Operand(esp, kPointerSize * 2));
__ push(Immediate(key->value()));
+ __ push(Immediate(Smi::FromInt(language_mode())));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2892,23 +2958,24 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
- SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ push(eax);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ push(eax);
__ push(eax);
__ push(Operand(esp, kPointerSize * 2));
VisitForStackValue(prop->key());
+ __ push(Immediate(Smi::FromInt(language_mode())));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2924,14 +2991,11 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2958,8 +3022,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the enclosing function.
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- // Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+
// Push the language mode.
__ push(Immediate(Smi::FromInt(language_mode())));
@@ -2967,28 +3030,64 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
- Variable* this_var = super_ref->this_var()->var();
+ SuperCallReference* super_call_ref, FeedbackVectorICSlot slot) {
+ Variable* this_var = super_call_ref->this_var()->var();
GetVar(ecx, this_var);
__ cmp(ecx, isolate()->factory()->the_hole_value());
+
Label uninitialized_this;
__ j(equal, &uninitialized_this);
__ push(Immediate(this_var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
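// A minimal sketch (plain C++, exception used as a stand-in for the runtime's
// ReferenceError) of the check EmitInitializeThisAfterSuper performs: 'this'
// may be bound exactly once by a super() call, so it is assigned only while it
// still holds the hole; a second initialization throws.
#include <stdexcept>

void InitializeThisAfterSuper(bool this_is_hole, int* this_slot, int value) {
  if (!this_is_hole) {
    // Runtime::kThrowReferenceError in the emitted code.
    throw std::runtime_error("ReferenceError: 'this' already initialized");
  }
  *this_slot = value;  // EmitVariableAssignment(this_var, Token::INIT_CONST)
}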
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in eax) and
+ // the object holding it (returned in edx).
+ __ push(context_register());
+ __ push(Immediate(callee->name()));
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ push(eax); // Function.
+ __ push(edx); // Receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ jmp(&call, Label::kNear);
+ __ bind(&done);
+ // Push function.
+ __ push(eax);
+ // The receiver is implicitly the global receiver. Indicate this by
+      // passing undefined to the call function stub.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+    // The callee is an ordinary expression, so the spec's
+    // refEnv.WithBaseObject() is undefined; push that as the receiver.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ }
}
@@ -3005,33 +3104,29 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- // Reserved receiver slot.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ PushCalleeAndWithBaseObject(expr);
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // Touch up the stack with the resolved function.
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
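// A minimal sketch (illustrative names) of the direct-eval protocol above: the
// callee and a dummy receiver sit below the arguments; a copy of the callee is
// handed to the resolver, and only the callee slot is patched with the result
// before the generic call. The receiver slot is no longer touched up, which is
// why the runtime call dropped from 6 to 5 arguments.
#include <vector>

void* ResolvePossiblyDirectEval(void* callee) { return callee; }  // stand-in

void CallPossiblyDirectEval(std::vector<void*>& stack, int arg_count) {
  // Stack layout (top at back): callee, receiver, arg0 .. argN-1.
  size_t callee_slot = stack.size() - static_cast<size_t>(arg_count) - 2;
  stack[callee_slot] = ResolvePossiblyDirectEval(stack[callee_slot]);
  // ...CallFunctionStub(arg_count) would run on the patched callee here...
}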
@@ -3044,41 +3139,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax) and
- // the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
-
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
@@ -3089,10 +3151,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
+ VisitForStackValue(property->obj());
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3104,9 +3163,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
+ VisitForStackValue(callee);
__ push(Immediate(isolate()->factory()->undefined_value()));
// Emit function call.
EmitCall(expr);
@@ -3128,7 +3185,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3140,7 +3197,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3164,11 +3221,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(eax, new_target_var);
- __ push(eax);
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor();
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
+
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3180,7 +3240,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3206,7 +3266,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(eax);
}
@@ -3471,7 +3531,6 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
}
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3494,6 +3553,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_TYPED_ARRAY_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3742,6 +3823,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3750,19 +3853,14 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = eax;
Register result = eax;
Register scratch = ecx;
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
-
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
@@ -3770,19 +3868,16 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
context()->Plug(result);
}
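// A minimal sketch (simplified types, assumed field layout) of the cache
// protocol EmitDateField now relies on. The removed not_date_object guard
// appears to be handled separately now (cf. the new EmitIsDate above), so
// only the caching logic remains: field 0 is the raw time value and is never
// cached; low-numbered fields are read from the JSDate cache only while the
// object's stamp matches the isolate-wide date cache stamp.
#include <cstdint>

constexpr int kFirstUncachedField = 4;  // assumption for illustration

struct DateLike {
  int64_t value;                               // JSDate::kValueOffset
  int64_t stamp;                               // validity stamp of the cache
  int64_t cached_fields[kFirstUncachedField];  // index 0 unused in this sketch
};

int64_t ComputeDateFieldSlow(const DateLike& d, int index) {
  return d.value + index;  // placeholder for the C runtime computation
}

int64_t ReadDateField(const DateLike& d, int index, int64_t isolate_stamp) {
  if (index == 0) return d.value;  // time value: always read directly
  if (index < kFirstUncachedField && d.stamp == isolate_stamp) {
    return d.cached_fields[index];  // fast path: cache still valid
  }
  return ComputeDateFieldSlow(d, index);  // slow path: recompute
}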
@@ -4083,11 +4178,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(eax, new_target_var);
- __ push(eax);
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+
+ // new.target
+ VisitForStackValue(args->at(0));
- EmitLoadSuperConstructor();
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -4106,8 +4205,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(ecx);
- // Subtract 1 from arguments count, for new.target.
- __ sub(ecx, Immediate(1));
__ mov(eax, ecx);
__ lea(edx, Operand(edx, ecx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
@@ -4510,11 +4607,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr == CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4523,8 +4623,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4540,7 +4640,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, eax);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4552,13 +4653,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4566,8 +4663,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4591,6 +4687,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4615,6 +4712,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(eax);
}
@@ -4638,10 +4736,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(SLOPPY)));
@@ -4651,7 +4750,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-global variables is false. 'this' is
// not really a variable, though we implement it as one. The
// subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4744,10 +4843,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4769,8 +4867,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ push(result_register());
__ push(MemOperand(esp, kPointerSize));
__ push(result_register());
@@ -4779,9 +4878,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
__ push(result_register());
__ push(MemOperand(esp, 2 * kPointerSize));
@@ -4862,9 +4961,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4892,20 +4993,21 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Call stub for +1/-1.
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(
- isolate(), expr->binary_op(), language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in eax.
switch (assign_type) {
case VARIABLE:
@@ -4913,7 +5015,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(eax);
}
@@ -4925,7 +5027,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -4934,7 +5036,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4972,7 +5079,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4988,47 +5100,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(eax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5104,7 +5175,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5158,9 +5229,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -5274,6 +5344,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
__ push(edx);
+
+ ClearPendingMessage();
}
@@ -5296,6 +5368,22 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(edx));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(Operand::StaticVariable(pending_message_obj), edx);
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(slot)));
+}
+
+
#undef __
@@ -5376,6 +5464,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
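// A minimal sketch (hypothetical helpers, not V8 API) of the store-site
// pattern repeated throughout the file above: with --vector-stores the
// feedback slot index is materialized in a register that the store IC reads
// (EmitLoadStoreICSlot), while the legacy path keys type feedback off an AST
// id passed along with the call.
void LoadStoreSlotRegister(int /*slot*/) {}  // stands in for EmitLoadStoreICSlot
void CallStoreIC() {}                        // vector-feedback store IC
void CallStoreICWithId(int /*ast_id*/) {}    // legacy AST-id feedback path

void EmitStoreSite(bool vector_stores, int slot, int ast_id) {
  if (vector_stores) {
    LoadStoreSlotRegister(slot);  // slot travels in a register
    CallStoreIC();
  } else {
    CallStoreICWithId(ast_id);    // feedback keyed off the AST node
  }
}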
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 2c7c0ddabe..4eefa94510 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -16,12 +16,9 @@ const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
+const Register LoadDescriptor::SlotRegister() { return eax; }
-
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
@@ -29,6 +26,12 @@ const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
@@ -58,110 +61,102 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return ecx; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax, ebx, ecx};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {eax, ebx, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax, ebx, ecx, edx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax, ebx, ecx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx, edx};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {ebx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx, edx, edi};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ebx, edx, edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ecx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ecx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi, edx};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi, edx, ebx};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, edx, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
// edx : (only if ebx is not the megamorphic symbol) slot in feedback
@@ -169,208 +164,183 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {esi, eax, edi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax, edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ecx, ebx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ecx, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // esi -- context
- Register registers[] = {esi};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
- Register registers[] = {esi, edi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {esi, edi, ebx, eax};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
- Register registers[] = {esi, edi};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {esi, edi, eax};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ecx, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ecx, edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
ecx, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
ecx, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edx, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edi, // JSFunction
eax, // actual number of arguments
ebx, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
eax, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ edi, // math rounding function
+ edx, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
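
Note: the descriptor hunks above all apply one mechanical transformation. Each per-platform Initialize becomes InitializePlatformSpecific, the implicit context register (esi) disappears from every register list, and the parallel Representation arrays are gone because parameter types are now declared once on the platform-independent side of the descriptor. A minimal sketch of the resulting division of labor, using invented stand-in types rather than V8's real CallInterfaceDescriptorData:

    #include <cstddef>

    struct Register { int code; };

    class CallInterfaceDescriptorData {
     public:
      // Platform files now supply registers only; representations are set up
      // by the platform-independent half of the descriptor.
      void InitializePlatformSpecific(std::size_t count, const Register* regs) {
        register_count_ = count;
        registers_ = regs;
      }

     private:
      std::size_t register_count_ = 0;
      const Register* registers_ = nullptr;
    };

    // One descriptor after the rewrite: no esi, no Representation array.
    void InitializeCompareDescriptor(CallInterfaceDescriptorData* data) {
      static const Register registers[] = {{/* edx */ 2}, {/* eax */ 0}};
      data->InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                       registers);
    }

    int main() {
      CallInterfaceDescriptorData data;
      InitializeCompareDescriptor(&data);
      return 0;
    }
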
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 3f1bab73ee..24076ecd06 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -141,8 +141,8 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
+ !info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
@@ -275,8 +275,9 @@ bool LCodeGen::GeneratePrologue() {
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -617,53 +618,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- default:
- UNREACHABLE();
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
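
Note: the switch deleted above was duplicated in every architecture's WriteTranslation; the new call to WriteTranslationFrame(environment, translation) replaces it with a single shared helper, presumably on the LCodeGenBase side, which this diff does not show. A condensed sketch of the dispatch that helper centralizes, with simplified stand-in types and an invented signature:

    #include <cstdlib>

    enum FrameType { JS_FUNCTION, JS_CONSTRUCT, JS_GETTER, JS_SETTER,
                     ARGUMENTS_ADAPTOR, STUB };

    struct Translation {
      void BeginJSFrame(int ast_id, int closure_id, int height) {}
      void BeginConstructStubFrame(int closure_id, int size) {}
      void BeginCompiledStubFrame() {}
      // ...one Begin* entry point per frame type in the real code...
    };

    // Stand-in for the per-platform switch that each backend used to copy.
    void WriteTranslationFrame(FrameType type, Translation* t, int closure_id,
                               int size, int height, int ast_id) {
      switch (type) {
        case JS_FUNCTION:  t->BeginJSFrame(ast_id, closure_id, height); break;
        case JS_CONSTRUCT: t->BeginConstructStubFrame(closure_id, size); break;
        case STUB:         t->BeginCompiledStubFrame(); break;
        default:           std::abort();  // remaining cases elided
      }
    }

    int main() {
      Translation t;
      WriteTranslationFrame(STUB, &t, 0, 0, 0, 0);
      return 0;
    }
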
@@ -965,28 +930,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1796,18 +1744,13 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(result));
DCHECK(object.is(eax));
- __ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
-
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
@@ -2075,8 +2018,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2502,7 +2445,8 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2773,7 +2717,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2867,10 +2812,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(eax));
AllowDeferredHandleDereference vector_structure_check;
@@ -2883,6 +2827,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ mov(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Immediate(Smi::FromInt(index)));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->global_object())
@@ -2890,11 +2848,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
__ mov(LoadDescriptor::NameRegister(), instr->name());
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3009,12 +2965,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
__ mov(LoadDescriptor::NameRegister(), instr->name());
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3137,7 +3092,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3256,9 +3212,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3484,27 +3440,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- Register scratch = ebx;
- Register extra = edi;
- DCHECK(!scratch.is(receiver) && !scratch.is(name));
- DCHECK(!extra.is(receiver) && !extra.is(name));
-
- // The probe will tail call to a handler if found.
- // If --vector-ics is on, then it knows to pop the two args first.
- isolate()->stub_cache()->GenerateProbe(masm(), Code::LOAD_IC,
- instr->hydrogen()->flags(), false,
- receiver, name, scratch, extra);
-
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
@@ -4165,10 +4100,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4254,7 +4193,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4345,6 +4285,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4362,6 +4306,95 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = eax;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ jmp(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ cmp(ToOperand(current_capacity), Immediate(constant_key));
+ __ j(less_equal, deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ cmp(ToRegister(key), Immediate(constant_capacity));
+ __ j(greater_equal, deferred->entry());
+ } else {
+ __ cmp(ToRegister(key), ToRegister(current_capacity));
+ __ j(greater_equal, deferred->entry());
+ }
+
+ __ mov(result, ToOperand(instr->elements()));
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = eax;
+ __ Move(result, Immediate(0));
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ mov(result, ToOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ mov(ebx, ToImmediate(key, Representation::Smi()));
+ } else {
+ __ Move(ebx, ToRegister(key));
+ __ SmiTag(ebx);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+}
+
+
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
@@ -5778,6 +5811,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
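
Note: DoMaybeGrowElements above uses Lithium's deferred-code idiom. The fast path emits a bounds check that jumps to deferred->entry(), and the out-of-line Generate() (here DoDeferredMaybeGrowElements) calls GrowArrayElementsStub and rejoins at deferred->exit(). A self-contained sketch of that control shape, with invented minimal classes in place of LDeferredCode and the assembler:

    #include <iostream>
    #include <memory>
    #include <utility>
    #include <vector>

    // Out-of-line code attached to one instruction; emitted after the body.
    class DeferredCode {
     public:
      virtual ~DeferredCode() = default;
      virtual void Generate() = 0;
    };

    class CodeGen {
     public:
      template <typename T, typename... Args>
      T* NewDeferred(Args&&... args) {
        deferred_.push_back(std::make_unique<T>(std::forward<Args>(args)...));
        return static_cast<T*>(deferred_.back().get());
      }
      void EmitDeferredCode() {
        for (auto& d : deferred_) d->Generate();  // trailing emission, as in V8
      }

     private:
      std::vector<std::unique_ptr<DeferredCode>> deferred_;
    };

    struct MaybeGrowElements { int key, capacity; };

    class DeferredMaybeGrowElements final : public DeferredCode {
     public:
      explicit DeferredMaybeGrowElements(const MaybeGrowElements* instr)
          : instr_(instr) {}
      void Generate() override {
        // Stands in for calling the stub and deopting on a smi result.
        std::cout << "slow path: grow to hold key " << instr_->key << "\n";
      }

     private:
      const MaybeGrowElements* instr_;
    };

    int main() {
      CodeGen cg;
      MaybeGrowElements instr{8, 4};
      cg.NewDeferred<DeferredMaybeGrowElements>(&instr);
      // Fast path: only keys past the current capacity take the deferred exit.
      if (instr.key >= instr.capacity) cg.EmitDeferredCode();
      return 0;
    }
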
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 32ea6bf17e..285c817343 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -29,7 +29,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -101,6 +100,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -220,7 +220,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -309,6 +308,8 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
@@ -328,7 +329,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
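
Note: deoptimization_literals_ and DefineDeoptimizationLiteral leave the ia32 class here, yet PopulateDeoptimizationLiteralsWithInlinedFunctions in the .cc still uses both, so they have evidently been hoisted into the shared LCodeGenBase, which this diff does not show. A sketch of the deduplicating helper, with a plain pointer standing in for Handle<Object>:

    #include <cstddef>
    #include <vector>

    using ObjectHandle = const void*;  // stand-in for Handle<Object>

    class LCodeGenBase {
     public:
      // Returns the index of an identical existing literal, else appends it.
      int DefineDeoptimizationLiteral(ObjectHandle literal) {
        for (std::size_t i = 0; i < literals_.size(); ++i) {
          if (literals_[i] == literal) return static_cast<int>(i);
        }
        literals_.push_back(literal);
        return static_cast<int>(literals_.size()) - 1;
      }

     protected:
      std::vector<ObjectHandle> literals_;
    };

    int main() {
      LCodeGenBase base;
      int a = 1, b = 2;
      return base.DefineDeoptimizationLiteral(&a) == 0 &&
                     base.DefineDeoptimizationLiteral(&b) == 1 &&
                     base.DefineDeoptimizationLiteral(&a) == 0
                 ? 0
                 : 1;
    }
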
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 682503b1e4..099e1f8ad4 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -477,6 +477,7 @@ void LGapResolver::EmitSwap(int index) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index b085555eb4..64677de83d 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1133,10 +1133,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), esi);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@@ -1146,20 +1154,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
@@ -1830,7 +1824,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* date = UseFixed(instr->value(), eax);
LDateField* result =
new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2141,7 +2135,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2195,7 +2189,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
@@ -2267,7 +2261,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
@@ -2357,8 +2351,15 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(context, object, key, value);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result = new (zone())
+ LStoreKeyedGeneric(context, object, key, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2393,6 +2394,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, eax);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool is_external_location = instr->access().IsExternalMemory() &&
@@ -2454,9 +2470,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
LStoreNamedGeneric* result =
- new(zone()) LStoreNamedGeneric(context, object, value);
+ new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2533,7 +2555,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2664,7 +2686,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2734,6 +2756,7 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
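
Note: DoCallWithDescriptor now materializes the context explicitly (UseFixed(instr->OperandAt(1), esi)) instead of taking it from the descriptor, so the descriptor's register list is indexed with an offset of kImplicitRegisterParameterCount (target plus context). A sketch of that indexing, with plain strings in place of operands and registers:

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    int main() {
      const std::size_t kImplicitRegisterParameterCount = 2;  // target + context

      // Hydrogen operand order: target, context, then descriptor parameters.
      std::vector<std::string> operands = {"target", "context", "name", "value"};
      // The descriptor describes only its own parameters, not the implicit two.
      std::vector<std::string> descriptor_regs = {"edx", "eax"};

      std::vector<std::string> ops;
      ops.push_back(operands[0]);  // target: register or constant
      ops.push_back("esi");        // context: fixed register on ia32
      for (std::size_t i = kImplicitRegisterParameterCount; i < operands.size();
           ++i) {
        // The descriptor index is shifted back by the two implicit parameters.
        ops.push_back(descriptor_regs[i - kImplicitRegisterParameterCount]);
      }
      assert(ops.size() ==
             descriptor_regs.size() + kImplicitRegisterParameterCount);
      return 0;
    }
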
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 4d245ecb50..6a123d6ace 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -121,6 +121,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -154,7 +155,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -469,26 +469,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1190,6 +1170,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1559,7 +1541,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1879,8 +1861,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
- : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ : inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1888,6 +1874,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2193,17 +2183,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2245,22 +2240,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* object,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2319,6 +2316,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
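
Note: the header changes above ride on LTemplateInstruction<R, I, T>, which encodes result, input, and temp counts in the type; giving LStoreNamedGeneric its slot and vector temps is what turns <0, 3, 0> into <0, 3, 2>. A compilable sketch of the pattern, with simplified stand-in types:

    #include <array>

    struct LOperand {};

    template <int R, int I, int T>
    class LTemplateInstruction {
     protected:
      std::array<LOperand*, R> results_;
      std::array<LOperand*, I> inputs_;
      std::array<LOperand*, T> temps_;
    };

    // After the patch: three inputs plus two temps for the slot and vector.
    class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
     public:
      LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
                         LOperand* slot, LOperand* vector) {
        inputs_[0] = context;
        inputs_[1] = object;
        inputs_[2] = value;
        temps_[0] = slot;
        temps_[1] = vector;
      }
      LOperand* temp_slot() { return temps_[0]; }
      LOperand* temp_vector() { return temps_[1]; }
    };

    int main() {
      LOperand context, object, value, slot, vector;
      LStoreNamedGeneric store(&context, &object, &value, &slot, &vector);
      return store.temp_vector() == &vector ? 0 : 1;
    }
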
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 4d599eef8d..6e43c485fc 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1150,6 +1150,7 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
mov(scratch, r0);
shr(scratch, 16);
xor_(r0, scratch);
+ and_(r0, 0x3fffffff);
}
@@ -1750,7 +1751,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
add(start_offset, Immediate(kPointerSize));
bind(&entry);
cmp(start_offset, end_offset);
- j(less, &loop);
+ j(below, &loop);
}
@@ -3226,6 +3227,7 @@ void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
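
Note: two small correctness fixes sit in this file. GetNumberHash now masks the finished hash with 0x3fffffff, which reads as keeping the value within a 30-bit Smi payload on 32-bit targets, and the field-filler loop compares offsets with j(below, ...) because address-like values must be compared unsigned (less is the signed condition). A plain C++ restatement of just the hash finalization step, under that Smi-sizing assumption:

    #include <cstdint>
    #include <iostream>

    uint32_t FinalizeNumberHash(uint32_t h) {
      h ^= h >> 16;           // shr scratch, 16; xor r0, scratch
      return h & 0x3fffffff;  // and_(r0, 0x3fffffff): clamp to 30 bits
    }

    int main() {
      std::cout << FinalizeNumberHash(0xdeadbeefu) << "\n";
      return 0;
    }
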
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 3df85754b9..36be56e46b 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1224,6 +1224,7 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index c3bf11c439..6fffeda08b 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -51,5 +51,5 @@ Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
return store_calling_convention();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index fe4369a1ad..61567a2224 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -54,14 +54,8 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register receiver() const { return registers_[0]; }
Register name() const { return registers_[1]; }
- Register slot() const {
- DCHECK(FLAG_vector_ics);
- return VectorLoadICDescriptor::SlotRegister();
- }
- Register vector() const {
- DCHECK(FLAG_vector_ics);
- return VectorLoadICDescriptor::VectorRegister();
- }
+ Register slot() const { return LoadDescriptor::SlotRegister(); }
+ Register vector() const { return LoadWithVectorDescriptor::VectorRegister(); }
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
Register scratch3() const { return registers_[4]; }
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 4a4d688c05..73ef09663e 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -40,7 +40,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 0343193c22..6af65e2cf2 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -788,7 +788,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 093a7c1bb2..a3b74ce9f3 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -161,7 +161,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow) {
+ Register result, Label* slow,
+ LanguageMode language_mode) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -183,7 +184,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
@@ -202,7 +203,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ b(eq, &return_undefined);
+ __ b(eq, &absent);
__ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
@@ -217,9 +218,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ b(ne, slow);
__ jmp(&check_next_prototype);
- __ bind(&return_undefined);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ jmp(slow);
+ } else {
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -263,7 +269,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = r0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -278,7 +284,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
@@ -289,14 +295,10 @@ static const Register LoadIC_TempRegister() { return r3; }
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ Push(receiver, name, slot, vector);
- } else {
- __ Push(receiver, name);
- }
+ __ Push(receiver, name, slot, vector);
}
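
Note: with FLAG_vector_ics gone, every load miss passes four values, receiver, name, slot, and vector, which is why the miss handlers below fix arg_count at 4. A trivial restatement of the new calling convention:

    #include <string>
    #include <vector>

    std::vector<std::string> LoadICPushArgs() {
      // Always four arguments now; the old two-argument form is gone.
      return {"receiver", "name", "slot", "vector"};
    }

    int main() { return LoadICPushArgs().size() == 4 ? 0 : 1; }
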
@@ -304,136 +306,30 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
__ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Register scratch3, Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, slow_case);
-
- // Check that the key is a positive smi.
- __ tst(key, Operand(0x80000001));
- __ b(ne, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
- __ b(cs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, key, scratch3);
- __ add(scratch3, scratch3, Operand(kOffset));
-
- __ ldr(scratch2, MemOperand(scratch1, scratch3));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, scratch3);
- __ b(eq, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, scratch2, scratch3);
- __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- return MemOperand(scratch1, scratch3);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
- __ b(cs, slow_case);
- __ mov(scratch, Operand(kPointerSize >> 1));
- __ mul(scratch, key, scratch);
- __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
- DCHECK(value.is(r0));
-
- Label slow, notin;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, r3, r4, r5, &notin, &slow);
- __ str(value, mapped_location);
- __ add(r6, r3, r5);
- __ mov(r9, value);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
- __ str(value, unmapped_location);
- __ add(r6, r3, r4);
- __ mov(r9, value);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
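
Note: the hunk above shows the language-mode dispatch this patch threads through the load ICs: strong-mode property loads funnel to a throwing runtime entry (Runtime::kGetPropertyStrong) while everything else keeps Runtime::kGetProperty. A sketch of the selection logic, with simplified enums in place of the real runtime IDs:

    #include <cassert>

    enum class LanguageMode { SLOPPY, STRICT, STRONG };
    enum class RuntimeId { kGetProperty, kGetPropertyStrong };

    bool is_strong(LanguageMode m) { return m == LanguageMode::STRONG; }

    RuntimeId SelectGetProperty(LanguageMode mode) {
      return is_strong(mode) ? RuntimeId::kGetPropertyStrong
                             : RuntimeId::kGetProperty;
    }

    int main() {
      assert(SelectGetProperty(LanguageMode::STRONG) ==
             RuntimeId::kGetPropertyStrong);
      assert(SelectGetProperty(LanguageMode::SLOPPY) ==
             RuntimeId::kGetProperty);
      return 0;
    }
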
@@ -441,9 +337,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);
LoadIC_PushArgs(masm);
@@ -451,21 +346,27 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+  // Do tail-call to runtime routine.

+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -489,7 +390,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r0, r3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow,
+ language_mode);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
__ Ret();
@@ -511,7 +413,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
r3);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
@@ -527,20 +429,16 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ cmp(r4, ip);
__ b(eq, &probe_dictionary);
-
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
- __ mov(slot, Operand(Smi::FromInt(int_slot)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
@@ -784,6 +682,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r3, r4, r5, r6));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ __ mov(slot, Operand(Smi::FromInt(slot_index)));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
@@ -984,7 +896,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.EmitCondition(eq);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
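
Note: both the keyed-load and keyed-store megamorphic paths in this file install a dummy type-feedback vector and slot before probing the stub cache: the handlers expect the (vector, slot) pair in fixed registers, but since the megamorphic stub never patches IC state back, a shared placeholder vector suffices. A sketch of the idea, with invented minimal types:

    struct FeedbackVectorICSlot { int id; };

    struct TypeFeedbackVector {
      // The real vector maps a slot to its index; the dummy has one slot.
      int GetIndex(FeedbackVectorICSlot slot) const { return slot.id; }
    };

    struct ProbeRegisters {
      const TypeFeedbackVector* vector;
      int slot_index;
    };

    ProbeRegisters PrepareMegamorphicProbe(const TypeFeedbackVector& dummy) {
      // Handlers read (vector, slot) but never write IC state back from here,
      // so the shared dummy satisfies the calling convention.
      return {&dummy, dummy.GetIndex(FeedbackVectorICSlot{0})};
    }

    int main() {
      TypeFeedbackVector dummy;
      return PrepareMegamorphicProbe(dummy).slot_index;
    }
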
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index c5c0b7057e..e42f2f7898 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -128,7 +128,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
index 1d6bd30b76..aa247d230f 100644
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -125,8 +125,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
@@ -175,7 +175,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 58e6099ae6..e77476f0a8 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -47,7 +47,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 516bf640d9..3986c0ed66 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -800,7 +800,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index bae6ac33cf..13dd3913ae 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -167,11 +167,12 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow) {
+ Register result, Label* slow,
+ LanguageMode language_mode) {
DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
// Check for fast array.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -191,7 +192,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ Bind(&check_next_prototype);
__ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
- __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &return_undefined);
+ __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent);
__ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
@@ -204,9 +205,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
__ B(&check_next_prototype);
- __ Bind(&return_undefined);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(&done);
+ __ Bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ B(slow);
+ } else {
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
+ }
__ Bind(&in_bounds);
// Fast case: Do the load.
@@ -258,94 +264,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-// Neither 'object' nor 'key' are modified by this function.
-//
-// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
-// left with the object's elements map. Otherwise, it is used as a scratch
-// register.
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object, Register key,
- Register map, Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
-
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
- lt);
-
- // Check that the key is a positive smi.
- __ JumpIfNotSmi(key, slow_case);
- __ Tbnz(key, kXSignBit, slow_case);
-
- // Load the elements object and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup.
- __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Sub(scratch1, scratch1, Smi::FromInt(2));
- __ Cmp(key, scratch1);
- __ B(hs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- static const int offset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ Add(scratch1, map, offset);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
-
- // Load value from context and return it.
- __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
- __ SmiUntag(scratch1);
- __ Lsl(scratch1, scratch1, kPointerSizeLog2);
- __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
- // The base of the result (scratch2) is passed to RecordWrite in
- // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
- return MemOperand(scratch2, scratch1);
-}
-
-
-// The 'parameter_map' register must be loaded with the parameter map of the
-// arguments object and is overwritten.
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- DCHECK(!AreAliased(key, parameter_map, scratch));
-
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Cmp(key, scratch);
- __ B(hs, slow_case);
-
- __ Add(backing_store, backing_store,
- FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch, key);
- return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = x0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -359,7 +278,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ Bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
@@ -368,76 +287,30 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ASM_LOCATION("LoadIC::GenerateMiss");
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(x4, x5, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);
// Perform tail call to the entry.
- if (FLAG_vector_ics) {
- __ Push(VectorLoadICDescriptor::ReceiverRegister(),
- VectorLoadICDescriptor::NameRegister(),
- VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister());
- } else {
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- }
+ __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
+ LoadWithVectorDescriptor::NameRegister(),
+ LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister());
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
- Label slow, notin;
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
- DCHECK(value.is(x0));
-
- Register map = x3;
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register mapped1 = x4;
- Register mapped2 = x5;
-
- MemOperand mapped = GenerateMappedArgumentsLookup(
- masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
- Operand mapped_offset = mapped.OffsetAsOperand();
- __ Str(value, mapped);
- __ Add(x10, mapped.base(), mapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
-
- __ Bind(&notin);
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register unmapped1 = map; // This is assumed to alias 'map'.
- Register unmapped2 = x4;
- MemOperand unmapped =
- GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
- Operand unmapped_offset = unmapped.OffsetAsOperand();
- __ Str(value, unmapped);
- __ Add(x10, unmapped.base(), unmapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Ret();
- __ Bind(&slow);
- GenerateMiss(masm);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
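
This strong/sloppy selection recurs in the keyed variant below and in the ia32 port; distilled into a compilable stand-in (the enums are illustrative, the real ids live in Runtime::FunctionId):

enum class RuntimeId { kGetProperty, kGetPropertyStrong };
enum class LanguageMode { kSloppy, kStrict, kStrong };

// Strong-mode property loads tail-call a throwing runtime entry for absent
// properties; all other modes keep the classic kGetProperty behavior.
RuntimeId SelectGetProperty(LanguageMode mode) {
  return mode == LanguageMode::kStrong ? RuntimeId::kGetPropertyStrong
                                       : RuntimeId::kGetProperty;
}
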
@@ -445,32 +318,32 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(x10, x11, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
- if (FLAG_vector_ics) {
- __ Push(VectorLoadICDescriptor::ReceiverRegister(),
- VectorLoadICDescriptor::NameRegister(),
- VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister());
- } else {
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- }
+ __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
+ LoadWithVectorDescriptor::NameRegister(),
+ LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister());
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
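
Both miss handlers now follow the vector-IC calling convention unconditionally; a stand-in sketch of the argument tuple the C++ miss entry receives (field names are illustrative):

// Four tagged values, pushed in this order before the tail call:
struct KeyedLoadMissArgs {
  void* receiver;  // LoadWithVectorDescriptor::ReceiverRegister()
  void* name;      // LoadWithVectorDescriptor::NameRegister()
  void* slot;      // Smi-tagged index into the feedback vector
  void* vector;    // the TypeFeedbackVector itself
};
constexpr int kArgCount = 4;  // previously 2 when FLAG_vector_ics was off
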
@@ -478,7 +351,8 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
Register receiver, Register scratch1,
Register scratch2, Register scratch3,
Register scratch4, Register scratch5,
- Label* slow) {
+ Label* slow,
+ LanguageMode language_mode) {
DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
scratch5));
@@ -494,7 +368,7 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
__ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
- result, slow);
+ result, slow, language_mode);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
scratch1, scratch2);
__ Ret();
@@ -533,19 +407,16 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
__ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
- __ Mov(slot, Operand(Smi::FromInt(int_slot)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ Mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
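
The dummy-vector trick deserves a note: a megamorphic keyed load has no feedback slot of its own, yet the shared handlers are now compiled against the vector convention. A minimal model of the substitution, with stand-in types:

struct TypeFeedbackVector;  // opaque stand-in

// Before probing the stub cache, the stub materializes a shared, read-only
// dummy vector and its first IC slot (the real code computes the index via
// GetIndex). This is sound because a megamorphic IC never transitions
// again, so no miss path will record feedback through the dummy.
struct VectorSlotPair {
  const TypeFeedbackVector* vector;
  int slot_index;
};

VectorSlotPair DummyFeedback(const TypeFeedbackVector* dummy_vector,
                             int first_ic_slot_index) {
  return {dummy_vector, first_ic_slot_index};
}
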
@@ -568,7 +439,8 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name;
@@ -581,13 +453,14 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ Bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow,
+ language_mode);
// Slow case.
__ Bind(&slow);
__ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
x4, x3);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ Bind(&check_name);
GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
@@ -809,6 +682,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
__ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(x10, &slow);
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, x3, x4, x5, x6));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ __ Mov(slot, Operand(Smi::FromInt(slot_index)));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
@@ -997,7 +884,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.tbz(smi_reg, 0, branch_imm);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index 08ce4cba21..f9eab7d9d2 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -132,7 +132,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
index a9c56a31e7..ba5cbddb64 100644
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -116,8 +116,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
@@ -157,7 +157,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
extra3);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 85dc01acf0..31f5437228 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -111,5 +111,5 @@ void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
is_simple_api_call_ = true;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index c434ce515e..26d195326f 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -53,6 +53,16 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
while (true) {
if (current_map->is_dictionary_map()) cache_name = name;
if (current_map->prototype()->IsNull()) break;
+ if (name->IsPrivate()) {
+ // TODO(verwaest): Use nonexistent_private_symbol.
+ cache_name = name;
+ JSReceiver* prototype = JSReceiver::cast(current_map->prototype());
+ if (!prototype->map()->is_hidden_prototype() &&
+ !prototype->map()->IsGlobalObjectMap()) {
+ break;
+ }
+ }
+
last = handle(JSObject::cast(current_map->prototype()));
current_map = handle(last->map());
}
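
A hedged sketch of the amended walk: for private symbols the negative lookup stops at the first ordinary prototype, since private names never leak across non-hidden, non-global objects. The same early stop is applied to the store-transition walk in the next hunk. Types and predicates below are stand-ins:

struct MapModel {
  bool is_hidden_prototype;
  bool is_global_object;
  const MapModel* prototype;  // nullptr models a null prototype
};

const MapModel* FindLookupEnd(const MapModel* start, bool name_is_private) {
  const MapModel* current = start;
  while (current->prototype != nullptr) {
    const MapModel* proto = current->prototype;
    if (name_is_private && !proto->is_hidden_prototype &&
        !proto->is_global_object) {
      break;  // private lookups need not scan ordinary prototypes
    }
    current = proto;
  }
  return current;
}
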
@@ -428,8 +438,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
if (is_nonexistent) {
// Find the top object.
Handle<JSObject> last;
+ PrototypeIterator::WhereToEnd end =
+ name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
PrototypeIterator iter(isolate(), holder());
- while (!iter.IsAtEnd()) {
+ while (!iter.IsAtEnd(end)) {
last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
iter.Advance();
}
@@ -524,7 +537,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
void ElementHandlerCompiler::CompileElementHandlers(
- MapHandleList* receiver_maps, CodeHandleList* handlers) {
+ MapHandleList* receiver_maps, CodeHandleList* handlers,
+ LanguageMode language_mode) {
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map = receiver_maps->at(i);
Handle<Code> cached_stub;
@@ -532,7 +546,9 @@ void ElementHandlerCompiler::CompileElementHandlers(
if (receiver_map->IsStringMap()) {
cached_stub = LoadIndexedStringStub(isolate()).GetCode();
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
+ cached_stub = is_strong(language_mode)
+ ? isolate()->builtins()->KeyedLoadIC_Slow_Strong()
+ : isolate()->builtins()->KeyedLoadIC_Slow();
} else {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = receiver_map->elements_kind();
@@ -540,8 +556,10 @@ void ElementHandlerCompiler::CompileElementHandlers(
// No need to check for an elements-free prototype chain here, the
// generated stub code needs to check that dynamically anyway.
bool convert_hole_to_undefined =
- is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
- *receiver_map == isolate()->get_initial_js_array_map(elements_kind);
+ (is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
+ *receiver_map ==
+ isolate()->get_initial_js_array_map(elements_kind)) &&
+ !is_strong(language_mode);
if (receiver_map->has_indexed_interceptor()) {
cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
@@ -554,12 +572,15 @@ void ElementHandlerCompiler::CompileElementHandlers(
convert_hole_to_undefined).GetCode();
} else {
DCHECK(elements_kind == DICTIONARY_ELEMENTS);
- cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
+ LoadICState state =
+ LoadICState(is_strong(language_mode) ? LoadICState::kStrongModeState
+ : kNoExtraICState);
+ cached_stub = LoadDictionaryElementStub(isolate(), state).GetCode();
}
}
handlers->Add(cached_stub);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
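
The convert_hole_to_undefined condition above, restated in isolation with the new strong-mode clause (a compilable paraphrase; the enum is a stand-in):

enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS, DICTIONARY_ELEMENTS };

// A holey fast load may silently yield undefined only when (a) the receiver
// is a JSArray whose map is still the unmodified initial array map, so
// Array.prototype cannot supply the element, and (b) the access is not in
// strong mode, where an absent element must throw via the runtime instead.
bool ConvertHoleToUndefined(bool is_js_array, ElementsKind kind,
                            bool has_initial_array_map, bool is_strong_mode) {
  return is_js_array && kind == FAST_HOLEY_ELEMENTS &&
         has_initial_array_map && !is_strong_mode;
}
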
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 077db92307..99bf5e6a7a 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -290,7 +290,8 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
virtual ~ElementHandlerCompiler() {}
void CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers);
+ CodeHandleList* handlers,
+ LanguageMode language_mode);
static void GenerateStoreSlow(MacroAssembler* masm);
};
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 9bcbef0b6f..422a0be5f0 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -38,7 +38,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
}
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 8a7c2bdb87..8f5200aee6 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -130,10 +130,9 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
- DCHECK(!FLAG_vector_ics);
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, scratch1);
- __ ret(0);
+ // TODO(mvstanton): This isn't used on ia32. Move all the other
+ // platform implementations into a code stub so this method can be removed.
+ UNREACHABLE();
}
@@ -809,7 +808,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index 6788bc7a88..abeacc86d4 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -128,7 +128,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 54fd053eaf..d59e58521a 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -172,7 +172,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register scratch2, Register result,
- Label* slow) {
+ Label* slow, LanguageMode language_mode) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
@@ -182,7 +182,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// result - holds the result on exit if the load succeeds and
// we fall through.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(scratch);
@@ -200,7 +200,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ cmp(scratch2, masm->isolate()->factory()->null_value());
- __ j(equal, &return_undefined);
+ __ j(equal, &absent);
__ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
__ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
// scratch: elements of current prototype
@@ -215,9 +215,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ j(not_equal, slow);
__ jmp(&check_next_prototype);
- __ bind(&return_undefined);
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ jmp(slow);
+ } else {
+ __ mov(result, masm->isolate()->factory()->undefined_value());
+ __ jmp(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -263,74 +268,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-static Operand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Label* unmapped_case, Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
- Factory* factory = masm->isolate()->factory();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- __ test(key, Immediate(0x80000001));
- __ j(not_zero, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, Immediate(Smi::FromInt(2)));
- __ cmp(key, scratch2);
- __ j(above_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2,
- FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
- __ cmp(scratch2, factory->the_hole_value());
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- const int kContextOffset = FixedArray::kHeaderSize;
- __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1, scratch2, times_half_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, scratch);
- __ j(greater_equal, slow_case);
- return FieldOperand(backing_store, key, times_half_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -352,7 +291,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
+ language_mode);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
@@ -384,7 +324,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
// Slow case: jump to runtime.
__ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
@@ -399,26 +339,21 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- isolate->factory()->keyed_load_dummy_vector());
- int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ isolate->factory()->keyed_load_dummy_vector());
+ int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
false, receiver, key, ebx, edi);
- if (FLAG_vector_ics) {
- __ pop(VectorLoadICDescriptor::VectorRegister());
- __ pop(VectorLoadICDescriptor::SlotRegister());
- }
+ __ pop(LoadWithVectorDescriptor::VectorRegister());
+ __ pop(LoadDescriptor::SlotRegister());
// Cache miss.
GenerateMiss(masm);
@@ -442,37 +377,6 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow, notin;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(edx));
- DCHECK(name.is(ecx));
- DCHECK(value.is(eax));
-
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, name, ebx, edi, &notin, &slow);
- __ mov(mapped_location, value);
- __ lea(ecx, mapped_location);
- __ mov(edx, value);
- __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow);
- __ mov(unmapped_location, value);
- __ lea(edi, unmapped_location);
- __ mov(edx, value);
- __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
@@ -656,10 +560,28 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
+
+ if (FLAG_vector_stores) {
+ __ pop(VectorStoreICDescriptor::VectorRegister());
+ __ pop(VectorStoreICDescriptor::SlotRegister());
+ }
+
// Cache miss.
__ jmp(&miss);
@@ -711,7 +633,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
}
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = eax;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -726,33 +648,25 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
- DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
- !edi.is(vector));
-
- __ pop(edi);
- __ push(receiver);
- __ push(name);
- __ push(slot);
- __ push(vector);
- __ push(edi);
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
- }
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
+ !edi.is(vector));
+
+ __ pop(edi);
+ __ push(receiver);
+ __ push(name);
+ __ push(slot);
+ __ push(vector);
+ __ push(edi);
}
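
Because ia32 passes the slot and vector on the stack, LoadIC_PushArgs rotates the return address around the arguments; the resulting layout, sketched as a comment (deepest entry first):

// Stack after LoadIC_PushArgs (illustrative):
//   receiver
//   name
//   slot             -- Smi-tagged feedback slot index
//   vector           -- TypeFeedbackVector
//   return address   <- esp, restored from edi
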
@@ -764,12 +678,13 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -780,8 +695,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(name);
__ push(ebx);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
@@ -794,12 +711,13 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -810,8 +728,10 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(name);
__ push(ebx);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
@@ -962,7 +882,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index cb560f12d7..aa807a77a6 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -61,8 +61,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- __ pop(VectorLoadICDescriptor::VectorRegister());
- __ pop(VectorLoadICDescriptor::SlotRegister());
+ __ pop(LoadWithVectorDescriptor::VectorRegister());
+ __ pop(LoadDescriptor::SlotRegister());
}
if (leave_frame) __ leave();
@@ -112,8 +112,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
DCHECK(!offset.is(vector) && !offset.is(slot));
__ pop(vector);
@@ -204,7 +204,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index dfee0127c6..a5ae6cfff4 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -52,6 +52,7 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
ExtraICState extra_ic_state) {
Isolate* isolate = name->GetIsolate();
if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
+ handler.is_identical_to(isolate->builtins()->LoadIC_Normal_Strong()) ||
handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
name = isolate->factory()->normal_ic_symbol();
}
@@ -87,28 +88,8 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
}
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
- Handle<Map> receiver_map) {
- Isolate* isolate = receiver_map->GetIsolate();
- DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
- Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
-
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- Handle<Code> stub = ComputeKeyedLoadMonomorphicHandler(receiver_map);
- PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code = compiler.CompileMonomorphic(
- receiver_map, stub, isolate->factory()->empty_string(), ELEMENT);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
- Handle<Map> receiver_map) {
+ Handle<Map> receiver_map, ExtraICState extra_ic_state) {
Isolate* isolate = receiver_map->GetIsolate();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = receiver_map->elements_kind();
@@ -117,8 +98,8 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
// stub code needs to check that dynamically anyway.
bool convert_hole_to_undefined =
is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
- *receiver_map == isolate->get_initial_js_array_map(elements_kind);
-
+ *receiver_map == isolate->get_initial_js_array_map(elements_kind) &&
+ !(is_strong(LoadICState::GetLanguageMode(extra_ic_state)));
Handle<Code> stub;
if (receiver_map->has_indexed_interceptor()) {
stub = LoadIndexedInterceptorStub(isolate).GetCode();
@@ -133,7 +114,8 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
stub = LoadFastElementStub(isolate, is_js_array, elements_kind,
convert_hole_to_undefined).GetCode();
} else {
- stub = LoadDictionaryElementStub(isolate).GetCode();
+ stub = LoadDictionaryElementStub(isolate, LoadICState(extra_ic_state))
+ .GetCode();
}
return stub;
}
@@ -189,31 +171,6 @@ static void FillCache(Isolate* isolate, Handle<Code> code) {
}
-Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
- InlineCacheState ic_state,
- ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
- Handle<UnseededNumberDictionary> cache =
- isolate->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- PropertyICCompiler compiler(isolate, Code::LOAD_IC);
- Handle<Code> code;
- if (ic_state == UNINITIALIZED) {
- code = compiler.CompileLoadInitialize(flags);
- } else if (ic_state == PREMONOMORPHIC) {
- code = compiler.CompileLoadPreMonomorphic(flags);
- } else if (ic_state == MEGAMORPHIC) {
- code = compiler.CompileLoadMegamorphic(flags);
- } else {
- UNREACHABLE();
- }
- FillCache(isolate, code);
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
InlineCacheState ic_state,
ExtraICState extra_state) {
@@ -266,7 +223,7 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
- MapHandleList* receiver_maps) {
+ MapHandleList* receiver_maps, LanguageMode language_mode) {
Isolate* isolate = receiver_maps->at(0)->GetIsolate();
DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
@@ -277,7 +234,7 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
CodeHandleList handlers(receiver_maps->length());
ElementHandlerCompiler compiler(isolate);
- compiler.CompileElementHandlers(receiver_maps, &handlers);
+ compiler.CompileElementHandlers(receiver_maps, &handlers, language_mode);
PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
Handle<Code> code = ic_compiler.CompilePolymorphic(
receiver_maps, &handlers, isolate->factory()->empty_string(),
@@ -334,23 +291,6 @@ Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
}
-Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
- LoadIC::GeneratePreMonomorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) {
- MegamorphicLoadStub stub(isolate(), LoadICState(extra_ic_state_));
- auto code = stub.GetCode();
- PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
StoreIC::GenerateInitialize(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
@@ -370,7 +310,7 @@ Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- LanguageMode language_mode = StoreIC::GetLanguageMode(extra_state);
+ LanguageMode language_mode = StoreICState::GetLanguageMode(extra_state);
GenerateRuntimeSetProperty(masm(), language_mode);
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
@@ -409,7 +349,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Handle<Map> receiver_map(receiver_maps->at(i));
Handle<Code> cached_stub;
Handle<Map> transitioned_map =
- receiver_map->FindTransitionedMap(receiver_maps);
+ Map::FindTransitionedMap(receiver_map, receiver_maps);
// TODO(mvstanton): The code below is doing pessimistic elements
// transitions. I would like to stop doing that and rely on Allocation Site
@@ -426,9 +366,11 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
+ if (IsSloppyArgumentsElements(elements_kind)) {
+ cached_stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
+ } else if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
cached_stub = StoreFastElementStub(isolate(), is_js_array,
elements_kind, store_mode).GetCode();
} else {
@@ -456,9 +398,11 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
+ if (receiver_map->has_sloppy_arguments_elements()) {
+ stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
+ } else if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
store_mode).GetCode();
} else {
@@ -477,5 +421,5 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index a6c4e81ab6..b5226e9a6e 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -18,8 +18,6 @@ class PropertyICCompiler : public PropertyAccessCompiler {
ExtraICState extra_ic_state);
// Named
- static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state,
- ExtraICState extra_state);
static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
ExtraICState extra_state);
@@ -34,13 +32,13 @@ class PropertyICCompiler : public PropertyAccessCompiler {
// Keyed
static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
- Handle<Map> receiver_map);
- static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
+ Handle<Map> receiver_map, ExtraICState extra_ic_state);
static Handle<Code> ComputeKeyedStoreMonomorphic(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
+ static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps,
+ LanguageMode language_mode);
static Handle<Code> ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
LanguageMode language_mode);
@@ -69,8 +67,6 @@ class PropertyICCompiler : public PropertyAccessCompiler {
CacheHolderFlag cache_holder = kCacheOnReceiver);
Handle<Code> CompileLoadInitialize(Code::Flags flags);
- Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
Handle<Code> CompileStoreInitialize(Code::Flags flags);
Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
Handle<Code> CompileStoreGeneric(Code::Flags flags);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 45dd3476cf..b6ee6b13e5 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -48,41 +48,40 @@ Address IC::address() const {
}
-ConstantPoolArray* IC::constant_pool() const {
- if (!FLAG_enable_ool_constant_pool) {
+Address IC::constant_pool() const {
+ if (!FLAG_enable_embedded_constant_pool) {
return NULL;
} else {
- Handle<ConstantPoolArray> result = raw_constant_pool_;
+ Address constant_pool = raw_constant_pool();
Debug* debug = isolate()->debug();
    // First check if any break points are active; if not, just return the
// original constant pool.
- if (!debug->has_break_points()) return *result;
+ if (!debug->has_break_points()) return constant_pool;
    // At least one break point is active; perform an additional test to
    // ensure that break point locations are updated correctly.
Address target = Assembler::target_address_from_return_address(pc());
if (debug->IsDebugBreak(
- Assembler::target_address_at(target, raw_constant_pool()))) {
+ Assembler::target_address_at(target, constant_pool))) {
// If the call site is a call to debug break then we want to return the
// constant pool for the original code instead of the breakpointed code.
return GetOriginalCode()->constant_pool();
}
- return *result;
+ return constant_pool;
}
}
-ConstantPoolArray* IC::raw_constant_pool() const {
- if (FLAG_enable_ool_constant_pool) {
- return *raw_constant_pool_;
+Address IC::raw_constant_pool() const {
+ if (FLAG_enable_embedded_constant_pool) {
+ return *constant_pool_address_;
} else {
return NULL;
}
}
-Code* IC::GetTargetAtAddress(Address address,
- ConstantPoolArray* constant_pool) {
+Code* IC::GetTargetAtAddress(Address address, Address constant_pool) {
// Get the target address of the IC.
Address target = Assembler::target_address_at(address, constant_pool);
// Convert target address to the code object. Code::GetCodeFromTargetAddress
@@ -94,13 +93,16 @@ Code* IC::GetTargetAtAddress(Address address,
void IC::SetTargetAtAddress(Address address, Code* target,
- ConstantPoolArray* constant_pool) {
+ Address constant_pool) {
+ if (AddressIsDeoptimizedCode(target->GetIsolate(), address)) return;
+
DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
- // Don't use this for load_ics when --vector-ics is turned on.
- DCHECK(!(FLAG_vector_ics && target->is_inline_cache_stub()) ||
+ DCHECK(!target->is_inline_cache_stub() ||
(target->kind() != Code::LOAD_IC &&
- target->kind() != Code::KEYED_LOAD_IC));
+ target->kind() != Code::KEYED_LOAD_IC &&
+ (!FLAG_vector_stores || (target->kind() != Code::STORE_IC &&
+ target->kind() != Code::KEYED_STORE_IC))));
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
@@ -109,8 +111,8 @@ void IC::SetTargetAtAddress(Address address, Code* target,
// ICs as language mode. The language mode of the IC must be preserved.
if (old_target->kind() == Code::STORE_IC ||
old_target->kind() == Code::KEYED_STORE_IC) {
- DCHECK(StoreIC::GetLanguageMode(old_target->extra_ic_state()) ==
- StoreIC::GetLanguageMode(target->extra_ic_state()));
+ DCHECK(StoreICState::GetLanguageMode(old_target->extra_ic_state()) ==
+ StoreICState::GetLanguageMode(target->extra_ic_state()));
}
#endif
Assembler::set_target_address_at(address, constant_pool,
@@ -134,6 +136,9 @@ void LoadIC::set_target(Code* code) {
// The contextual mode must be preserved across IC patching.
DCHECK(LoadICState::GetContextualMode(code->extra_ic_state()) ==
LoadICState::GetContextualMode(target()->extra_ic_state()));
+ // Strongness must be preserved across IC patching.
+ DCHECK(LoadICState::GetLanguageMode(code->extra_ic_state()) ==
+ LoadICState::GetLanguageMode(target()->extra_ic_state()));
IC::set_target(code);
}
@@ -141,15 +146,16 @@ void LoadIC::set_target(Code* code) {
void StoreIC::set_target(Code* code) {
// Language mode must be preserved across IC patching.
- DCHECK(GetLanguageMode(code->extra_ic_state()) ==
- GetLanguageMode(target()->extra_ic_state()));
+ DCHECK(StoreICState::GetLanguageMode(code->extra_ic_state()) ==
+ StoreICState::GetLanguageMode(target()->extra_ic_state()));
IC::set_target(code);
}
void KeyedStoreIC::set_target(Code* code) {
// Language mode must be preserved across IC patching.
- DCHECK(GetLanguageMode(code->extra_ic_state()) == language_mode());
+ DCHECK(StoreICState::GetLanguageMode(code->extra_ic_state()) ==
+ language_mode());
IC::set_target(code);
}
@@ -211,12 +217,25 @@ Handle<Map> IC::GetICCacheHolder(Handle<Map> map, Isolate* isolate,
}
-inline Code* IC::get_host() {
+Code* IC::get_host() {
return isolate()
->inner_pointer_to_code_cache()
->GetCacheEntry(address())
->code;
}
+
+
+bool IC::AddressIsDeoptimizedCode() const {
+ return AddressIsDeoptimizedCode(isolate(), address());
+}
+
+
+bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
+ Code* host =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+ return (host->kind() == Code::OPTIMIZED_FUNCTION &&
+ host->marked_for_deoptimization());
+}
}
} // namespace v8::internal
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 0c71949d8a..8ab08bc08d 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -12,7 +12,7 @@ namespace internal {
// static
void ICUtility::Clear(Isolate* isolate, Address address,
- ConstantPoolArray* constant_pool) {
+ Address constant_pool) {
IC::Clear(isolate, address, constant_pool);
}
@@ -52,7 +52,7 @@ BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
isolate_(isolate) {
op_ =
static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
- strong_ = StrongField::decode(extra_ic_state);
+ strong_ = StrengthField::decode(extra_ic_state);
left_kind_ = LeftKindField::decode(extra_ic_state);
right_kind_ = fixed_right_arg_.IsJust()
? (Smi::IsValid(fixed_right_arg_.FromJust()) ? SMI : INT32)
@@ -66,8 +66,7 @@ BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
ExtraICState BinaryOpICState::GetExtraICState() const {
ExtraICState extra_ic_state =
OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
- ResultKindField::encode(result_kind_) |
- StrongField::encode(strong_) |
+ ResultKindField::encode(result_kind_) | StrengthField::encode(strong_) |
HasFixedRightArgField::encode(fixed_right_arg_.IsJust());
if (fixed_right_arg_.IsJust()) {
extra_ic_state = FixedRightArgValueField::update(
@@ -86,14 +85,14 @@ void BinaryOpICState::GenerateAheadOfTime(
// expensive at runtime. When solved we should be able to add most binops to
// the snapshot instead of hand-picking them.
// Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind) \
- do { \
- BinaryOpICState state(isolate, op, LanguageMode::SLOPPY); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_ = Nothing<int>(); \
- state.right_kind_ = right_kind; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
+#define GENERATE(op, left_kind, right_kind, result_kind) \
+ do { \
+ BinaryOpICState state(isolate, op, Strength::WEAK); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_ = Nothing<int>(); \
+ state.right_kind_ = right_kind; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
} while (false)
GENERATE(Token::ADD, INT32, INT32, INT32);
GENERATE(Token::ADD, INT32, INT32, NUMBER);
@@ -190,7 +189,7 @@ void BinaryOpICState::GenerateAheadOfTime(
#undef GENERATE
#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind) \
do { \
- BinaryOpICState state(isolate, op, LanguageMode::SLOPPY); \
+ BinaryOpICState state(isolate, op, Strength::WEAK); \
state.left_kind_ = left_kind; \
state.fixed_right_arg_ = Just(fixed_right_arg_value); \
state.right_kind_ = SMI; \
@@ -224,6 +223,7 @@ Type* BinaryOpICState::GetResultType(Zone* zone) const {
std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
os << "(" << Token::Name(s.op_);
if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
+ if (is_strong(s.strength())) os << "_Strong";
os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
if (s.fixed_right_arg_.IsJust()) {
os << s.fixed_right_arg_.FromJust();
@@ -511,5 +511,5 @@ CompareICState::State CompareICState::TargetState(
UNREACHABLE();
return GENERIC; // Make the compiler happy.
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 76c0155206..e1fed19133 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -17,8 +17,7 @@ const int kMaxKeyedPolymorphism = 4;
class ICUtility : public AllStatic {
public:
// Clear the inline cache to initial state.
- static void Clear(Isolate* isolate, Address address,
- ConstantPoolArray* constant_pool);
+ static void Clear(Isolate* isolate, Address address, Address constant_pool);
};
@@ -57,9 +56,9 @@ std::ostream& operator<<(std::ostream& os, const CallICState& s);
class BinaryOpICState final BASE_EMBEDDED {
public:
BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
- BinaryOpICState(Isolate* isolate, Token::Value op, LanguageMode language_mode)
+ BinaryOpICState(Isolate* isolate, Token::Value op, Strength strength)
: op_(op),
- strong_(is_strong(language_mode)),
+ strong_(is_strong(strength)),
left_kind_(NONE),
right_kind_(NONE),
result_kind_(NONE),
@@ -106,8 +105,8 @@ class BinaryOpICState final BASE_EMBEDDED {
return Max(left_kind_, right_kind_) == GENERIC;
}
- LanguageMode language_mode() const {
- return strong_ ? LanguageMode::STRONG : LanguageMode::SLOPPY;
+ Strength strength() const {
+ return strong_ ? Strength::STRONG : Strength::WEAK;
}
// Returns true if the IC should enable the inline smi code (i.e. if either
@@ -148,7 +147,7 @@ class BinaryOpICState final BASE_EMBEDDED {
class OpField : public BitField<int, 0, 4> {};
class ResultKindField : public BitField<Kind, 4, 3> {};
class LeftKindField : public BitField<Kind, 7, 3> {};
- class StrongField : public BitField<bool, 10, 1> {};
+ class StrengthField : public BitField<bool, 10, 1> {};
// When fixed right arg is set, we don't need to store the right kind.
// Thus the two fields can overlap.
class HasFixedRightArgField : public BitField<bool, 11, 1> {};
@@ -202,11 +201,24 @@ class CompareICState {
class LoadICState final BASE_EMBEDDED {
+ private:
+ class ContextualModeBits : public BitField<ContextualMode, 0, 1> {};
+ class LanguageModeBits
+ : public BitField<LanguageMode, ContextualModeBits::kNext, 2> {};
+ STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
+ const ExtraICState state_;
+
public:
+ static const uint32_t kNextBitFieldOffset = LanguageModeBits::kNext;
+
+ static const ExtraICState kStrongModeState = STRONG
+ << LanguageModeBits::kShift;
+
explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
- explicit LoadICState(ContextualMode mode)
- : state_(ContextualModeBits::encode(mode)) {}
+ explicit LoadICState(ContextualMode mode, LanguageMode language_mode)
+ : state_(ContextualModeBits::encode(mode) |
+ LanguageModeBits::encode(language_mode)) {}
ExtraICState GetExtraICState() const { return state_; }
@@ -214,14 +226,46 @@ class LoadICState final BASE_EMBEDDED {
return ContextualModeBits::decode(state_);
}
+ LanguageMode language_mode() const {
+ return LanguageModeBits::decode(state_);
+ }
+
static ContextualMode GetContextualMode(ExtraICState state) {
return LoadICState(state).contextual_mode();
}
- private:
- class ContextualModeBits : public BitField<ContextualMode, 0, 1> {};
- STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
+ static LanguageMode GetLanguageMode(ExtraICState state) {
+ return LoadICState(state).language_mode();
+ }
+};
+
+
+class StoreICState final BASE_EMBEDDED {
+ public:
+ explicit StoreICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
+ explicit StoreICState(LanguageMode mode)
+ : state_(LanguageModeState::encode(mode)) {}
+
+ ExtraICState GetExtraICState() const { return state_; }
+
+ LanguageMode language_mode() const {
+ return LanguageModeState::decode(state_);
+ }
+
+ static LanguageMode GetLanguageMode(ExtraICState state) {
+ return StoreICState(state).language_mode();
+ }
+
+ class LanguageModeState : public BitField<LanguageMode, 1, 2> {};
+ STATIC_ASSERT(i::LANGUAGE_END == 3);
+
+ // For convenience, a statically declared encoding of strict mode extra
+ // IC state.
+ static const ExtraICState kStrictModeState = STRICT
+ << LanguageModeState::kShift;
+
+ private:
const ExtraICState state_;
};
}
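
StoreICState takes over the LanguageMode encoding that previously lived directly on StoreIC (see the matching deletion in ic.h further down). A compilable sketch of the two-bit encoding follows; the enumerator values are an assumption for illustration, since the code only asserts that LANGUAGE_END == 3:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t ExtraICState;

    // Assumed enumerator values; two bits suffice because LANGUAGE_END == 3.
    enum LanguageMode { SLOPPY = 0, STRICT = 1, STRONG = 2 };

    const int kLanguageModeShift = 1;  // LanguageModeState starts at bit 1
    const uint32_t kLanguageModeMask = 3u << kLanguageModeShift;

    ExtraICState EncodeStoreICState(LanguageMode mode) {
      return static_cast<uint32_t>(mode) << kLanguageModeShift;
    }

    LanguageMode DecodeLanguageMode(ExtraICState state) {
      return static_cast<LanguageMode>((state & kLanguageModeMask) >>
                                       kLanguageModeShift);
    }

    // Statically declared strict-mode state, as in kStrictModeState above.
    const ExtraICState kStrictModeState = STRICT << kLanguageModeShift;

    int main() {
      assert(DecodeLanguageMode(EncodeStoreICState(STRICT)) == STRICT);
      assert(EncodeStoreICState(STRICT) == kStrictModeState);
      return 0;
    }
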
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 65b2e3df9a..9f75af8eb3 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -16,7 +16,7 @@
#include "src/ic/ic-inl.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
-#include "src/messages.h"
+#include "src/macro-assembler.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
@@ -90,6 +90,7 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type, Handle<Object> name) {
if (FLAG_trace_ic) {
+ if (AddressIsDeoptimizedCode()) return;
State new_state =
UseVector() ? nexus()->StateFromFeedback() : raw_target()->ic_state();
TraceIC(type, name, state(), new_state);
@@ -150,10 +151,10 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
- Address constant_pool = NULL;
- if (FLAG_enable_ool_constant_pool) {
- constant_pool =
- Memory::Address_at(entry + ExitFrameConstants::kConstantPoolOffset);
+ Address* constant_pool = NULL;
+ if (FLAG_enable_embedded_constant_pool) {
+ constant_pool = reinterpret_cast<Address*>(
+ entry + ExitFrameConstants::kConstantPoolOffset);
}
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
@@ -162,9 +163,9 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
// StubFailureTrampoline, we need to look one frame further down the stack to
// find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
- if (FLAG_enable_ool_constant_pool) {
- constant_pool =
- Memory::Address_at(fp + StandardFrameConstants::kConstantPoolOffset);
+ if (FLAG_enable_embedded_constant_pool) {
+ constant_pool = reinterpret_cast<Address*>(
+ fp + StandardFrameConstants::kConstantPoolOffset);
}
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
@@ -177,10 +178,8 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
#endif
fp_ = fp;
- if (FLAG_enable_ool_constant_pool) {
- raw_constant_pool_ = handle(
- ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
- isolate);
+ if (FLAG_enable_embedded_constant_pool) {
+ constant_pool_address_ = constant_pool;
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
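
The switch from a Handle<ConstantPoolArray> to a raw slot address mirrors how pc_address_ already works: the IC remembers where the frame stores the value and rereads it through that pointer, so a collector that moves code and rewrites the frame slot is always observed. A toy model of that indirection, with names invented for illustration:

    #include <cassert>
    #include <cstdint>

    typedef uint8_t* Address;

    // Stand-in for a stack frame slot at fp + kConstantPoolOffset.
    struct FakeFrame {
      Address constant_pool_slot;
    };

    // The IC keeps the slot's address and rereads it on every access, so a
    // GC that relocates the pool and rewrites the slot cannot leave the IC
    // holding a stale pointer.
    Address ReadConstantPool(Address* slot) { return *slot; }

    int main() {
      uint8_t pool_a = 0, pool_b = 0;
      FakeFrame frame = {&pool_a};
      Address* slot = &frame.constant_pool_slot;
      assert(ReadConstantPool(slot) == &pool_a);
      frame.constant_pool_slot = &pool_b;  // GC relocated the pool
      assert(ReadConstantPool(slot) == &pool_b);
      return 0;
    }
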
@@ -231,14 +230,6 @@ bool IC::AddressIsOptimizedCode() const {
}
-bool IC::AddressIsDeoptimizedCode() const {
- Code* host =
- isolate()->inner_pointer_to_code_cache()->GetCacheEntry(address())->code;
- return host->kind() == Code::OPTIMIZED_FUNCTION &&
- host->marked_for_deoptimization();
-}
-
-
static void LookupForRead(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -362,11 +353,10 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
}
-MaybeHandle<Object> IC::TypeError(const char* type, Handle<Object> object,
- Handle<Object> key) {
+MaybeHandle<Object> IC::TypeError(MessageTemplate::Template index,
+ Handle<Object> object, Handle<Object> key) {
HandleScope scope(isolate());
- Handle<Object> args[2] = {key, object};
- THROW_NEW_ERROR(isolate(), NewTypeError(type, HandleVector(args, 2)), Object);
+ THROW_NEW_ERROR(isolate(), NewTypeError(index, key, object), Object);
}
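
TypeError now takes a MessageTemplate::Template enum instead of a C-string key, and the key and object are passed positionally rather than through HandleVector. A minimal model of template-indexed error formatting in that style; the message texts here are invented for illustration, not V8's actual templates:

    #include <cassert>
    #include <string>

    namespace MessageTemplate {
    enum Template {
      kNonObjectPropertyLoad,
      kNonObjectPropertyStore,
      kConstAssign
    };
    }

    // Errors are identified by enum value and formatted from positional
    // arguments instead of being looked up by name.
    std::string FormatMessage(MessageTemplate::Template index,
                              const std::string& arg0,
                              const std::string& arg1) {
      switch (index) {
        case MessageTemplate::kNonObjectPropertyLoad:
          return "Cannot read property '" + arg0 + "' of " + arg1;
        case MessageTemplate::kNonObjectPropertyStore:
          return "Cannot set property '" + arg0 + "' of " + arg1;
        case MessageTemplate::kConstAssign:
          return "Assignment to constant variable.";
      }
      return "";
    }

    int main() {
      assert(FormatMessage(MessageTemplate::kNonObjectPropertyLoad, "x",
                           "undefined") ==
             "Cannot read property 'x' of undefined");
      return 0;
    }
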
@@ -488,8 +478,7 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::Clear(Isolate* isolate, Address address,
- ConstantPoolArray* constant_pool) {
+void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
Code* target = GetTargetAtAddress(address, constant_pool);
// Don't clear debug break inline cache as it will remove the break point.
@@ -497,14 +486,13 @@ void IC::Clear(Isolate* isolate, Address address,
switch (target->kind()) {
case Code::LOAD_IC:
- if (FLAG_vector_ics) return;
- return LoadIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_LOAD_IC:
- if (FLAG_vector_ics) return;
- return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
+ return;
case Code::STORE_IC:
+ if (FLAG_vector_stores) return;
return StoreIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_STORE_IC:
+ if (FLAG_vector_stores) return;
return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
case Code::COMPARE_IC:
return CompareIC::Clear(isolate, address, target, constant_pool);
@@ -522,18 +510,6 @@ void IC::Clear(Isolate* isolate, Address address,
}
-void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool) {
- DCHECK(!FLAG_vector_ics);
- if (IsCleared(target)) return;
-
- // Make sure to also clear the map used in inline fast cases. If we
- // do not clear these maps, cached code can keep objects alive
- // through the embedded maps.
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
-}
-
-
void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
if (IsCleared(nexus)) return;
// Make sure to also clear the map used in inline fast cases. If we
@@ -558,16 +534,6 @@ void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
}
-void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool) {
- DCHECK(!FLAG_vector_ics);
- if (IsCleared(target)) return;
- Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
- target->extra_ic_state());
- SetTargetAtAddress(address, code, constant_pool);
-}
-
-
void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
if (IsCleared(nexus)) return;
State state = nexus->StateFromFeedback();
@@ -577,7 +543,7 @@ void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool) {
+ Address constant_pool) {
if (IsCleared(target)) return;
Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
target->extra_ic_state());
@@ -585,34 +551,54 @@ void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
}
+void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
+ if (IsCleared(nexus)) return;
+ State state = nexus->StateFromFeedback();
+ nexus->ConfigurePremonomorphic();
+ OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+}
+
+
void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool) {
+ Address constant_pool) {
if (IsCleared(target)) return;
- SetTargetAtAddress(
- address, *pre_monomorphic_stub(
- isolate, StoreIC::GetLanguageMode(target->extra_ic_state())),
- constant_pool);
+ Handle<Code> code = pre_monomorphic_stub(
+ isolate, StoreICState::GetLanguageMode(target->extra_ic_state()));
+ SetTargetAtAddress(address, *code, constant_pool);
+}
+
+
+void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
+ KeyedStoreICNexus* nexus) {
+ if (IsCleared(nexus)) return;
+ State state = nexus->StateFromFeedback();
+ nexus->ConfigurePremonomorphic();
+ OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
}
void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool) {
+ Address constant_pool) {
DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
CompareICStub stub(target->stub_key(), isolate);
// Only clear CompareICs that can retain objects.
if (stub.state() != CompareICState::KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(isolate, stub.op()),
+ SetTargetAtAddress(address,
+ GetRawUninitialized(isolate, stub.op(), stub.strength()),
constant_pool);
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
// static
-Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate) {
+Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
+ ExtraICState extra_state) {
if (FLAG_compiled_keyed_generic_loads) {
- return KeyedLoadGenericStub(isolate).GetCode();
+ return KeyedLoadGenericStub(isolate, LoadICState(extra_state)).GetCode();
} else {
- return isolate->builtins()->KeyedLoadIC_Megamorphic();
+ return is_strong(LoadICState::GetLanguageMode(extra_state))
+ ? isolate->builtins()->KeyedLoadIC_Megamorphic_Strong()
+ : isolate->builtins()->KeyedLoadIC_Megamorphic();
}
}
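
ChooseMegamorphicStub now inspects the language mode carried in the extra state, selecting the _Strong builtin variant for strong-mode loads. A sketch of that selection with the builtins reduced to placeholder strings (the two megamorphic names match the diff; everything else is illustrative):

    #include <string>

    enum class Strength { WEAK, STRONG };

    bool is_strong(Strength s) { return s == Strength::STRONG; }

    std::string ChooseMegamorphicStub(bool compiled_keyed_generic_loads,
                                      Strength strength) {
      if (compiled_keyed_generic_loads) {
        // The generic stub is parameterized by the full LoadICState instead.
        return "KeyedLoadGenericStub";
      }
      return is_strong(strength) ? "KeyedLoadIC_Megamorphic_Strong"
                                 : "KeyedLoadIC_Megamorphic";
    }

    int main() {
      return ChooseMegamorphicStub(false, Strength::STRONG) ==
                     "KeyedLoadIC_Megamorphic_Strong"
                 ? 0
                 : 1;
    }
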
@@ -628,24 +614,10 @@ static bool MigrateDeprecated(Handle<Object> object) {
void IC::ConfigureVectorState(IC::State new_state) {
DCHECK(UseVector());
- if (kind() == Code::LOAD_IC) {
- LoadICNexus* nexus = casted_nexus<LoadICNexus>();
- if (new_state == PREMONOMORPHIC) {
- nexus->ConfigurePremonomorphic();
- } else if (new_state == MEGAMORPHIC) {
- nexus->ConfigureMegamorphic();
- } else {
- UNREACHABLE();
- }
- } else if (kind() == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
- if (new_state == PREMONOMORPHIC) {
- nexus->ConfigurePremonomorphic();
- } else if (new_state == MEGAMORPHIC) {
- nexus->ConfigureMegamorphic();
- } else {
- UNREACHABLE();
- }
+ if (new_state == PREMONOMORPHIC) {
+ nexus()->ConfigurePremonomorphic();
+ } else if (new_state == MEGAMORPHIC) {
+ nexus()->ConfigureMegamorphic();
} else {
UNREACHABLE();
}
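
The collapse of the per-kind branches works because ConfigurePremonomorphic and ConfigureMegamorphic are available on the common nexus type, so the IC kind no longer matters for these two transitions. A standalone sketch of that shape, with class and member names illustrative only:

    #include <cassert>

    enum State { PREMONOMORPHIC, MEGAMORPHIC };

    // Once both transitions live on the common base, callers need not know
    // which concrete nexus they hold.
    struct FeedbackNexus {
      State state;
      FeedbackNexus() : state(PREMONOMORPHIC) {}
      virtual ~FeedbackNexus() {}
      virtual void ConfigurePremonomorphic() { state = PREMONOMORPHIC; }
      virtual void ConfigureMegamorphic() { state = MEGAMORPHIC; }
    };

    struct LoadICNexus : FeedbackNexus {};
    struct KeyedStoreICNexus : FeedbackNexus {};

    void ConfigureVectorState(FeedbackNexus* nexus, State new_state) {
      if (new_state == PREMONOMORPHIC) {
        nexus->ConfigurePremonomorphic();
      } else {
        assert(new_state == MEGAMORPHIC);
        nexus->ConfigureMegamorphic();
      }
    }

    int main() {
      KeyedStoreICNexus nexus;
      ConfigureVectorState(&nexus, MEGAMORPHIC);
      assert(nexus.state == MEGAMORPHIC);
      return 0;
    }
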
@@ -662,10 +634,16 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
if (kind() == Code::LOAD_IC) {
LoadICNexus* nexus = casted_nexus<LoadICNexus>();
nexus->ConfigureMonomorphic(map, handler);
- } else {
- DCHECK(kind() == Code::KEYED_LOAD_IC);
+ } else if (kind() == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
nexus->ConfigureMonomorphic(name, map, handler);
+ } else if (kind() == Code::STORE_IC) {
+ StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+ nexus->ConfigureMonomorphic(map, handler);
+ } else {
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+ nexus->ConfigureMonomorphic(name, map, handler);
}
vector_set_ = true;
@@ -680,10 +658,16 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
if (kind() == Code::LOAD_IC) {
LoadICNexus* nexus = casted_nexus<LoadICNexus>();
nexus->ConfigurePolymorphic(maps, handlers);
- } else {
- DCHECK(kind() == Code::KEYED_LOAD_IC);
+ } else if (kind() == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
nexus->ConfigurePolymorphic(name, maps, handlers);
+ } else if (kind() == Code::STORE_IC) {
+ StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+ nexus->ConfigurePolymorphic(maps, handlers);
+ } else {
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+ nexus->ConfigurePolymorphic(name, maps, handlers);
}
vector_set_ = true;
@@ -696,7 +680,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
+ return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
}
// Check if the name is trivially convertible to an index and get
@@ -705,18 +689,16 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) {
- if (UseVector()) {
- ConfigureVectorState(MEGAMORPHIC);
- } else {
- set_target(*megamorphic_stub());
- }
+ DCHECK(UseVector());
+ ConfigureVectorState(MEGAMORPHIC);
TRACE_IC("LoadIC", name);
TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
}
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Runtime::GetElementOrCharAt(isolate(), object, index), Object);
+ Runtime::GetElementOrCharAt(isolate(), object, index, language_mode()),
+ Object);
return result;
}
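
Threading language_mode() into GetElementOrCharAt (and into Object::GetProperty below) matters because strong-mode semantics, which this patch supports, treat a failed lookup as an error rather than producing undefined. A toy illustration of that behavioral split, assuming that semantic and not modeling V8's actual lookup machinery:

    #include <map>
    #include <stdexcept>
    #include <string>

    enum class LanguageMode { SLOPPY, STRICT, STRONG };

    typedef std::map<std::string, int> Object;

    // Under the assumed strong semantics a missing property throws instead
    // of yielding undefined; 0 stands in for `undefined` here.
    int GetProperty(const Object& object, const std::string& name,
                    LanguageMode mode) {
      Object::const_iterator it = object.find(name);
      if (it != object.end()) return it->second;
      if (mode == LanguageMode::STRONG) {
        throw std::runtime_error("TypeError: property not defined");
      }
      return 0;
    }

    int main() {
      Object o;
      o["x"] = 1;
      return GetProperty(o, "x", LanguageMode::STRONG) == 1 ? 0 : 1;
    }
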
@@ -759,8 +741,9 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// Get the property.
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
- Object);
+
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result, Object::GetProperty(&it, language_mode()), Object);
if (it.IsFound()) {
return result;
} else if (!IsUndeclaredGlobal(object)) {
@@ -942,90 +925,36 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
ExtraICState extra_state) {
- if (FLAG_vector_ics) {
- return LoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
- }
-
- return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state);
-}
-
-
-Handle<Code> LoadIC::load_global(Isolate* isolate, Handle<GlobalObject> global,
- Handle<String> name) {
- // This special IC doesn't work with vector ics.
- DCHECK(!FLAG_vector_ics);
-
- Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
-
- ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
- return initialize_stub(isolate, LoadICState(CONTEXTUAL).GetExtraICState());
- }
-
- Handle<Map> global_map(global->map());
- Handle<Code> handler = PropertyHandlerCompiler::Find(
- name, global_map, Code::LOAD_IC, kCacheOnReceiver, Code::NORMAL);
- if (handler.is_null()) {
- LookupIterator it(global, name);
- if (!it.IsFound() || !it.GetHolder<JSObject>().is_identical_to(global) ||
- it.state() != LookupIterator::DATA) {
- return initialize_stub(isolate,
- LoadICState(CONTEXTUAL).GetExtraICState());
- }
- NamedLoadHandlerCompiler compiler(isolate, global_map, global,
- kCacheOnReceiver);
- Handle<PropertyCell> cell = it.GetPropertyCell();
- handler = compiler.CompileLoadGlobal(cell, name, it.IsConfigurable());
- Map::UpdateCodeCache(global_map, name, handler);
- }
- return PropertyICCompiler::ComputeMonomorphic(
- Code::LOAD_IC, name, handle(global->map()), handler,
- LoadICState(CONTEXTUAL).GetExtraICState());
+ return LoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
}
Handle<Code> LoadIC::initialize_stub_in_optimized_code(
Isolate* isolate, ExtraICState extra_state, State initialization_state) {
- if (FLAG_vector_ics) {
- return VectorRawLoadStub(isolate, LoadICState(extra_state)).GetCode();
- }
- return PropertyICCompiler::ComputeLoad(isolate, initialization_state,
- extra_state);
+ return LoadICStub(isolate, LoadICState(extra_state)).GetCode();
}
-Handle<Code> KeyedLoadIC::initialize_stub(Isolate* isolate) {
- if (FLAG_vector_ics) {
- return KeyedLoadICTrampolineStub(isolate).GetCode();
- }
-
- return isolate->builtins()->KeyedLoadIC_Initialize();
+Handle<Code> KeyedLoadIC::initialize_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return KeyedLoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
}
Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(
- Isolate* isolate, State initialization_state) {
- if (FLAG_vector_ics && initialization_state != MEGAMORPHIC) {
- return VectorRawKeyedLoadStub(isolate).GetCode();
- }
- switch (initialization_state) {
- case UNINITIALIZED:
- return isolate->builtins()->KeyedLoadIC_Initialize();
- case PREMONOMORPHIC:
- return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
- case MEGAMORPHIC:
- return isolate->builtins()->KeyedLoadIC_Megamorphic();
- default:
- UNREACHABLE();
+ Isolate* isolate, State initialization_state, ExtraICState extra_state) {
+ if (initialization_state != MEGAMORPHIC) {
+ return KeyedLoadICStub(isolate, LoadICState(extra_state)).GetCode();
}
- return Handle<Code>();
+ return is_strong(LoadICState::GetLanguageMode(extra_state))
+ ? isolate->builtins()->KeyedLoadIC_Megamorphic_Strong()
+ : isolate->builtins()->KeyedLoadIC_Megamorphic();
}
-Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
- LanguageMode language_mode,
- State initialization_state) {
+static Handle<Code> KeyedStoreICInitializeStubHelper(
+ Isolate* isolate, LanguageMode language_mode,
+ InlineCacheState initialization_state) {
switch (initialization_state) {
case UNINITIALIZED:
return is_strict(language_mode)
@@ -1046,36 +975,34 @@ Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
}
-Handle<Code> LoadIC::megamorphic_stub() {
- if (kind() == Code::LOAD_IC) {
- MegamorphicLoadStub stub(isolate(), LoadICState(extra_ic_state()));
+Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
+ LanguageMode language_mode,
+ State initialization_state) {
+ if (FLAG_vector_stores) {
+ VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
- } else {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return KeyedLoadIC::ChooseMegamorphicStub(isolate());
}
-}
-
-Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
- ExtraICState extra_state) {
- DCHECK(!FLAG_vector_ics);
- return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state);
+ return KeyedStoreICInitializeStubHelper(isolate, language_mode,
+ initialization_state);
}
-Handle<Code> KeyedLoadIC::pre_monomorphic_stub(Isolate* isolate) {
- return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
+ Isolate* isolate, LanguageMode language_mode, State initialization_state) {
+ if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ VectorKeyedStoreICStub stub(isolate, StoreICState(language_mode));
+ return stub.GetCode();
+ }
+
+ return KeyedStoreICInitializeStubHelper(isolate, language_mode,
+ initialization_state);
}
-Handle<Code> LoadIC::pre_monomorphic_stub() const {
- if (kind() == Code::LOAD_IC) {
- return LoadIC::pre_monomorphic_stub(isolate(), extra_ic_state());
- } else {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return KeyedLoadIC::pre_monomorphic_stub(isolate());
- }
+Handle<Code> LoadIC::megamorphic_stub() {
+ DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+ return KeyedLoadIC::ChooseMegamorphicStub(isolate(), extra_ic_state());
}
@@ -1089,11 +1016,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
- if (UseVector()) {
- ConfigureVectorState(PREMONOMORPHIC);
- } else {
- set_target(*pre_monomorphic_stub());
- }
+ ConfigureVectorState(PREMONOMORPHIC);
TRACE_IC("LoadIC", lookup->name());
return;
}
@@ -1103,7 +1026,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC) {
+ if (kind() == Code::LOAD_IC && !is_strong(language_mode())) {
code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
receiver_map());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -1334,7 +1257,9 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
// property must be found in the object for the stub to be
// applicable.
if (!receiver_is_holder) break;
- return isolate()->builtins()->LoadIC_Normal();
+ return is_strong(language_mode())
+ ? isolate()->builtins()->LoadIC_Normal_Strong()
+ : isolate()->builtins()->LoadIC_Normal();
}
// -------------- Fields --------------
@@ -1381,7 +1306,7 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
} else {
int int_value = FastD2I(value);
if (value == int_value && Smi::IsValid(int_value)) {
- key = Handle<Smi>(Smi::FromInt(int_value), isolate);
+ key = handle(Smi::FromInt(int_value), isolate);
}
}
} else if (key->IsUndefined()) {
@@ -1399,13 +1324,11 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
if (target_receiver_maps.length() == 0) {
- if (FLAG_vector_ics) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
+ receiver_map, extra_ic_state());
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
+ return null_handle;
}
// The first time a receiver is seen that is a transitioned version of the
@@ -1419,13 +1342,11 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
Handle<JSObject>::cast(receiver)->GetElementsKind())) {
- if (FLAG_vector_ics) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
+ receiver_map, extra_ic_state());
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
+ return null_handle;
}
DCHECK(state() != GENERIC);
@@ -1446,16 +1367,12 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
return megamorphic_stub();
}
- if (FLAG_vector_ics) {
- CodeHandleList handlers(target_receiver_maps.length());
- ElementHandlerCompiler compiler(isolate());
- compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
- ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps,
- &handlers);
- return null_handle;
- }
-
- return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps);
+ CodeHandleList handlers(target_receiver_maps.length());
+ ElementHandlerCompiler compiler(isolate());
+ compiler.CompileElementHandlers(&target_receiver_maps, &handlers,
+ language_mode());
+ ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps, &handlers);
+ return null_handle;
}
@@ -1464,7 +1381,8 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
+ isolate(), result,
+ Runtime::GetObjectProperty(isolate(), object, key, language_mode()),
Object);
return result;
}
@@ -1489,33 +1407,24 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
}
}
- if (!UseVector()) {
- if (!is_target_set()) {
- Code* generic = *megamorphic_stub();
- if (*stub == generic) {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
- }
-
- set_target(*stub);
- TRACE_IC("LoadIC", key);
+ DCHECK(UseVector());
+ if (!is_vector_set() || stub.is_null()) {
+ Code* generic = *megamorphic_stub();
+ if (!stub.is_null() && *stub == generic) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
- } else {
- if (!is_vector_set() || stub.is_null()) {
- Code* generic = *megamorphic_stub();
- if (!stub.is_null() && *stub == generic) {
- ConfigureVectorState(MEGAMORPHIC);
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
- }
- TRACE_IC("LoadIC", key);
- }
+ TRACE_IC("LoadIC", key);
}
if (!load_handle.is_null()) return load_handle;
+
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Runtime::GetObjectProperty(isolate(), object, key),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Runtime::GetObjectProperty(isolate(), object, key, language_mode()),
+ Object);
return result;
}
@@ -1596,7 +1505,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index);
if (lookup_result.mode == CONST) {
- return TypeError("const_assign", object, name);
+ return TypeError(MessageTemplate::kConstAssign, object, name);
}
Handle<Object> previous_value =
@@ -1632,7 +1541,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// If the object is undefined or null it's illegal to try to set any
// properties on it; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
+ return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
}
// Check if the given name is an array index.
@@ -1646,8 +1555,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- JSObject::SetElement(receiver, index, value, NONE, language_mode()),
- Object);
+ JSObject::SetElement(receiver, index, value, language_mode()), Object);
return value;
}
@@ -1690,16 +1598,43 @@ Handle<Code> CallIC::initialize_stub_in_optimized_code(
}
+static Handle<Code> StoreICInitializeStubHelper(
+ Isolate* isolate, ExtraICState extra_state,
+ InlineCacheState initialization_state) {
+ Handle<Code> ic = PropertyICCompiler::ComputeStore(
+ isolate, initialization_state, extra_state);
+ return ic;
+}
+
+
Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state) {
DCHECK(initialization_state == UNINITIALIZED ||
initialization_state == PREMONOMORPHIC ||
initialization_state == MEGAMORPHIC);
- ExtraICState extra_state = ComputeExtraICState(language_mode);
- Handle<Code> ic = PropertyICCompiler::ComputeStore(
- isolate, initialization_state, extra_state);
- return ic;
+ if (FLAG_vector_stores) {
+ VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+ return stub.GetCode();
+ }
+
+ return StoreICInitializeStubHelper(
+ isolate, ComputeExtraICState(language_mode), initialization_state);
+}
+
+
+Handle<Code> StoreIC::initialize_stub_in_optimized_code(
+ Isolate* isolate, LanguageMode language_mode, State initialization_state) {
+ DCHECK(initialization_state == UNINITIALIZED ||
+ initialization_state == PREMONOMORPHIC ||
+ initialization_state == MEGAMORPHIC);
+ if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ VectorStoreICStub stub(isolate, StoreICState(language_mode));
+ return stub.GetCode();
+ }
+
+ return StoreICInitializeStubHelper(
+ isolate, ComputeExtraICState(language_mode), initialization_state);
}
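
Both store entry points now share StoreICInitializeStubHelper and branch on FLAG_vector_stores, with the twist that optimized code keeps using the old megamorphic stub even when the flag is on. A sketch of the selection, with stub identities reduced to placeholder strings:

    #include <string>

    static bool FLAG_vector_stores = true;

    enum State { UNINITIALIZED, PREMONOMORPHIC, MEGAMORPHIC };

    // Placeholder for PropertyICCompiler::ComputeStore.
    std::string InitializeStubHelper(State initialization_state) {
      return initialization_state == MEGAMORPHIC ? "StoreIC_Megamorphic"
                                                 : "StoreIC_PreMonomorphic";
    }

    std::string InitializeStub(State initialization_state) {
      if (FLAG_vector_stores) return "VectorStoreICTrampolineStub";
      return InitializeStubHelper(initialization_state);
    }

    std::string InitializeStubInOptimizedCode(State initialization_state) {
      // Megamorphic sites bypass the vector machinery even with the flag on.
      if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
        return "VectorStoreICStub";
      }
      return InitializeStubHelper(initialization_state);
    }

    int main() {
      return InitializeStubInOptimizedCode(MEGAMORPHIC) ==
                     "StoreIC_Megamorphic"
                 ? 0
                 : 1;
    }
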
@@ -1740,7 +1675,11 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
- set_target(*pre_monomorphic_stub());
+ if (FLAG_vector_stores) {
+ ConfigureVectorState(PREMONOMORPHIC);
+ } else {
+ set_target(*pre_monomorphic_stub());
+ }
TRACE_IC("StoreIC", lookup->name());
return;
}
@@ -1780,7 +1719,8 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
// This is currently guaranteed by checks in StoreIC::Store.
Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- DCHECK(!receiver->IsAccessCheckNeeded());
+ DCHECK(!receiver->IsAccessCheckNeeded() ||
+ isolate()->IsInternallyUsedPropertyName(lookup->name()));
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
@@ -1826,6 +1766,12 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
break;
}
+ if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
+ !lookup->HolderIsReceiverOrHiddenPrototype()) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC",
+ "special data property in prototype chain");
+ break;
+ }
if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
receiver_map())) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
@@ -2172,11 +2118,20 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
StoreIC::Store(object, Handle<Name>::cast(key), value,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
- if (!is_target_set()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unhandled internalized string key");
- TRACE_IC("StoreIC", key);
- set_target(*stub);
+ if (FLAG_vector_stores) {
+ if (!is_vector_set()) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "unhandled internalized string key");
+ TRACE_IC("StoreIC", key);
+ }
+ } else {
+ if (!is_target_set()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "unhandled internalized string key");
+ TRACE_IC("StoreIC", key);
+ set_target(*stub);
+ }
}
return store_handle;
}
@@ -2203,14 +2158,10 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
if (receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map()) {
- if (is_sloppy(language_mode())) {
- stub = sloppy_arguments_stub();
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
- }
- } else if (key_is_smi_like &&
- !(target().is_identical_to(sloppy_arguments_stub()))) {
+ isolate()->heap()->sloppy_arguments_elements_map() &&
+ !is_sloppy(language_mode())) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
+ } else if (key_is_smi_like) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
@@ -2237,17 +2188,28 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Object);
}
- DCHECK(!is_target_set());
- Code* megamorphic = *megamorphic_stub();
- if (*stub == megamorphic) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
- }
- if (*stub == *slow_stub()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
- }
- DCHECK(!stub.is_null());
- if (!AddressIsDeoptimizedCode()) {
- set_target(*stub);
+ if (FLAG_vector_stores) {
+ if (!is_vector_set() || stub.is_null()) {
+ Code* megamorphic = *megamorphic_stub();
+ if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ *stub == megamorphic ? "set generic" : "slow stub");
+ }
+ }
+ } else {
+ DCHECK(!is_target_set());
+ Code* megamorphic = *megamorphic_stub();
+ if (*stub == megamorphic) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
+ } else if (*stub == *slow_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
+ }
+
+ DCHECK(!stub.is_null());
+ if (!AddressIsDeoptimizedCode()) {
+ set_target(*stub);
+ }
}
TRACE_IC("StoreIC", key);
@@ -2296,7 +2258,7 @@ void CallIC::PatchMegamorphic(Handle<Object> function) {
// We are going generic.
CallICNexus* nexus = casted_nexus<CallICNexus>();
- nexus->ConfigureGeneric();
+ nexus->ConfigureMegamorphic();
// Vector-based ICs have a different calling convention in optimized code
// than full code so the correct stub has to be chosen.
@@ -2331,7 +2293,7 @@ void CallIC::HandleMiss(Handle<Object> function) {
if (feedback->IsWeakCell() || !function->IsJSFunction()) {
// We are going generic.
- nexus->ConfigureGeneric();
+ nexus->ConfigureMegamorphic();
} else {
// The feedback is either uninitialized or an allocation site.
// It might be an allocation site because if we re-compile the full code
@@ -2408,31 +2370,22 @@ RUNTIME_FUNCTION(LoadIC_Miss) {
Handle<Name> key = args.at<Name>(1);
Handle<Object> result;
- if (FLAG_vector_ics) {
- DCHECK(args.length() == 4);
- Handle<Smi> slot = args.at<Smi>(2);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
- // LoadIC miss handler if the handler misses. Since the vector Nexus is
- // set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Load(receiver, key));
- } else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Load(receiver, key));
- }
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+ // LoadIC miss handler if the handler misses. Since the vector Nexus is
+ // set up outside the IC, handle that here.
+ if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, vector_slot);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
} else {
- DCHECK(args.length() == 2);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
}
@@ -2448,22 +2401,14 @@ RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
Handle<Object> key = args.at<Object>(1);
Handle<Object> result;
- if (FLAG_vector_ics) {
- DCHECK(args.length() == 4);
- Handle<Smi> slot = args.at<Smi>(2);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- } else {
- DCHECK(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- }
-
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
return *result;
}
@@ -2475,21 +2420,14 @@ RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
Handle<Object> key = args.at<Object>(1);
Handle<Object> result;
- if (FLAG_vector_ics) {
- DCHECK(args.length() == 4);
- Handle<Smi> slot = args.at<Smi>(2);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- } else {
- DCHECK(args.length() == 2);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- }
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
return *result;
}
@@ -2499,14 +2437,37 @@ RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
RUNTIME_FUNCTION(StoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
- ic.UpdateState(receiver, key);
+ Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+
+ if (FLAG_vector_stores) {
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ if (vector->GetKind(vector_slot) == Code::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ } else {
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_STORE_IC);
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ }
+ } else {
+ DCHECK(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ }
return *result;
}
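
With FLAG_vector_stores the miss handler grows from three arguments to five: (receiver, name, value, slot, vector). The slot indexes the type feedback vector, and the kind recorded there decides whether the runtime rebuilds a StoreICNexus or a KeyedStoreICNexus; the same pattern repeats in the three store miss handlers below. A toy model of that dispatch, with types simplified:

    #include <cassert>
    #include <vector>

    enum ICKind { STORE_IC, KEYED_STORE_IC };

    // The feedback vector records which IC kind owns each slot.
    struct FeedbackVector {
      std::vector<ICKind> kinds;
      ICKind GetKind(int slot) const { return kinds.at(slot); }
    };

    // A real miss handler would construct a StoreICNexus or KeyedStoreICNexus
    // here and then call ic.Store(receiver, key, value).
    ICKind DispatchStoreMiss(const FeedbackVector& vector, int slot) {
      return vector.GetKind(slot);
    }

    int main() {
      FeedbackVector vector;
      vector.kinds.push_back(STORE_IC);
      vector.kinds.push_back(KEYED_STORE_IC);
      assert(DispatchStoreMiss(vector, 1) == KEYED_STORE_IC);
      return 0;
    }
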
@@ -2514,14 +2475,37 @@ RUNTIME_FUNCTION(StoreIC_Miss) {
RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 3 || args.length() == 4);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
- ic.UpdateState(receiver, key);
+ Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+
+ if (FLAG_vector_stores) {
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ if (vector->GetKind(vector_slot) == Code::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ } else {
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_STORE_IC);
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ }
+ } else {
+ DCHECK(args.length() == 3 || args.length() == 4);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ }
return *result;
}
@@ -2530,14 +2514,28 @@ RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
+ Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+
+ if (FLAG_vector_stores) {
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ } else {
+ DCHECK(args.length() == 3);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ }
return *result;
}
@@ -2545,14 +2543,28 @@ RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
+ Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+
+ if (FLAG_vector_stores) {
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ } else {
+ DCHECK(args.length() == 3);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ ic.UpdateState(receiver, key);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                         ic.Store(receiver, key, value));
+ }
return *result;
}
@@ -2618,7 +2630,7 @@ MaybeHandle<Object> BinaryOpIC::Transition(
// Compute the actual result using the builtin for the binary operation.
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
- TokenToJSBuiltin(state.op(), state.language_mode()));
+ TokenToJSBuiltin(state.op(), state.strength()));
Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -2718,8 +2730,9 @@ RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
}
-Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
- CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op,
+ Strength strength) {
+ CompareICStub stub(isolate, op, strength, CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED);
Code* code = NULL;
@@ -2728,8 +2741,9 @@ Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
}
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
- CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op,
+ Strength strength) {
+ CompareICStub stub(isolate, op, strength, CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED);
return stub.GetCode();
@@ -2746,7 +2760,8 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
CompareICState::State state = CompareICState::TargetState(
old_stub.state(), old_stub.left(), old_stub.right(), op_,
HasInlinedSmiCode(address()), x, y);
- CompareICStub stub(isolate(), op_, new_left, new_right, state);
+ CompareICStub stub(isolate(), op_, old_stub.strength(), new_left, new_right,
+ state);
if (state == CompareICState::KNOWN_OBJECT) {
stub.set_known_map(
Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
@@ -2786,8 +2801,7 @@ RUNTIME_FUNCTION(CompareIC_Miss) {
}
-void CompareNilIC::Clear(Address address, Code* target,
- ConstantPoolArray* constant_pool) {
+void CompareNilIC::Clear(Address address, Code* target, Address constant_pool) {
if (IsCleared(target)) return;
ExtraICState state = target->extra_ic_state();
@@ -2856,8 +2870,8 @@ RUNTIME_FUNCTION(Unreachable) {
Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op,
- LanguageMode language_mode) {
- if (is_strong(language_mode)) {
+ Strength strength) {
+ if (is_strong(strength)) {
switch (op) {
default: UNREACHABLE();
case Token::ADD: return Builtins::ADD_STRONG;
@@ -2956,10 +2970,12 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
Handle<JSObject> holder =
args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
HandleScope scope(isolate);
- auto res = JSObject::GetPropertyWithInterceptor(holder, receiver, name);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ LookupIterator it(receiver, name, holder, LookupIterator::OWN);
+ bool done;
Handle<Object> result;
- if (res.ToHandle(&result)) return *result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::GetPropertyWithInterceptor(&it, &done));
+ if (done) return *result;
return isolate->heap()->no_interceptor_result_sentinel();
}
@@ -2996,6 +3012,7 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
Handle<Object> result;
LookupIterator it(receiver, name, holder);
+ // TODO(conradw): Investigate strong mode semantics for this.
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
JSObject::GetProperty(&it));
@@ -3035,14 +3052,17 @@ RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
RUNTIME_FUNCTION(LoadElementWithInterceptor) {
+ // TODO(verwaest): This should probably get the holder and receiver as input.
HandleScope scope(isolate);
Handle<JSObject> receiver = args.at<JSObject>(0);
DCHECK(args.smi_at(1) >= 0);
uint32_t index = args.smi_at(1);
Handle<Object> result;
+ // TODO(conradw): Investigate strong mode semantics for this.
+ LanguageMode language_mode = SLOPPY;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- JSObject::GetElementWithInterceptor(receiver, receiver, index, true));
+ Object::GetElement(isolate, receiver, index, language_mode));
return *result;
}
@@ -3054,31 +3074,22 @@ RUNTIME_FUNCTION(LoadIC_MissFromStubFailure) {
Handle<Name> key = args.at<Name>(1);
Handle<Object> result;
- if (FLAG_vector_ics) {
- DCHECK(args.length() == 4);
- Handle<Smi> slot = args.at<Smi>(2);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
- // LoadIC miss handler if the handler misses. Since the vector Nexus is
- // set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Load(receiver, key));
- } else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Load(receiver, key));
- }
+ DCHECK(args.length() == 4);
+ Handle<Smi> slot = args.at<Smi>(2);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+ // LoadIC miss handler if the handler misses. Since the vector Nexus is
+ // set up outside the IC, handle that here.
+ if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, vector_slot);
+ LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
} else {
- DCHECK(args.length() == 2);
- LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
}
@@ -3095,5 +3106,5 @@ static const Address IC_utilities[] = {
Address IC::AddressFromUtilityId(IC::UtilityId id) { return IC_utilities[id]; }
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index d51309cffe..dec8318ae5 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -7,6 +7,7 @@
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
+#include "src/messages.h"
namespace v8 {
namespace internal {
@@ -77,8 +78,7 @@ class IC {
}
// Clear the inline cache to initial state.
- static void Clear(Isolate* isolate, Address address,
- ConstantPoolArray* constant_pool);
+ static void Clear(Isolate* isolate, Address address, Address constant_pool);
#ifdef DEBUG
bool IsLoadStub() const {
@@ -113,9 +113,10 @@ class IC {
}
static bool ICUseVector(Code::Kind kind) {
- return (FLAG_vector_ics &&
- (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
- kind == Code::CALL_IC;
+ return kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
+ kind == Code::CALL_IC ||
+ (FLAG_vector_stores &&
+ (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC));
}
protected:
@@ -134,7 +135,9 @@ class IC {
Code* GetOriginalCode() const;
bool AddressIsOptimizedCode() const;
- bool AddressIsDeoptimizedCode() const;
+ inline bool AddressIsDeoptimizedCode() const;
+ inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
+ Address address);
// Set the call-site target.
inline void set_target(Code* code);
@@ -162,15 +165,15 @@ class IC {
void TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state);
- MaybeHandle<Object> TypeError(const char* type, Handle<Object> object,
- Handle<Object> key);
+  MaybeHandle<Object> TypeError(MessageTemplate::Template index,
+                                Handle<Object> object, Handle<Object> key);
MaybeHandle<Object> ReferenceError(Handle<Name> name);
// Access the target code for the given IC address.
static inline Code* GetTargetAtAddress(Address address,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
static inline void SetTargetAtAddress(Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
State old_state, State new_state,
bool target_remains_ic_stub);
@@ -254,8 +257,8 @@ class IC {
private:
inline Code* raw_target() const;
- inline ConstantPoolArray* constant_pool() const;
- inline ConstantPoolArray* raw_constant_pool() const;
+ inline Address constant_pool() const;
+ inline Address raw_constant_pool() const;
void FindTargetMaps() {
if (target_maps_set_) return;
@@ -275,17 +278,17 @@ class IC {
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
- // All access to the program counter of an IC structure is indirect
- // to make the code GC safe. This feature is crucial since
+ // All access to the program counter and constant pool of an IC structure is
+ // indirect to make the code GC safe. This feature is crucial since
// GetProperty and SetProperty are called and they in turn might
// invoke the garbage collector.
Address* pc_address_;
- Isolate* isolate_;
-
// The constant pool of the code which originally called the IC (which might
// be for the breakpointed copy of the original code).
- Handle<ConstantPoolArray> raw_constant_pool_;
+ Address* constant_pool_address_;
+
+ Isolate* isolate_;
// The original code target that missed.
Handle<Code> target_;
@@ -351,17 +354,22 @@ class CallIC : public IC {
class LoadIC : public IC {
public:
- static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
- return LoadICState(contextual_mode).GetExtraICState();
+ static ExtraICState ComputeExtraICState(ContextualMode contextual_mode,
+ LanguageMode language_mode) {
+ return LoadICState(contextual_mode, language_mode).GetExtraICState();
}
ContextualMode contextual_mode() const {
return LoadICState::GetContextualMode(extra_ic_state());
}
+ LanguageMode language_mode() const {
+ return LoadICState::GetLanguageMode(extra_ic_state());
+ }
+
LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
: IC(depth, isolate, nexus) {
- DCHECK(!FLAG_vector_ics || nexus != NULL);
+ DCHECK(nexus != NULL);
DCHECK(IsLoadStub());
}
@@ -387,19 +395,15 @@ class LoadIC : public IC {
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode);
+ static void GenerateNormal(MacroAssembler* masm, LanguageMode language_mode);
static Handle<Code> initialize_stub(Isolate* isolate,
ExtraICState extra_state);
static Handle<Code> initialize_stub_in_optimized_code(
Isolate* isolate, ExtraICState extra_state, State initialization_state);
- static Handle<Code> load_global(Isolate* isolate, Handle<GlobalObject> global,
- Handle<String> name);
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Name> name);
@@ -411,10 +415,14 @@ class LoadIC : public IC {
Handle<Code> slow_stub() const {
if (kind() == Code::LOAD_IC) {
- return isolate()->builtins()->LoadIC_Slow();
+ return is_strong(language_mode())
+ ? isolate()->builtins()->LoadIC_Slow_Strong()
+ : isolate()->builtins()->LoadIC_Slow();
} else {
DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return isolate()->builtins()->KeyedLoadIC_Slow();
+ return is_strong(language_mode())
+ ? isolate()->builtins()->KeyedLoadIC_Slow_Strong()
+ : isolate()->builtins()->KeyedLoadIC_Slow();
}
}
@@ -429,14 +437,10 @@ class LoadIC : public IC {
CacheHolderFlag cache_holder) override;
private:
- virtual Handle<Code> pre_monomorphic_stub() const;
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- ExtraICState extra_state);
-
Handle<Code> SimpleFieldLoad(FieldIndex index);
static void Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
friend class IC;
};
@@ -445,11 +449,13 @@ class LoadIC : public IC {
class KeyedLoadIC : public LoadIC {
public:
// ExtraICState bits (building on IC)
- class IcCheckTypeField : public BitField<IcCheckType, 1, 1> {};
+ class IcCheckTypeField
+ : public BitField<IcCheckType, LoadICState::kNextBitFieldOffset, 1> {};
static ExtraICState ComputeExtraICState(ContextualMode contextual_mode,
+ LanguageMode language_mode,
IcCheckType key_type) {
- return LoadICState(contextual_mode).GetExtraICState() |
+ return LoadICState(contextual_mode, language_mode).GetExtraICState() |
IcCheckTypeField::encode(key_type);
}
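
The point of LoadICState::kNextBitFieldOffset is that KeyedLoadIC no longer hard-codes where its own bit starts (it was fixed at bit 1 before). A compile-time sketch, reusing the simplified BitField model from earlier and repeated here so it stands alone (names illustrative):

    #include <cstdint>

    // Only the layout constants matter for this sketch.
    template <class T, int shift, int size>
    struct BitField {
      static const int kShift = shift;
      static const int kNext = shift + size;
    };

    typedef BitField<int, 0, 1> ContextualModeBits;
    typedef BitField<int, ContextualModeBits::kNext, 2> LanguageModeBits;
    typedef BitField<bool, LanguageModeBits::kNext, 1> IcCheckTypeField;

    // Chaining off kNext means the key-type bit moves automatically if an
    // earlier field ever grows; previously its position was hard-coded.
    static_assert(IcCheckTypeField::kShift == 3,
                  "fields pack without overlap");

    int main() { return 0; }
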
@@ -460,7 +466,7 @@ class KeyedLoadIC : public LoadIC {
KeyedLoadIC(FrameDepth depth, Isolate* isolate,
KeyedLoadICNexus* nexus = NULL)
: LoadIC(depth, isolate, nexus) {
- DCHECK(!FLAG_vector_ics || nexus != NULL);
+ DCHECK(nexus != NULL);
DCHECK(target()->is_keyed_load_stub());
}
@@ -469,12 +475,11 @@ class KeyedLoadIC : public LoadIC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateMegamorphic(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -483,24 +488,22 @@ class KeyedLoadIC : public LoadIC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- static Handle<Code> initialize_stub(Isolate* isolate);
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ ExtraICState extra_state);
static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, State initialization_state);
- static Handle<Code> ChooseMegamorphicStub(Isolate* isolate);
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
+ Isolate* isolate, State initialization_state, ExtraICState extra_state);
+ static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
+ ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
protected:
// receiver is HeapObject because it could be a String or a JSObject
Handle<Code> LoadElementStub(Handle<HeapObject> receiver);
- virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate());
- }
private:
static void Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
friend class IC;
};
@@ -508,26 +511,17 @@ class KeyedLoadIC : public LoadIC {
class StoreIC : public IC {
public:
- STATIC_ASSERT(i::LANGUAGE_END == 3);
- class LanguageModeState : public BitField<LanguageMode, 1, 2> {};
static ExtraICState ComputeExtraICState(LanguageMode flag) {
- return LanguageModeState::encode(flag);
- }
- static LanguageMode GetLanguageMode(ExtraICState state) {
- return LanguageModeState::decode(state);
+ return StoreICState(flag).GetExtraICState();
}
- // For convenience, a statically declared encoding of strict mode extra
- // IC state.
- static const ExtraICState kStrictModeState = STRICT
- << LanguageModeState::kShift;
-
- StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ StoreIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
+ : IC(depth, isolate, nexus) {
DCHECK(IsStoreStub());
}
LanguageMode language_mode() const {
- return LanguageModeState::decode(extra_ic_state());
+ return StoreICState::GetLanguageMode(extra_ic_state());
}
// Code generators for stub routines. Only called once at startup.
@@ -545,6 +539,8 @@ class StoreIC : public IC {
static Handle<Code> initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state);
+ static Handle<Code> initialize_stub_in_optimized_code(
+ Isolate* isolate, LanguageMode language_mode, State initialization_state);
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -554,6 +550,8 @@ class StoreIC : public IC {
bool LookupForWrite(LookupIterator* it, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
+ static void Clear(Isolate* isolate, Code* host, StoreICNexus* nexus);
+
protected:
// Stub accessors.
Handle<Code> megamorphic_stub() override;
@@ -578,7 +576,7 @@ class StoreIC : public IC {
inline void set_target(Code* code);
static void Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
friend class IC;
};
@@ -603,7 +601,7 @@ class KeyedStoreIC : public StoreIC {
static ExtraICState ComputeExtraICState(LanguageMode flag,
KeyedAccessStoreMode mode) {
- return LanguageModeState::encode(flag) |
+ return StoreICState(flag).GetExtraICState() |
ExtraICStateKeyedAccessStoreMode::encode(mode) |
IcCheckTypeField::encode(ELEMENT);
}
@@ -617,7 +615,9 @@ class KeyedStoreIC : public StoreIC {
return IcCheckTypeField::decode(extra_state);
}
- KeyedStoreIC(FrameDepth depth, Isolate* isolate) : StoreIC(depth, isolate) {
+ KeyedStoreIC(FrameDepth depth, Isolate* isolate,
+ KeyedStoreICNexus* nexus = NULL)
+ : StoreIC(depth, isolate, nexus) {
DCHECK(target()->is_keyed_store_stub());
}
@@ -634,12 +634,16 @@ class KeyedStoreIC : public StoreIC {
static void GenerateSlow(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
LanguageMode language_mode);
- static void GenerateSloppyArguments(MacroAssembler* masm);
static Handle<Code> initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state);
+ static Handle<Code> initialize_stub_in_optimized_code(
+ Isolate* isolate, LanguageMode language_mode, State initialization_state);
+
+ static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
+
protected:
virtual Handle<Code> pre_monomorphic_stub() const {
return pre_monomorphic_stub(isolate(), language_mode());
@@ -659,13 +663,8 @@ class KeyedStoreIC : public StoreIC {
private:
inline void set_target(Code* code);
- // Stub accessors.
- Handle<Code> sloppy_arguments_stub() {
- return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
- }
-
static void Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key, Handle<Object> value);
@@ -683,7 +682,7 @@ class BinaryOpIC : public IC {
explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
static Builtins::JavaScript TokenToJSBuiltin(Token::Value op,
- LanguageMode language_mode);
+ Strength strength);
MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
@@ -703,7 +702,8 @@ class CompareIC : public IC {
static Condition ComputeCondition(Token::Value op);
// Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
+ static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op,
+ Strength strength);
private:
static bool HasInlinedSmiCode(Address address);
@@ -711,10 +711,11 @@ class CompareIC : public IC {
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
+ static Code* GetRawUninitialized(Isolate* isolate, Token::Value op,
+ Strength strength);
static void Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ Address constant_pool);
Token::Value op_;
@@ -730,8 +731,7 @@ class CompareNilIC : public IC {
static Handle<Code> GetUninitialized();
- static void Clear(Address address, Code* target,
- ConstantPoolArray* constant_pool);
+ static void Clear(Address address, Code* target, Address constant_pool);
static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
Handle<Object> object);
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index dce7602ae0..eb519d22e4 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -40,7 +40,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 60df292f41..13ce921b8b 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -778,7 +778,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index 179531235d..7b88f32331 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -134,7 +134,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 33b78cec58..5a6f95a231 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -163,7 +163,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow) {
+ Register result, Label* slow,
+ LanguageMode language_mode) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -185,7 +186,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
@@ -203,7 +204,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(&return_undefined, eq, scratch2, Operand(at));
+ __ Branch(&absent, eq, scratch2, Operand(at));
__ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
@@ -218,9 +219,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ Branch(slow, ne, elements, Operand(at));
__ Branch(&check_next_prototype);
- __ bind(&return_undefined);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ Branch(slow);
+ } else {
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
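
The new &absent branch makes the fast array load language-mode aware: when the key is out of bounds and the prototype walk ends at null, sloppy and strict code get undefined, while strong mode must throw, so the stub bails to the slow path instead. A hedged C++ sketch of just that decision, using illustrative names rather than V8's:

    #include <optional>
    #include <stdexcept>
    #include <vector>

    enum class LanguageMode { kSloppy, kStrict, kStrong };

    inline bool is_strong(LanguageMode mode) {
      return mode == LanguageMode::kStrong;
    }

    // Mirrors the stub's in_bounds / absent split for a holeless fast array.
    std::optional<int> FastArrayLoad(const std::vector<int>& elements,
                                     size_t key, LanguageMode mode) {
      if (key < elements.size()) {
        return elements[key];  // in_bounds: do the load.
      }
      if (is_strong(mode)) {
        // absent + strong mode: the real stub branches to the slow path,
        // whose runtime call throws.
        throw std::runtime_error("strong mode: property not found");
      }
      // absent otherwise: LoadRoot(result, kUndefinedValueRootIndex).
      return std::nullopt;
    }
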
@@ -270,7 +276,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -285,7 +291,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
@@ -296,14 +302,10 @@ static const Register LoadIC_TempRegister() { return a3; }
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ Push(receiver, name, slot, vector);
- } else {
- __ Push(receiver, name);
- }
+ __ Push(receiver, name, slot, vector);
}
@@ -311,138 +313,30 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in ra.
__ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Register scratch3, Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ GetObjectType(object, scratch1, scratch2);
- __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Check that the key is a positive smi.
- __ And(scratch1, key, Operand(0x80000001));
- __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ li(scratch3, Operand(kPointerSize >> 1));
- __ Mul(scratch3, key, scratch3);
- __ Addu(scratch3, scratch3, Operand(kOffset));
-
- __ Addu(scratch2, scratch1, scratch3);
- __ lw(scratch2, MemOperand(scratch2));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ li(scratch3, Operand(kPointerSize >> 1));
- __ Mul(scratch3, scratch2, scratch3);
- __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch2, scratch1, scratch3);
- return MemOperand(scratch2);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
- DONT_DO_SMI_CHECK);
- __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
- __ li(scratch, Operand(kPointerSize >> 1));
- __ Mul(scratch, key, scratch);
- __ Addu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch, backing_store, scratch);
- return MemOperand(scratch);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(value.is(a0));
-
- Label slow, notin;
- // Store address is returned in register (of MemOperand) mapped_location.
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, a3, t0, t1, &notin, &slow);
- __ sw(value, mapped_location);
- __ mov(t5, value);
- DCHECK_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value); // (In delay slot) return the value stored in v0.
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- // Store address is returned in register (of MemOperand) unmapped_location.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow);
- __ sw(value, unmapped_location);
- __ mov(t5, value);
- DCHECK_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0); // (In delay slot) return the value stored in v0.
- __ bind(&slow);
- GenerateMiss(masm);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
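
The same split recurs at every slow-path tail call: push (receiver, name), then pick the plain or the strong-mode runtime entry. A compact sketch of the selection; the runtime IDs come from the hunk above, while the enum wrapper around them is an assumption:

    enum class RuntimeFunctionId { kGetProperty, kGetPropertyStrong };
    enum class LanguageMode { kSloppy, kStrict, kStrong };

    // What "TailCallRuntime(is_strong(mode) ? ... : ..., 2, 1)" selects.
    constexpr RuntimeFunctionId RuntimeForGetProperty(LanguageMode mode) {
      return mode == LanguageMode::kStrong
                 ? RuntimeFunctionId::kGetPropertyStrong
                 : RuntimeFunctionId::kGetProperty;
    }

    static_assert(RuntimeForGetProperty(LanguageMode::kStrict) ==
                      RuntimeFunctionId::kGetProperty,
                  "strict (non-strong) code uses the ordinary getter");
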
@@ -450,9 +344,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
LoadIC_PushArgs(masm);
@@ -461,21 +354,26 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in ra.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -499,7 +397,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow,
+ language_mode);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
__ Ret();
@@ -520,7 +419,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
a3);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -536,19 +435,16 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&probe_dictionary, eq, t0, Operand(at));
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(int_slot)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
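
The dummy-vector block above exists because vector-IC handlers are compiled against a calling convention that includes a feedback vector and slot; the megamorphic stub never records feedback, so one shared dummy vector with a single slot satisfies every call site. A rough sketch of that idea with stand-in types (names and shapes are assumptions, not V8's):

    #include <cassert>
    #include <vector>

    struct FeedbackVectorICSlot {
      explicit FeedbackVectorICSlot(int id) : id(id) {}
      int id;
    };

    struct TypeFeedbackVector {
      std::vector<int> slots;  // one entry per IC slot; unused by the probe
      int GetIndex(FeedbackVectorICSlot slot) const { return slot.id; }
    };

    struct HandlerArgs {
      const TypeFeedbackVector* vector;
      int slot_index;
    };

    // The megamorphic stub only has to *present* a (vector, slot) pair; it
    // never writes feedback through it, so a process-wide dummy is enough.
    HandlerArgs PrepareDummyVector(const TypeFeedbackVector& dummy) {
      int slot_index = dummy.GetIndex(FeedbackVectorICSlot(0));
      assert(slot_index == 0);
      return HandlerArgs{&dummy, slot_index};
    }
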
@@ -782,6 +678,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
__ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(t0, &slow);
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, a3, t0, t1, t2));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
@@ -990,7 +900,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.ChangeBranchCondition(eq);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index fab66d8963..2048531aba 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -121,8 +121,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
@@ -169,7 +169,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index 5e3cfc52fd..74b4b93240 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -40,7 +40,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index f60e8c6a43..d83c807e3c 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -779,7 +779,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index a3dcdf7207..df67fb9a81 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -134,7 +134,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 1ec49c45d4..a6075dfcaa 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -162,7 +162,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow) {
+ Register result, Label* slow,
+ LanguageMode language_mode) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -184,7 +185,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
@@ -202,7 +203,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(&return_undefined, eq, scratch2, Operand(at));
+ __ Branch(&absent, eq, scratch2, Operand(at));
__ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
@@ -217,9 +218,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ Branch(slow, ne, elements, Operand(at));
__ Branch(&check_next_prototype);
- __ bind(&return_undefined);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ Branch(slow);
+ } else {
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -269,7 +274,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -283,7 +288,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
@@ -294,14 +299,10 @@ static const Register LoadIC_TempRegister() { return a3; }
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ Push(receiver, name, slot, vector);
- } else {
- __ Push(receiver, name);
- }
+ __ Push(receiver, name, slot, vector);
}
@@ -309,138 +310,30 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is on the stack.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in ra.
__ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Register scratch3, Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ GetObjectType(object, scratch1, scratch2);
- __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Check that the key is a positive smi.
- __ NonNegativeSmiTst(key, scratch1);
- __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ SmiUntag(scratch3, key);
- __ dsll(scratch3, scratch3, kPointerSizeLog2);
- __ Daddu(scratch3, scratch3, Operand(kOffset));
-
- __ Daddu(scratch2, scratch1, scratch3);
- __ ld(scratch2, MemOperand(scratch2));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ SmiUntag(scratch3, scratch2);
- __ dsll(scratch3, scratch3, kPointerSizeLog2);
- __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- __ Daddu(scratch2, scratch1, scratch3);
- return MemOperand(scratch2);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
- DONT_DO_SMI_CHECK);
- __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
- __ SmiUntag(scratch, key);
- __ dsll(scratch, scratch, kPointerSizeLog2);
- __ Daddu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Daddu(scratch, backing_store, scratch);
- return MemOperand(scratch);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(value.is(a0));
-
- Label slow, notin;
- // Store address is returned in register (of MemOperand) mapped_location.
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, a3, a4, a5, &notin, &slow);
- __ sd(value, mapped_location);
- __ mov(t1, value);
- DCHECK_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t1, kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value); // (In delay slot) return the value stored in v0.
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- // Store address is returned in register (of MemOperand) unmapped_location.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, a3, a4, &slow);
- __ sd(value, unmapped_location);
- __ mov(t1, value);
- DCHECK_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t1, kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0); // (In delay slot) return the value stored in v0.
- __ bind(&slow);
- GenerateMiss(masm);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
@@ -448,9 +341,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);
LoadIC_PushArgs(masm);
@@ -459,21 +351,26 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in ra.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -497,7 +394,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow,
+ language_mode);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
__ Ret();
@@ -518,7 +416,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
a3);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -534,19 +432,16 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&probe_dictionary, eq, a4, Operand(at));
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(int_slot)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
@@ -784,6 +679,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
__ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(a4, &slow);
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, a3, a4, a5, a6));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
@@ -990,7 +899,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.ChangeBranchCondition(eq);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index 04883d7bc9..0d612903aa 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -57,14 +57,16 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ Branch(&miss, ne, name, Operand(at));
// Check the map matches.
- __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ld(at, MemOperand(base_addr,
+ static_cast<int32_t>(map_off_addr - key_off_addr)));
__ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Branch(&miss, ne, at, Operand(scratch2));
// Get the code entry from the cache.
Register code = scratch2;
scratch2 = no_reg;
- __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+ __ ld(code, MemOperand(base_addr,
+ static_cast<int32_t>(value_off_addr - key_off_addr)));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
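
The static_casts above are needed because subtracting two table addresses on a 64-bit target yields a 64-bit difference, while a MemOperand displacement is a 32-bit immediate; the cast makes the (small, safe) narrowing explicit. A minimal sketch with a stand-in MemOperand, not V8's class:

    #include <cstdint>

    // Stand-in for the assembler's MemOperand: base register + 32-bit offset.
    struct MemOperand {
      int base_reg;
      int32_t offset;
    };

    MemOperand FieldRelativeTo(int base_reg, uintptr_t field_addr,
                               uintptr_t key_addr) {
      // field_addr - key_addr is 64 bits wide on mips64; the stub cache
      // tables are adjacent, so the difference always fits in 32 bits.
      // The cast states that assumption explicitly.
      return MemOperand{base_reg, static_cast<int32_t>(field_addr - key_addr)};
    }
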
@@ -122,8 +124,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
@@ -170,7 +172,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index e98f5172f8..aa3859a62c 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -40,7 +40,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 5cb6f226bf..8988b08e2c 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -25,7 +25,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
@@ -62,7 +62,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(value());
@@ -649,7 +649,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@@ -784,7 +784,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index 9f33a59e7b..ad72c231de 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -129,7 +129,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index d0a2177f20..db56fbaf66 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -167,7 +167,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow) {
+ Register result, Label* slow,
+ LanguageMode language_mode) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -189,7 +190,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
@@ -208,7 +209,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ beq(&return_undefined);
+ __ beq(&absent);
__ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
@@ -223,9 +224,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ bne(slow);
__ jmp(&check_next_prototype);
- __ bind(&return_undefined);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ jmp(slow);
+ } else {
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -273,7 +279,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = r3;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -288,7 +294,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
@@ -299,14 +305,10 @@ static const Register LoadIC_TempRegister() { return r6; }
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ Push(receiver, name, slot, vector);
- } else {
- __ Push(receiver, name);
- }
+ __ Push(receiver, name, slot, vector);
}
@@ -314,138 +316,30 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r7, r8, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->load_miss(), 1, r7, r8);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
__ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Register scratch3, Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
- __ blt(slow_case);
-
- // Check that the key is a positive smi.
- __ mov(scratch1, Operand(0x80000001));
- __ and_(r0, key, scratch1, SetRC);
- __ bne(slow_case, cr0);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ LoadP(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ LoadP(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ SubSmiLiteral(scratch2, scratch2, Smi::FromInt(2), r0);
- __ cmpl(key, scratch2);
- __ bge(unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ SmiToPtrArrayOffset(scratch3, key);
- __ addi(scratch3, scratch3, Operand(kOffset));
-
- __ LoadPX(scratch2, MemOperand(scratch1, scratch3));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, scratch3);
- __ beq(unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ LoadP(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ SmiToPtrArrayOffset(scratch3, scratch2);
- __ addi(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- return MemOperand(scratch1, scratch3);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ LoadP(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ LoadP(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpl(key, scratch);
- __ bge(slow_case);
- __ SmiToPtrArrayOffset(scratch, key);
- __ addi(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(r4));
- DCHECK(key.is(r5));
- DCHECK(value.is(r3));
-
- Label slow, notin;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, r6, r7, r8, &notin, &slow);
- Register mapped_base = mapped_location.ra();
- Register mapped_offset = mapped_location.rb();
- __ StorePX(value, mapped_location);
- __ add(r9, mapped_base, mapped_offset);
- __ mr(r11, value);
- __ RecordWrite(mapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r6.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, r6, r7, &slow);
- Register unmapped_base = unmapped_location.ra();
- Register unmapped_offset = unmapped_location.rb();
- __ StorePX(value, unmapped_location);
- __ add(r9, unmapped_base, unmapped_offset);
- __ mr(r11, value);
- __ RecordWrite(unmapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
@@ -453,9 +347,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r7, r8, VectorLoadICDescriptor::SlotRegister(),
- VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r7, r8);
LoadIC_PushArgs(masm);
@@ -463,21 +356,26 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -501,7 +399,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r3, r6, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow,
+ language_mode);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
__ Ret();
@@ -523,7 +422,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7,
r6);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
@@ -540,19 +439,16 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ beq(&probe_dictionary);
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(int_slot));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
@@ -803,6 +699,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
__ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r7, &slow);
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
@@ -994,11 +904,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.masm()->TestIfSmi(reg, r0);
} else {
DCHECK(check == DISABLE_INLINED_SMI_CHECK);
-#if V8_TARGET_ARCH_PPC64
- DCHECK(Assembler::IsRldicl(instr_at_patch));
-#else
- DCHECK(Assembler::IsRlwinm(instr_at_patch));
-#endif
+ DCHECK(Assembler::IsAndi(instr_at_patch));
patcher.masm()->cmp(reg, reg, cr0);
}
DCHECK(Assembler::IsBranch(branch_instr));
@@ -1011,7 +917,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
patcher.EmitCondition(eq);
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
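The hunk above drops the FLAG_vector_ics guard and unconditionally installs a shared dummy feedback vector before probing the stub cache: the handlers always expect a (vector, slot) pair, and a megamorphic site never writes feedback back, so one immutable placeholder can serve every such site. A minimal standalone analogy of that invariant, with invented types rather than V8's real ones:

    // Sketch only -- models why a shared dummy feedback slot is safe for a
    // megamorphic cache site. Handlers read a (vector, slot) pair, but a
    // megamorphic site never updates its feedback, so all such sites can
    // share one immutable placeholder vector.
    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct FeedbackVector {
      std::vector<std::string> slots;  // recorded type feedback per slot
    };

    // Analogous to factory()->keyed_load_dummy_vector(): one shared,
    // never-mutated vector.
    FeedbackVector* DummyVector() {
      static FeedbackVector dummy{{"megamorphic"}};
      return &dummy;
    }

    // A handler that unconditionally expects vector and slot arguments.
    std::string HandlerLoad(
        const FeedbackVector* vector, int slot,
        const std::unordered_map<std::string, std::string>& obj,
        const std::string& key) {
      assert(slot >= 0 && slot < static_cast<int>(vector->slots.size()));
      auto it = obj.find(key);
      return it == obj.end() ? "undefined" : it->second;
    }

    int main() {
      std::unordered_map<std::string, std::string> o{{"x", "1"}};
      // Megamorphic call site: pass the dummy pair; nothing is written back.
      return HandlerLoad(DummyVector(), 0, o, "x") == "1" ? 0 : 1;
    }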
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
index 50f1f99850..57b32452d6 100644
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -142,8 +142,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
@@ -187,7 +187,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index e3a4938d6a..630e671613 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -143,5 +143,5 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
}
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
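The entire change to stub-cache.cc is the namespace closer, the mechanical cleanup this commit applies to every touched file: one commented closer per namespace replaces the combined "} } // namespace v8::internal" form. A compilable sketch of the convention:

    // Each closing brace gets its own comment naming the namespace it closes.
    namespace v8 {
    namespace internal {

    int Answer() { return 42; }

    }  // namespace internal
    }  // namespace v8

    int main() { return v8::internal::Answer() == 42 ? 0 : 1; }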
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index cd9196f526..6acc950bc0 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -40,7 +40,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 8288d8943d..43754c32b1 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -803,7 +803,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index 89e7aee7ff..4fcd7b7d3c 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -136,7 +136,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index c183febbb8..3556d21000 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -171,7 +171,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch, Register result,
- Label* slow) {
+ Label* slow, LanguageMode language_mode) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -191,7 +191,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// scratch - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
@@ -210,7 +210,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
// scratch: current prototype
__ CompareRoot(scratch, Heap::kNullValueRootIndex);
- __ j(equal, &return_undefined);
+ __ j(equal, &absent);
__ movp(elements, FieldOperand(scratch, JSObject::kElementsOffset));
__ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
// elements: elements of current prototype
@@ -225,9 +225,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ j(not_equal, slow);
__ jmp(&check_next_prototype);
- __ bind(&return_undefined);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ jmp(slow);
+ } else {
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -274,7 +279,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -296,7 +302,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(rax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow,
+ language_mode);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
@@ -317,7 +324,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
// Slow case: Jump to runtime.
__ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
@@ -333,19 +340,16 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ j(equal, &probe_dictionary);
Register megamorphic_scratch = rdi;
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
- DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ Move(vector, dummy_vector);
- __ Move(slot, Smi::FromInt(int_slot));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ Move(vector, dummy_vector);
+ __ Move(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
@@ -559,6 +563,19 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ movp(r9, FieldOperand(key, HeapObject::kMapOffset));
__ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
+
+ if (FLAG_vector_stores) {
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ Move(vector, dummy_vector);
+ __ Move(slot, Smi::FromInt(slot_index));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
@@ -611,111 +628,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
}
-static Operand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Register scratch3, Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- Condition check = masm->CheckNonNegativeSmi(key);
- __ j(NegateCondition(check), slow_case);
-
- // Load the elements into scratch1 and check its map. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments.
- __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpp(key, scratch2);
- __ j(greater_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ SmiToInteger64(scratch3, key);
- __ movp(scratch2,
- FieldOperand(scratch1, scratch3, times_pointer_size, kHeaderSize));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
- __ SmiToInteger64(scratch3, scratch2);
- return FieldOperand(scratch1, scratch3, times_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpp(key, scratch);
- __ j(greater_equal, slow_case);
- __ SmiToInteger64(scratch, key);
- return FieldOperand(backing_store, scratch, times_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, notin;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(name.is(rcx));
- DCHECK(value.is(rax));
-
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, name, rbx, rdi, r8, &notin, &slow);
- __ movp(mapped_location, value);
- __ leap(r9, mapped_location);
- __ movp(r8, value);
- __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, name, rbx, rdi, &slow);
- __ movp(unmapped_location, value);
- __ leap(r9, unmapped_location);
- __ movp(r8, value);
- __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = rax;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -730,33 +643,24 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ LoadIC::GenerateRuntimeGetProperty(masm, language_mode);
}
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
- DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
- !rdi.is(vector));
-
- __ PopReturnAddressTo(rdi);
- __ Push(receiver);
- __ Push(name);
- __ Push(slot);
- __ Push(vector);
- __ PushReturnAddressFrom(rdi);
- } else {
- DCHECK(!rbx.is(receiver) && !rbx.is(name));
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
+ !rdi.is(vector));
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ PushReturnAddressFrom(rbx);
- }
+ __ PopReturnAddressTo(rdi);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(slot);
+ __ Push(vector);
+ __ PushReturnAddressFrom(rdi);
}
@@ -771,15 +675,17 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
+
DCHECK(!rbx.is(receiver) && !rbx.is(name));
__ PopReturnAddressTo(rbx);
@@ -787,8 +693,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ Push(name);
__ PushReturnAddressFrom(rbx);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
@@ -802,15 +710,17 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
+
DCHECK(!rbx.is(receiver) && !rbx.is(name));
__ PopReturnAddressTo(rbx);
@@ -818,8 +728,10 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ Push(name);
__ PushReturnAddressFrom(rbx);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
@@ -965,7 +877,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
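The x64 hunks thread LanguageMode through the load ICs so the renamed &absent path can diverge: a sloppy-mode load of a missing property materializes undefined, while a strong-mode load jumps to the slow path and ends in Runtime::kGetPropertyStrong, which throws. A self-contained sketch of that split, using a C++ exception where V8 calls into its runtime:

    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    enum class LanguageMode { kSloppy, kStrong };

    std::string GetProperty(const std::map<std::string, std::string>& obj,
                            const std::string& key, LanguageMode mode) {
      auto it = obj.find(key);
      if (it != obj.end()) return it->second;
      // The "absent" path: strong mode must throw, sloppy yields undefined.
      if (mode == LanguageMode::kStrong)
        throw std::runtime_error("property not found");  // ~kGetPropertyStrong
      return "undefined";                                // ~kGetProperty
    }

    int main() {
      std::map<std::string, std::string> o{{"a", "1"}};
      std::cout << GetProperty(o, "b", LanguageMode::kSloppy) << "\n";
      try {
        GetProperty(o, "b", LanguageMode::kStrong);
      } catch (const std::exception& e) {
        std::cout << "threw: " << e.what() << "\n";
      }
      return 0;
    }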
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index 4be0d5b330..34f51626a6 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -115,8 +115,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// the vector and slot registers, which need to be preserved for a handler
// call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch));
}
#endif
@@ -161,7 +161,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
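The probes above guard their scratch usage with DCHECK(!AreAliased(vector, slot, scratch)): if any two of those names resolved to the same physical register, the probe would clobber its own inputs. A rough standalone model of the distinctness predicate (V8's real AreAliased is a variadic helper over Register; this analogy just compares register codes):

    #include <cassert>
    #include <initializer_list>
    #include <set>

    struct Register { int code; };

    // True when any two registers in the list share a register code.
    bool AreAliased(std::initializer_list<Register> regs) {
      std::set<int> seen;
      for (Register r : regs) {
        if (!seen.insert(r.code).second) return true;
      }
      return false;
    }

    int main() {
      Register vector{3}, slot{0}, scratch{8};
      assert(!AreAliased({vector, slot, scratch}));     // all distinct: OK
      assert(AreAliased({vector, slot, Register{3}}));  // reuses vector's code
      return 0;
    }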
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index 9456ec899c..d5fde5d4b8 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -38,7 +38,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
}
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index e9c8e4f713..056bd952c7 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -130,10 +130,9 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
- DCHECK(!FLAG_vector_ics);
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, scratch1);
- __ ret(0);
+ // TODO(mvstanton): This isn't used on ia32. Move all the other
+ // platform implementations into a code stub so this method can be removed.
+ UNREACHABLE();
}
@@ -809,7 +808,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 160e9e9c67..3697708037 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -128,7 +128,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 92005bd097..b863c69132 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -172,7 +172,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register scratch2, Register result,
- Label* slow) {
+ Label* slow, LanguageMode language_mode) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
@@ -182,7 +182,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// result - holds the result on exit if the load succeeds and
// we fall through.
Label check_prototypes, check_next_prototype;
- Label done, in_bounds, return_undefined;
+ Label done, in_bounds, absent;
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(scratch);
@@ -200,7 +200,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ cmp(scratch2, masm->isolate()->factory()->null_value());
- __ j(equal, &return_undefined);
+ __ j(equal, &absent);
__ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
__ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
// scratch: elements of current prototype
@@ -215,9 +215,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ j(not_equal, slow);
__ jmp(&check_next_prototype);
- __ bind(&return_undefined);
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
+ __ bind(&absent);
+ if (is_strong(language_mode)) {
+ // Strong mode accesses must throw in this case, so call the runtime.
+ __ jmp(slow);
+ } else {
+ __ mov(result, masm->isolate()->factory()->undefined_value());
+ __ jmp(&done);
+ }
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -263,74 +268,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-static Operand GenerateMappedArgumentsLookup(
- MacroAssembler* masm, Register object, Register key, Register scratch1,
- Register scratch2, Label* unmapped_case, Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
- Factory* factory = masm->isolate()->factory();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- __ test(key, Immediate(0x80000001));
- __ j(not_zero, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, Immediate(Smi::FromInt(2)));
- __ cmp(key, scratch2);
- __ j(above_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2,
- FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
- __ cmp(scratch2, factory->the_hole_value());
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- const int kContextOffset = FixedArray::kHeaderSize;
- __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1, scratch2, times_half_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, scratch);
- __ j(greater_equal, slow_case);
- return FieldOperand(backing_store, key, times_half_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -352,7 +291,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
+ language_mode);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
@@ -384,7 +324,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ bind(&slow);
// Slow case: jump to runtime.
__ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
@@ -399,26 +339,21 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
- if (FLAG_vector_ics) {
- // When vector ics are in use, the handlers in the stub cache expect a
- // vector and slot. Since we won't change the IC from any downstream
- // misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- isolate->factory()->keyed_load_dummy_vector());
- int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ isolate->factory()->keyed_load_dummy_vector());
+ int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
false, receiver, key, ebx, edi);
- if (FLAG_vector_ics) {
- __ pop(VectorLoadICDescriptor::VectorRegister());
- __ pop(VectorLoadICDescriptor::SlotRegister());
- }
+ __ pop(LoadWithVectorDescriptor::VectorRegister());
+ __ pop(LoadDescriptor::SlotRegister());
// Cache miss.
GenerateMiss(masm);
@@ -442,37 +377,6 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow, notin;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(edx));
- DCHECK(name.is(ecx));
- DCHECK(value.is(eax));
-
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, name, ebx, edi, &notin, &slow);
- __ mov(mapped_location, value);
- __ lea(ecx, mapped_location);
- __ mov(edx, value);
- __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow);
- __ mov(unmapped_location, value);
- __ lea(edi, unmapped_location);
- __ mov(edx, value);
- __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
@@ -656,10 +560,28 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
+
+
+ if (FLAG_vector_stores) {
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_store_dummy_vector());
+ int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
+ }
+
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
+
+ if (FLAG_vector_stores) {
+ __ pop(VectorStoreICDescriptor::VectorRegister());
+ __ pop(VectorStoreICDescriptor::SlotRegister());
+ }
+
// Cache miss.
__ jmp(&miss);
@@ -711,7 +633,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
}
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
Register dictionary = eax;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -726,33 +648,25 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm);
+ GenerateRuntimeGetProperty(masm, language_mode);
}
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- if (FLAG_vector_ics) {
- Register slot = VectorLoadICDescriptor::SlotRegister();
- Register vector = VectorLoadICDescriptor::VectorRegister();
- DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
- !edi.is(vector));
-
- __ pop(edi);
- __ push(receiver);
- __ push(name);
- __ push(slot);
- __ push(vector);
- __ push(edi);
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
- }
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
+ !edi.is(vector));
+
+ __ pop(edi);
+ __ push(receiver);
+ __ push(name);
+ __ push(slot);
+ __ push(vector);
+ __ push(edi);
}
@@ -764,12 +678,13 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -780,8 +695,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(name);
__ push(ebx);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+ : Runtime::kGetProperty,
+ 2, 1);
}
@@ -794,12 +711,13 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- int arg_count = FLAG_vector_ics ? 4 : 2;
+ int arg_count = 4;
__ TailCallExternalReference(ref, arg_count, 1);
}
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+ LanguageMode language_mode) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -810,8 +728,10 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(name);
__ push(ebx);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+ : Runtime::kKeyedGetProperty,
+ 2, 1);
}
@@ -962,7 +882,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
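Unlike x64, the register-poor ia32/x87 port has no free registers for the dummy vector and slot, so the code above pushes them as immediates before the stub-cache probe and pops them back into the descriptor registers afterwards. A toy model of that save/restore discipline (the MachineState type is invented for illustration, not V8 codegen):

    #include <cstdio>
    #include <stack>

    struct MachineState {
      std::stack<int> stack;
      int vector_reg = 0;  // register that should hold the feedback vector
      int slot_reg = 0;    // register that should hold the slot index
    };

    // The probe may freely reuse vector_reg/slot_reg as scratch space.
    void Probe(MachineState& m) {
      m.vector_reg = -1;
      m.slot_reg = -1;
    }

    int main() {
      MachineState m;
      m.stack.push(7);   // like push(Immediate(Smi::FromInt(slot)))
      m.stack.push(42);  // like push(Immediate(dummy_vector))
      Probe(m);
      m.vector_reg = m.stack.top(); m.stack.pop();  // pop(VectorRegister())
      m.slot_reg = m.stack.top(); m.stack.pop();    // pop(SlotRegister())
      std::printf("vector=%d slot=%d\n", m.vector_reg, m.slot_reg);
      return m.vector_reg == 42 && m.slot_reg == 7 ? 0 : 1;
    }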
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index be456ce95c..dfbba47e3f 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -61,8 +61,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- __ pop(VectorLoadICDescriptor::VectorRegister());
- __ pop(VectorLoadICDescriptor::SlotRegister());
+ __ pop(LoadWithVectorDescriptor::VectorRegister());
+ __ pop(LoadDescriptor::SlotRegister());
}
if (leave_frame) __ leave();
@@ -112,15 +112,14 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
if (IC::ICUseVector(ic_kind)) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = VectorLoadICDescriptor::VectorRegister();
- Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
DCHECK(!offset.is(vector) && !offset.is(slot));
__ pop(vector);
__ pop(slot);
}
-
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
@@ -205,7 +204,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index b323942d02..02251306c8 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -86,4 +86,5 @@ bool InitializeICU(const char* icu_data_file) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 4c7a1159b5..fa41eb0036 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -9,39 +9,55 @@
namespace v8 {
namespace internal {
-void CallInterfaceDescriptorData::Initialize(
+namespace {
+// Constructors for common combined semantic and representation types.
+Type* SmiType() {
+ return Type::Intersect(Type::SignedSmall(), Type::TaggedSigned());
+}
+
+
+Type* UntaggedSigned32() {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32());
+}
+
+
+Type* AnyTagged() {
+ return Type::Intersect(
+ Type::Any(), Type::Union(Type::TaggedPointer(), Type::TaggedSigned()));
+}
+
+
+Type* ExternalPointer() {
+ return Type::Intersect(Type::Internal(), Type::UntaggedPointer());
+}
+}  // namespace
+
+
+Type::FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(), Type::Undefined(), parameter_count,
+ isolate->interface_descriptor_zone());
+ while (parameter_count-- != 0) {
+ function->InitParameter(parameter_count, AnyTagged());
+ }
+ return function;
+}
+
+
+void CallInterfaceDescriptorData::InitializePlatformSpecific(
int register_parameter_count, Register* registers,
- Representation* register_param_representations,
PlatformInterfaceDescriptor* platform_descriptor) {
platform_specific_descriptor_ = platform_descriptor;
register_param_count_ = register_parameter_count;
- // An interface descriptor must have a context register.
- DCHECK(register_parameter_count > 0 &&
- registers[0].is(CallInterfaceDescriptor::ContextRegister()));
-
// InterfaceDescriptor owns a copy of the registers array.
register_params_.Reset(NewArray<Register>(register_parameter_count));
for (int i = 0; i < register_parameter_count; i++) {
register_params_[i] = registers[i];
}
-
- // If a representations array is specified, then the descriptor owns that as
- // well.
- if (register_param_representations != NULL) {
- register_param_representations_.Reset(
- NewArray<Representation>(register_parameter_count));
- for (int i = 0; i < register_parameter_count; i++) {
- // If there is a context register, the representation must be tagged.
- DCHECK(
- i != 0 ||
- register_param_representations[i].Equals(Representation::Tagged()));
- register_param_representations_[i] = register_param_representations[i];
- }
- }
}
-
const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
size_t index = data_ - start;
@@ -60,98 +76,299 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
}
-void LoadDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ReceiverRegister(),
- NameRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, AnyTagged());
+ function->InitParameter(2, SmiType());
+ return function;
+}
+
+void LoadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
- ValueRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+void StoreDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreTransitionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
- ValueRegister(), MapRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ElementTransitionAndStoreDescriptor::Initialize(
+void ElementTransitionAndStoreDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ValueRegister(), MapRegister(),
- NameRegister(), ReceiverRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ValueRegister(), MapRegister(), NameRegister(),
+ ReceiverRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InstanceofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), left(), right()};
- data->Initialize(arraysize(registers), registers, NULL);
+void InstanceofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {left(), right()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathPowTaggedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), exponent()};
- data->Initialize(arraysize(registers), registers, NULL);
+void MathPowTaggedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {exponent()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathPowIntegerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {exponent()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathPowIntegerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), exponent()};
- data->Initialize(arraysize(registers), registers, NULL);
+Type::FunctionType*
+LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, AnyTagged());
+ function->InitParameter(2, SmiType());
+ function->InitParameter(3, AnyTagged());
+ return function;
}
-void VectorLoadICTrampolineDescriptor::Initialize(
+void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
- SlotRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
+ VectorRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void VectorLoadICDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+Type::FunctionType*
+VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 5, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, AnyTagged());
+ function->InitParameter(2, AnyTagged());
+ function->InitParameter(3, SmiType());
+ function->InitParameter(4, AnyTagged());
+ return function;
+}
+
+
+void VectorStoreICDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
SlotRegister(), VectorRegister()};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+Type::FunctionType*
+VectorStoreICTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, AnyTagged());
+ function->InitParameter(2, AnyTagged());
+ function->InitParameter(3, SmiType());
+ return function;
+}
+
+
+void VectorStoreICTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiGetterDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), function_address()};
- Representation representations[] = {Representation::Tagged(),
- Representation::External()};
- data->Initialize(arraysize(registers), registers, representations);
+Type::FunctionType*
+ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 1, isolate->interface_descriptor_zone());
+ function->InitParameter(0, ExternalPointer());
+ return function;
}
-void ArgumentsAccessReadDescriptor::Initialize(
+void ApiGetterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), index(), parameter_count()};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {function_address()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ContextOnlyDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+void ArgumentsAccessReadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {index(), parameter_count()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void GrowArrayElementsDescriptor::Initialize(
+void ContextOnlyDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ContextRegister(), ObjectRegister(), KeyRegister(),
- CapacityRegister()};
- data->Initialize(arraysize(registers), registers, NULL);
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
+
+void GrowArrayElementsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ObjectRegister(), KeyRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+Type::FunctionType*
+FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, SmiType());
+ function->InitParameter(2, AnyTagged());
+ return function;
+}
+
+
+Type::FunctionType*
+CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, SmiType());
+ return function;
+}
+
+
+Type::FunctionType*
+CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged());
+ function->InitParameter(1, SmiType());
+ function->InitParameter(2, AnyTagged());
+ return function;
+}
+
+
+Type::FunctionType*
+CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, SmiType());
+ return function;
+}
+
+
+Type::FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
+ BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
+ int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, SmiType());
+ function->InitParameter(2, AnyTagged());
+ return function;
}
+
+
+Type::FunctionType*
+ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, AnyTagged());
+ function->InitParameter(2, UntaggedSigned32());
+ return function;
}
-} // namespace v8::internal
+
+
+Type::FunctionType*
+InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, UntaggedSigned32());
+ return function;
+}
+
+
+Type::FunctionType*
+ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, UntaggedSigned32()); // actual number of arguments
+ function->InitParameter(2,
+ UntaggedSigned32()); // expected number of arguments
+ return function;
+}
+
+
+Type::FunctionType*
+ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 5, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged()); // callee
+ function->InitParameter(1, AnyTagged()); // call_data
+ function->InitParameter(2, AnyTagged()); // holder
+ function->InitParameter(3, ExternalPointer()); // api_function_address
+ function->InitParameter(4, UntaggedSigned32()); // actual number of arguments
+ return function;
+}
+
+
+Type::FunctionType*
+ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged()); // callee
+ function->InitParameter(1, AnyTagged()); // call_data
+ function->InitParameter(2, AnyTagged()); // holder
+ function->InitParameter(3, ExternalPointer()); // api_function_address
+ return function;
+}
+
+
+Type::FunctionType*
+MathRoundVariantDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ function->InitParameter(0, SmiType());
+ function->InitParameter(1, AnyTagged());
+ return function;
+}
+
+
+} // namespace internal
+} // namespace v8
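interface-descriptors.cc replaces the parallel Representation arrays with a Type::FunctionType built once per descriptor, so arity, stack parameter count, and per-parameter types all derive from a single signature object. A minimal standalone model of the idea (the FunctionType here is invented for illustration, not V8's):

    #include <cassert>
    #include <string>
    #include <vector>

    struct FunctionType {
      std::string result;               // return type name
      std::vector<std::string> params;  // one type name per parameter
      int Arity() const { return static_cast<int>(params.size()); }
      const std::string& Parameter(int i) const { return params[i]; }
    };

    // Mirrors the Load descriptor's shape: tagged receiver and name, Smi slot.
    FunctionType BuildLoadType() {
      return FunctionType{"AnyTagged", {"AnyTagged", "AnyTagged", "SmiType"}};
    }

    int main() {
      FunctionType load = BuildLoadType();
      assert(load.Arity() == 3);
      assert(load.Parameter(2) == "SmiType");  // the slot parameter
      return 0;
    }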
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index ab1517e0df..f206b55841 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -18,9 +18,10 @@ class PlatformInterfaceDescriptor;
V(Store) \
V(StoreTransition) \
V(ElementTransitionAndStore) \
+ V(VectorStoreICTrampoline) \
+ V(VectorStoreIC) \
V(Instanceof) \
- V(VectorLoadICTrampoline) \
- V(VectorLoadIC) \
+ V(LoadWithVector) \
V(FastNewClosure) \
V(FastNewContext) \
V(ToNumber) \
@@ -59,40 +60,50 @@ class PlatformInterfaceDescriptor;
V(MathPowTagged) \
V(MathPowInteger) \
V(ContextOnly) \
- V(GrowArrayElements)
+ V(GrowArrayElements) \
+ V(MathRoundVariant)
class CallInterfaceDescriptorData {
public:
- CallInterfaceDescriptorData() : register_param_count_(-1) {}
+ CallInterfaceDescriptorData()
+ : stack_parameter_count_(-1),
+ register_param_count_(-1),
+ function_type_(nullptr) {}
// A copy of the passed in registers and param_representations is made
// and owned by the CallInterfaceDescriptorData.
+ void InitializePlatformIndependent(int stack_parameter_count,
+ Type::FunctionType* function_type) {
+ function_type_ = function_type;
+ stack_parameter_count_ = stack_parameter_count;
+ }
+
// TODO(mvstanton): Instead of taking parallel arrays register and
// param_representations, how about a struct that puts the representation
// and register side by side (e.g., RegRep(r1, Representation::Tagged())).
// The same should go for the CodeStubDescriptor class.
- void Initialize(int register_parameter_count, Register* registers,
- Representation* param_representations,
- PlatformInterfaceDescriptor* platform_descriptor = NULL);
+ void InitializePlatformSpecific(
+ int register_parameter_count, Register* registers,
+ PlatformInterfaceDescriptor* platform_descriptor = NULL);
bool IsInitialized() const { return register_param_count_ >= 0; }
int register_param_count() const { return register_param_count_; }
Register register_param(int index) const { return register_params_[index]; }
Register* register_params() const { return register_params_.get(); }
- Representation register_param_representation(int index) const {
- return register_param_representations_[index];
- }
- Representation* register_param_representations() const {
- return register_param_representations_.get();
+ Type* register_param_type(int index) const {
+ return function_type_->Parameter(index);
}
PlatformInterfaceDescriptor* platform_specific_descriptor() const {
return platform_specific_descriptor_;
}
+ Type::FunctionType* function_type() const { return function_type_; }
+
private:
+ int stack_parameter_count_;
int register_param_count_;
// The Register params are allocated dynamically by the
@@ -100,11 +111,9 @@ class CallInterfaceDescriptorData {
// arrays of Registers cause creation of runtime static initializers
// which we don't want.
SmartArrayPointer<Register> register_params_;
- // Specifies Representations for the stub's parameter. Points to an array of
- // Representations of the same length of the numbers of parameters to the
- // stub, or if NULL (the default value), Representation of each parameter
- // assumed to be Tagged().
- SmartArrayPointer<Representation> register_param_representations_;
+
+ // Specifies types for parameters and return type.
+ Type::FunctionType* function_type_;
PlatformInterfaceDescriptor* platform_specific_descriptor_;
@@ -126,41 +135,26 @@ class CallDescriptors {
class CallInterfaceDescriptor {
public:
CallInterfaceDescriptor() : data_(NULL) {}
+ virtual ~CallInterfaceDescriptor() {}
CallInterfaceDescriptor(Isolate* isolate, CallDescriptors::Key key)
: data_(isolate->call_descriptor_data(key)) {}
- int GetEnvironmentLength() const { return data()->register_param_count(); }
-
int GetRegisterParameterCount() const {
return data()->register_param_count();
}
- Register GetParameterRegister(int index) const {
- return data()->register_param(index);
- }
-
- Representation GetParameterRepresentation(int index) const {
- DCHECK(index < data()->register_param_count());
- if (data()->register_param_representations() == NULL) {
- return Representation::Tagged();
- }
-
- return data()->register_param_representation(index);
- }
-
- // "Environment" versions of parameter functions. The first register
- // parameter (context) is not included.
- int GetEnvironmentParameterCount() const {
- return GetEnvironmentLength() - 1;
+ int GetStackParameterCount() const {
+ return data()->function_type()->Arity() - data()->register_param_count();
}
- Register GetEnvironmentParameterRegister(int index) const {
- return GetParameterRegister(index + 1);
+ Register GetRegisterParameter(int index) const {
+ return data()->register_param(index);
}
- Representation GetEnvironmentParameterRepresentation(int index) const {
- return GetParameterRepresentation(index + 1);
+ Type* GetParameterType(int index) const {
+ DCHECK(index < data()->register_param_count());
+ return data()->register_param_type(index);
}
// Some platforms have extra information to associate with the descriptor.
@@ -168,40 +162,75 @@ class CallInterfaceDescriptor {
return data()->platform_specific_descriptor();
}
+ Type::FunctionType* GetFunctionType() const {
+ return data()->function_type();
+ }
+
static const Register ContextRegister();
const char* DebugName(Isolate* isolate) const;
+ static Type::FunctionType* BuildDefaultFunctionType(Isolate* isolate,
+ int parameter_count);
+
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
+ virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int register_param_count) {
+ return BuildDefaultFunctionType(isolate, register_param_count);
+ }
+
+ virtual void InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ UNREACHABLE();
+ }
+
+ void Initialize(Isolate* isolate, CallDescriptors::Key key) {
+ if (!data()->IsInitialized()) {
+ CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
+ InitializePlatformSpecific(d);
+ Type::FunctionType* function_type =
+ BuildCallInterfaceDescriptorFunctionType(isolate,
+ d->register_param_count());
+ d->InitializePlatformIndependent(0, function_type);
+ }
+ }
+
private:
const CallInterfaceDescriptorData* data_;
};
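The constructor path above is worth tracing once: registers are chosen per platform, the function type is built once, platform-independently, and both land in the shared CallInterfaceDescriptorData. A sketch of the flow for a hypothetical descriptor D with two register parameters (the register set and count are illustrative only):

// Lazy two-phase setup, traced for a hypothetical descriptor D:
//
//   D d(isolate);                            // ctor calls Initialize(key)
//     -> data()->IsInitialized() is false, so:
//     -> InitializePlatformSpecific(data)    // virtual; fills registers,
//                                            //   e.g. {r1, r0}
//     -> BuildCallInterfaceDescriptorFunctionType(isolate, 2)
//     -> data->InitializePlatformIndependent(0, function_type)
//
//   D d2(isolate);                           // same isolate: data is
//                                            // already initialized, so
//                                            // nothing runs again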
-#define DECLARE_DESCRIPTOR(name, base) \
- explicit name(Isolate* isolate) : base(isolate, key()) { \
- if (!data()->IsInitialized()) \
- Initialize(isolate->call_descriptor_data(key())); \
- } \
- \
- protected: \
- void Initialize(CallInterfaceDescriptorData* data); \
- name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
- \
- public: \
+#define DECLARE_DESCRIPTOR(name, base) \
+ explicit name(Isolate* isolate) : base(isolate, key()) { \
+ Initialize(isolate, key()); \
+ } \
+ \
+ protected: \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
+ name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+ \
+ public: \
static inline CallDescriptors::Key key();
+#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
+ DECLARE_DESCRIPTOR(name, base) \
+ protected: \
+ virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType( \
+ Isolate* isolate, int register_param_count) override; \
+ \
+ public:
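Expanding the two macros mechanically shows what each descriptor class actually receives; for LoadDescriptor below, DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadDescriptor, CallInterfaceDescriptor) injects approximately this into the class body:

// Mechanical expansion of the macro pair above for LoadDescriptor:
explicit LoadDescriptor(Isolate* isolate)
    : CallInterfaceDescriptor(isolate, key()) {
  Initialize(isolate, key());
}

 protected:
  void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
  LoadDescriptor(Isolate* isolate, CallDescriptors::Key key)
      : CallInterfaceDescriptor(isolate, key) {}

 public:
  static inline CallDescriptors::Key key();

 protected:
  virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType(
      Isolate* isolate, int register_param_count) override;

 public: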
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(LoadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadDescriptor,
+ CallInterfaceDescriptor)
- enum ParameterIndices { kReceiverIndex, kNameIndex };
+ enum ParameterIndices { kReceiverIndex, kNameIndex, kSlotIndex };
static const Register ReceiverRegister();
static const Register NameRegister();
+ static const Register SlotRegister();
};
@@ -255,19 +284,38 @@ class InstanceofDescriptor : public CallInterfaceDescriptor {
};
-class VectorLoadICTrampolineDescriptor : public LoadDescriptor {
+class VectorStoreICTrampolineDescriptor : public StoreDescriptor {
public:
- DECLARE_DESCRIPTOR(VectorLoadICTrampolineDescriptor, LoadDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ VectorStoreICTrampolineDescriptor, StoreDescriptor)
- enum ParameterIndices { kReceiverIndex, kNameIndex, kSlotIndex };
+ enum ParameterIndices { kReceiverIndex, kNameIndex, kValueIndex, kSlotIndex };
static const Register SlotRegister();
};
-class VectorLoadICDescriptor : public VectorLoadICTrampolineDescriptor {
+class VectorStoreICDescriptor : public VectorStoreICTrampolineDescriptor {
public:
- DECLARE_DESCRIPTOR(VectorLoadICDescriptor, VectorLoadICTrampolineDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ VectorStoreICDescriptor, VectorStoreICTrampolineDescriptor)
+
+ enum ParameterIndices {
+ kReceiverIndex,
+ kNameIndex,
+ kValueIndex,
+ kSlotIndex,
+ kVectorIndex
+ };
+
+ static const Register VectorRegister();
+};
+
+
+class LoadWithVectorDescriptor : public LoadDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadWithVectorDescriptor,
+ LoadDescriptor)
enum ParameterIndices {
kReceiverIndex,
@@ -312,7 +360,8 @@ class TypeofDescriptor : public CallInterfaceDescriptor {
class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(FastCloneShallowArrayDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneShallowArrayDescriptor,
+ CallInterfaceDescriptor)
};
@@ -324,7 +373,8 @@ class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(CreateAllocationSiteDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateAllocationSiteDescriptor,
+ CallInterfaceDescriptor)
};
@@ -337,7 +387,8 @@ class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
kParameterCount
};
- DECLARE_DESCRIPTOR(CreateWeakCellDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateWeakCellDescriptor,
+ CallInterfaceDescriptor)
};
@@ -349,16 +400,16 @@ class CallFunctionDescriptor : public CallInterfaceDescriptor {
class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(CallFunctionWithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ CallFunctionWithFeedbackDescriptor, CallInterfaceDescriptor)
};
class CallFunctionWithFeedbackAndVectorDescriptor
: public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(CallFunctionWithFeedbackAndVectorDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ CallFunctionWithFeedbackAndVectorDescriptor, CallInterfaceDescriptor)
};
@@ -396,7 +447,8 @@ class ArrayConstructorConstantArgCountDescriptor
class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(ArrayConstructorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArrayConstructorDescriptor,
+ CallInterfaceDescriptor)
};
@@ -410,8 +462,8 @@ class InternalArrayConstructorConstantArgCountDescriptor
class InternalArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(InternalArrayConstructorDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ InternalArrayConstructorDescriptor, CallInterfaceDescriptor)
};
@@ -472,25 +524,29 @@ class CallHandlerDescriptor : public CallInterfaceDescriptor {
class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(ArgumentAdaptorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArgumentAdaptorDescriptor,
+ CallInterfaceDescriptor)
};
class ApiFunctionDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(ApiFunctionDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiFunctionDescriptor,
+ CallInterfaceDescriptor)
};
class ApiAccessorDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(ApiAccessorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiAccessorDescriptor,
+ CallInterfaceDescriptor)
};
class ApiGetterDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiGetterDescriptor,
+ CallInterfaceDescriptor)
static const Register function_address();
};
@@ -528,6 +584,13 @@ class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
};
+class MathRoundVariantDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(MathRoundVariantDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class ContextOnlyDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
@@ -538,10 +601,9 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor, CallInterfaceDescriptor)
- enum RegisterInfo { kObjectIndex, kKeyIndex, kCapacityIndex };
+ enum RegisterInfo { kObjectIndex, kKeyIndex };
static const Register ObjectRegister();
static const Register KeyRegister();
- static const Register CapacityRegister();
};
#undef DECLARE_DESCRIPTOR
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 2aedfb48b1..97c9ba0551 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -606,4 +606,5 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index d53d0d8d32..e9d526a453 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -280,9 +280,7 @@ Handle<String> Isolate::StackTraceString() {
}
-void Isolate::PushStackTraceAndDie(unsigned int magic,
- Object* object,
- Map* map,
+void Isolate::PushStackTraceAndDie(unsigned int magic, void* ptr1, void* ptr2,
unsigned int magic2) {
const int kMaxStackTraceSize = 32 * KB;
Handle<String> trace = StackTraceString();
@@ -291,9 +289,8 @@ void Isolate::PushStackTraceAndDie(unsigned int magic,
String::WriteToFlat(*trace, buffer, 0, length);
buffer[length] = '\0';
// TODO(dcarney): convert buffer to utf8?
- base::OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2,
- static_cast<void*>(object), static_cast<void*>(map),
- reinterpret_cast<char*>(buffer));
+ base::OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2, ptr1,
+ ptr2, reinterpret_cast<char*>(buffer));
base::OS::Abort();
}
@@ -323,7 +320,7 @@ static bool IsVisibleInStackTrace(JSFunction* fun,
if (receiver->IsJSBuiltinsObject()) return false;
if (fun->IsBuiltin()) {
return fun->shared()->native();
- } else if (fun->IsFromNativeScript() || fun->IsFromExtensionScript()) {
+ } else if (!fun->IsSubjectToDebugging()) {
return false;
}
}
@@ -341,9 +338,8 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
Handle<String> stackTraceLimit =
factory()->InternalizeUtf8String("stackTraceLimit");
DCHECK(!stackTraceLimit.is_null());
- Handle<Object> stack_trace_limit =
- JSObject::GetDataProperty(Handle<JSObject>::cast(error),
- stackTraceLimit);
+ Handle<Object> stack_trace_limit = JSReceiver::GetDataProperty(
+ Handle<JSObject>::cast(error), stackTraceLimit);
if (!stack_trace_limit->IsNumber()) return factory()->undefined_value();
int limit = FastD2IChecked(stack_trace_limit->Number());
limit = Max(limit, 0); // Ensure that limit is not negative.
@@ -446,7 +442,7 @@ MaybeHandle<JSObject> Isolate::CaptureAndSetSimpleStackTrace(
Handle<JSArray> Isolate::GetDetailedStackTrace(Handle<JSObject> error_object) {
Handle<Name> key_detailed = factory()->detailed_stack_trace_symbol();
Handle<Object> stack_trace =
- JSObject::GetDataProperty(error_object, key_detailed);
+ JSReceiver::GetDataProperty(error_object, key_detailed);
if (stack_trace->IsJSArray()) return Handle<JSArray>::cast(stack_trace);
if (!capture_stack_trace_for_uncaught_exceptions_) return Handle<JSArray>();
@@ -600,7 +596,7 @@ int PositionFromStackTrace(Handle<FixedArray> elements, int index) {
Handle<JSArray> Isolate::GetDetailedFromSimpleStackTrace(
Handle<JSObject> error_object) {
Handle<Name> key = factory()->stack_trace_symbol();
- Handle<Object> property = JSObject::GetDataProperty(error_object, key);
+ Handle<Object> property = JSReceiver::GetDataProperty(error_object, key);
if (!property->IsJSArray()) return Handle<JSArray>();
Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
@@ -709,7 +705,7 @@ static void PrintFrames(Isolate* isolate,
void Isolate::PrintStack(StringStream* accumulator, PrintStackMode mode) {
// The MentionedObjectCache is not GC-proof at the moment.
DisallowHeapAllocation no_gc;
- DCHECK(StringStream::IsMentionedObjectCacheClear(this));
+ DCHECK(accumulator->IsMentionedObjectCacheClear(this));
// Avoid printing anything if there are no frames.
if (c_entry_fp(thread_local_top()) == 0) return;
@@ -748,16 +744,9 @@ static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate,
}
-static void ThrowAccessCheckError(Isolate* isolate) {
- Handle<String> message =
- isolate->factory()->InternalizeUtf8String("no access");
- isolate->ScheduleThrow(*isolate->factory()->NewTypeError(message));
-}
-
-
void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
if (!thread_local_top()->failed_access_check_callback_) {
- return ThrowAccessCheckError(this);
+ return ScheduleThrow(*factory()->NewTypeError(MessageTemplate::kNoAccess));
}
DCHECK(receiver->IsAccessCheckNeeded());
@@ -770,7 +759,8 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
if (!access_check_info) {
AllowHeapAllocation doesnt_matter_anymore;
- return ThrowAccessCheckError(this);
+ return ScheduleThrow(
+ *factory()->NewTypeError(MessageTemplate::kNoAccess));
}
data = handle(access_check_info->data(), this);
}
@@ -783,11 +773,17 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
bool Isolate::IsInternallyUsedPropertyName(Handle<Object> name) {
+ if (name->IsSymbol()) {
+ return Handle<Symbol>::cast(name)->is_private();
+ }
return name.is_identical_to(factory()->hidden_string());
}
bool Isolate::IsInternallyUsedPropertyName(Object* name) {
+ if (name->IsSymbol()) {
+ return Symbol::cast(name)->is_private();
+ }
return name == heap()->hidden_string();
}
@@ -852,9 +848,13 @@ Object* Isolate::StackOverflow() {
// constructor. Instead, we copy the pre-constructed boilerplate and
// attach the stack trace as a hidden property.
Handle<String> key = factory()->stack_overflow_string();
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(
- Object::GetProperty(js_builtins_object(), key).ToHandleChecked());
- Handle<JSObject> exception = factory()->CopyJSObject(boilerplate);
+ Handle<Object> boilerplate =
+ Object::GetProperty(js_builtins_object(), key).ToHandleChecked();
+ if (boilerplate->IsUndefined()) {
+ return Throw(heap()->undefined_value(), nullptr);
+ }
+ Handle<JSObject> exception =
+ factory()->CopyJSObject(Handle<JSObject>::cast(boilerplate));
Throw(*exception, nullptr);
CaptureAndSetSimpleStackTrace(exception, factory()->undefined_value());
@@ -936,6 +936,9 @@ void ReportBootstrappingException(Handle<Object> exception,
"Extension or internal compilation error in %s at line %d.\n",
String::cast(location->script()->name())->ToCString().get(),
line_number);
+ } else if (exception->IsString()) {
+ base::OS::PrintError("Extension or internal compilation error: %s.\n",
+ String::cast(*exception)->ToCString().get());
} else {
base::OS::PrintError("Extension or internal compilation error.\n");
}
@@ -944,16 +947,22 @@ void ReportBootstrappingException(Handle<Object> exception,
// builtins, print the actual source here so that line numbers match.
if (location->script()->source()->IsString()) {
Handle<String> src(String::cast(location->script()->source()));
- PrintF("Failing script:\n");
+ PrintF("Failing script:");
int len = src->length();
- int line_number = 1;
- PrintF("%5d: ", line_number);
- for (int i = 0; i < len; i++) {
- uint16_t character = src->Get(i);
- PrintF("%c", character);
- if (character == '\n' && i < len - 2) {
- PrintF("%5d: ", ++line_number);
+ if (len == 0) {
+ PrintF(" <not available>\n");
+ } else {
+ PrintF("\n");
+ int line_number = 1;
+ PrintF("%5d: ", line_number);
+ for (int i = 0; i < len; i++) {
+ uint16_t character = src->Get(i);
+ PrintF("%c", character);
+ if (character == '\n' && i < len - 2) {
+ PrintF("%5d: ", ++line_number);
+ }
}
+ PrintF("\n");
}
}
#endif
@@ -1070,7 +1079,7 @@ Object* Isolate::UnwindAndFindHandler() {
if (frame->is_optimized() && catchable_by_js) {
OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
int stack_slots = 0; // Will contain stack slot count of frame.
- offset = js_frame->LookupExceptionHandlerInTable(&stack_slots);
+ offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, NULL);
if (offset >= 0) {
// Compute the stack pointer from the frame pointer. This ensures that
// argument slots on the stack are dropped as returning would.
@@ -1090,7 +1099,7 @@ Object* Isolate::UnwindAndFindHandler() {
if (frame->is_java_script() && catchable_by_js) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
int stack_slots = 0; // Will contain operand stack depth of handler.
- offset = js_frame->LookupExceptionHandlerInTable(&stack_slots);
+ offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, NULL);
if (offset >= 0) {
// Compute the stack pointer from the frame pointer. This ensures that
// operand stack slots are dropped for nested statements. Also restore
@@ -1146,8 +1155,12 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
if (frame->is_java_script()) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
int stack_slots = 0; // The computed stack slot count is not used.
- if (js_frame->LookupExceptionHandlerInTable(&stack_slots) > 0) {
- return CAUGHT_BY_JAVASCRIPT;
+ HandlerTable::CatchPrediction prediction;
+ if (js_frame->LookupExceptionHandlerInTable(&stack_slots, &prediction) >
+ 0) {
+ // We are conservative with our prediction: try-finally is considered
+ // to always rethrow, to meet the expectation of the debugger.
+ if (prediction == HandlerTable::CAUGHT) return CAUGHT_BY_JAVASCRIPT;
}
}
@@ -1272,19 +1285,19 @@ bool Isolate::ComputeLocationFromException(MessageLocation* target,
if (!exception->IsJSObject()) return false;
Handle<Name> start_pos_symbol = factory()->error_start_pos_symbol();
- Handle<Object> start_pos = JSObject::GetDataProperty(
+ Handle<Object> start_pos = JSReceiver::GetDataProperty(
Handle<JSObject>::cast(exception), start_pos_symbol);
if (!start_pos->IsSmi()) return false;
int start_pos_value = Handle<Smi>::cast(start_pos)->value();
Handle<Name> end_pos_symbol = factory()->error_end_pos_symbol();
- Handle<Object> end_pos = JSObject::GetDataProperty(
+ Handle<Object> end_pos = JSReceiver::GetDataProperty(
Handle<JSObject>::cast(exception), end_pos_symbol);
if (!end_pos->IsSmi()) return false;
int end_pos_value = Handle<Smi>::cast(end_pos)->value();
Handle<Name> script_symbol = factory()->error_script_symbol();
- Handle<Object> script = JSObject::GetDataProperty(
+ Handle<Object> script = JSReceiver::GetDataProperty(
Handle<JSObject>::cast(exception), script_symbol);
if (!script->IsScript()) return false;
@@ -1301,7 +1314,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
if (!exception->IsJSObject()) return false;
Handle<Name> key = factory()->stack_trace_symbol();
Handle<Object> property =
- JSObject::GetDataProperty(Handle<JSObject>::cast(exception), key);
+ JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
if (!property->IsJSArray()) return false;
Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
@@ -1311,7 +1324,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
for (int i = 1; i < elements_limit; i += 4) {
Handle<JSFunction> fun =
handle(JSFunction::cast(elements->get(i + 1)), this);
- if (fun->IsFromNativeScript()) continue;
+ if (!fun->IsSubjectToDebugging()) continue;
Object* script = fun->shared()->script();
if (script->IsScript() &&
@@ -1380,20 +1393,9 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
location = &potential_computed_location;
}
- // If the exception argument is a custom object, turn it into a string
- // before throwing as uncaught exception. Note that the pending
- // exception object to be set later must not be turned into a string.
- if (exception->IsJSObject() && !IsErrorObject(exception)) {
- MaybeHandle<Object> maybe_exception =
- Execution::ToDetailString(this, exception);
- if (!maybe_exception.ToHandle(&exception)) {
- exception =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("exception"));
- }
- }
- return MessageHandler::MakeMessageObject(this, "uncaught_exception", location,
- HandleVector<Object>(&exception, 1),
- stack_trace_object);
+ return MessageHandler::MakeMessageObject(
+ this, MessageTemplate::kUncaughtException, location, exception,
+ stack_trace_object);
}
@@ -1587,7 +1589,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
int stack_slots = 0; // The computed stack slot count is not used.
- if (frame->LookupExceptionHandlerInTable(&stack_slots) > 0) {
+ if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) {
// Throwing inside a Promise only leads to a reject if not caught by an
// inner try-catch or try-finally.
if (frame->function() == *promise_function) {
@@ -2004,6 +2006,12 @@ Isolate::~Isolate() {
delete debug_;
debug_ = NULL;
+
+#if USE_SIMULATOR
+ Simulator::TearDown(simulator_i_cache_, simulator_redirection_);
+ simulator_i_cache_ = nullptr;
+ simulator_redirection_ = nullptr;
+#endif
}
@@ -2373,9 +2381,11 @@ CodeTracer* Isolate::GetCodeTracer() {
}
-Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
+Map* Isolate::get_initial_js_array_map(ElementsKind kind, Strength strength) {
Context* native_context = context()->native_context();
- Object* maybe_map_array = native_context->js_array_maps();
+ Object* maybe_map_array = is_strong(strength)
+ ? native_context->js_array_strong_maps()
+ : native_context->js_array_maps();
if (!maybe_map_array->IsUndefined()) {
Object* maybe_transitioned_map =
FixedArray::cast(maybe_map_array)->get(kind);
@@ -2519,21 +2529,29 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#endif
+Handle<JSObject> Isolate::SetUpSubregistry(Handle<JSObject> registry,
+ Handle<Map> map, const char* cname) {
+ Handle<String> name = factory()->InternalizeUtf8String(cname);
+ Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
+ JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
+ "SetupSymbolRegistry");
+ JSObject::AddProperty(registry, name, obj, NONE);
+ return obj;
+}
+
+
Handle<JSObject> Isolate::GetSymbolRegistry() {
if (heap()->symbol_registry()->IsSmi()) {
Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
heap()->set_symbol_registry(*registry);
- static const char* nested[] = {"for", "for_api", "keyFor", "private_api",
- "private_intern"};
- for (unsigned i = 0; i < arraysize(nested); ++i) {
- Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
- Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
- JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8,
- "SetupSymbolRegistry");
- JSObject::SetProperty(registry, name, obj, STRICT).Assert();
- }
+ SetUpSubregistry(registry, map, "for");
+ SetUpSubregistry(registry, map, "for_api");
+ SetUpSubregistry(registry, map, "keyFor");
+ SetUpSubregistry(registry, map, "private_api");
+ heap()->AddPrivateGlobalSymbols(
+ SetUpSubregistry(registry, map, "private_intern"));
}
return Handle<JSObject>::cast(factory()->symbol_registry());
}
@@ -2669,8 +2687,15 @@ void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
- if (use_counter_callback_) {
- use_counter_callback_(reinterpret_cast<v8::Isolate*>(this), feature);
+ // The counter callback may cause the embedder to call into V8, which is not
+ // generally possible during GC.
+ if (heap_.gc_state() == Heap::NOT_IN_GC) {
+ if (use_counter_callback_) {
+ HandleScope handle_scope(this);
+ use_counter_callback_(reinterpret_cast<v8::Isolate*>(this), feature);
+ }
+ } else {
+ heap_.IncrementDeferredCount(feature);
}
}
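The GC guard matters because the callback is embedder code and may call back into V8. A minimal embedder-side counterpart, assuming the public v8.h API (the UseCounterCallback signature and SetUseCounterCallback() are the real API; the counter table is made up for the example):

#include <atomic>
#include "v8.h"

// One slot per feature; kUseCounterFeatureCount is the enum's terminator.
static std::atomic<int> g_counts[v8::Isolate::kUseCounterFeatureCount];

static void CountFeature(v8::Isolate* isolate,
                         v8::Isolate::UseCounterFeature feature) {
  // With the change above this never runs during GC; counts observed
  // inside GC are routed to Heap::IncrementDeferredCount instead.
  g_counts[feature]++;
}

// At embedder startup:  isolate->SetUseCounterCallback(CountFeature);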
@@ -2747,15 +2772,15 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
-bool StackLimitCheck::JsHasOverflowed() const {
+bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
// The simulator uses a separate JS stack.
Address jssp_address = Simulator::current(isolate_)->get_sp();
uintptr_t jssp = reinterpret_cast<uintptr_t>(jssp_address);
- if (jssp < stack_guard->real_jslimit()) return true;
+ if (jssp - gap < stack_guard->real_jslimit()) return true;
#endif // USE_SIMULATOR
- return GetCurrentStackPosition() < stack_guard->real_climit();
+ return GetCurrentStackPosition() - gap < stack_guard->real_climit();
}
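The new gap parameter lets callers reserve headroom for work they are about to do on the current stack, rather than finding out mid-way. A usage sketch against the internal API declared in isolate.h below (the 8 * KB budget is illustrative):

StackLimitCheck check(isolate);
if (check.JsHasOverflowed(8 * KB)) {
  // Bail out with a RangeError while there is still stack left to throw.
  return isolate->StackOverflow();
}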
@@ -2781,4 +2806,5 @@ bool PostponeInterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
return false;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 84190801f0..a67f0c7fb5 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -81,15 +81,8 @@ typedef void* ExternalReferenceRedirectorPointer();
class Debug;
class Debugger;
class PromiseOnStack;
-
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
-#endif
// Static indirection table for handles to constants. If a frame
@@ -321,11 +314,7 @@ class ThreadLocalTop BASE_EMBEDDED {
};
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
- V8_TARGET_ARCH_PPC && !defined(__PPC__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
- V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
+#if USE_SIMULATOR
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
V(bool, simulator_initialized, false) \
@@ -418,11 +407,7 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS64
+#if USE_SIMULATOR
simulator_(NULL),
#endif
next_(NULL),
@@ -434,11 +419,7 @@ class Isolate {
FIELD_ACCESSOR(uintptr_t, stack_limit)
FIELD_ACCESSOR(ThreadState*, thread_state)
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS64
+#if USE_SIMULATOR
FIELD_ACCESSOR(Simulator*, simulator)
#endif
@@ -452,11 +433,7 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS64
+#if USE_SIMULATOR
Simulator* simulator_;
#endif
@@ -719,10 +696,8 @@ class Isolate {
PrintStackMode mode = kPrintStackVerbose);
void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
Handle<String> StackTraceString();
- NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
- Object* object,
- Map* map,
- unsigned int magic2));
+ NO_INLINE(void PushStackTraceAndDie(unsigned int magic, void* ptr1,
+ void* ptr2, unsigned int magic2));
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
@@ -911,6 +886,7 @@ class Isolate {
return handle_scope_implementer_;
}
Zone* runtime_zone() { return &runtime_zone_; }
+ Zone* interface_descriptor_zone() { return &interface_descriptor_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -1015,7 +991,8 @@ class Isolate {
date_cache_ = date_cache;
}
- Map* get_initial_js_array_map(ElementsKind kind);
+ Map* get_initial_js_array_map(ElementsKind kind,
+ Strength strength = Strength::WEAK);
static const int kArrayProtectorValid = 1;
static const int kArrayProtectorInvalid = 0;
@@ -1159,6 +1136,8 @@ class Isolate {
private:
friend struct GlobalState;
friend struct InitializeGlobalState;
+ Handle<JSObject> SetUpSubregistry(Handle<JSObject> registry, Handle<Map> map,
+ const char* name);
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
@@ -1293,6 +1272,7 @@ class Isolate {
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
+ Zone interface_descriptor_zone_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
GlobalHandles* global_handles_;
EternalHandles* eternal_handles_;
@@ -1501,7 +1481,7 @@ class StackLimitCheck BASE_EMBEDDED {
}
// Use this to check for stack-overflow when entering runtime from JS code.
- bool JsHasOverflowed() const;
+ bool JsHasOverflowed(uintptr_t gap = 0) const;
private:
Isolate* isolate_;
diff --git a/deps/v8/src/iterator-prototype.js b/deps/v8/src/iterator-prototype.js
new file mode 100644
index 0000000000..96dd7bfde1
--- /dev/null
+++ b/deps/v8/src/iterator-prototype.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var $iteratorPrototype;
+
+(function(global, utils) {
+ "use strict";
+ %CheckIsBootstrapping();
+
+ var GlobalObject = global.Object;
+
+ // 25.1.2.1 %IteratorPrototype% [ @@iterator ] ( )
+ function IteratorPrototypeIterator() {
+ return this;
+ }
+
+ utils.SetFunctionName(IteratorPrototypeIterator, symbolIterator);
+ %AddNamedProperty($iteratorPrototype, symbolIterator,
+ IteratorPrototypeIterator, DONT_ENUM);
+})
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 54c7841480..f1fa5647f8 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -213,14 +213,13 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
if (isolate_->has_pending_exception()) return Handle<Object>::null();
// Parse failed. Current character is the unexpected token.
- const char* message;
Factory* factory = this->factory();
- Handle<JSArray> array;
+ MessageTemplate::Template message;
+ Handle<String> argument;
switch (c0_) {
case kEndOfString:
- message = "unexpected_eos";
- array = factory->NewJSArray(0);
+ message = MessageTemplate::kUnexpectedEOS;
break;
case '-':
case '0':
@@ -233,26 +232,21 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
case '7':
case '8':
case '9':
- message = "unexpected_token_number";
- array = factory->NewJSArray(0);
+ message = MessageTemplate::kUnexpectedTokenNumber;
break;
case '"':
- message = "unexpected_token_string";
- array = factory->NewJSArray(0);
+ message = MessageTemplate::kUnexpectedTokenString;
break;
default:
- message = "unexpected_token";
- Handle<Object> name = factory->LookupSingleCharacterStringFromCode(c0_);
- Handle<FixedArray> element = factory->NewFixedArray(1);
- element->set(0, *name);
- array = factory->NewJSArrayWithElements(element);
+ message = MessageTemplate::kUnexpectedToken;
+ argument = factory->LookupSingleCharacterStringFromCode(c0_);
break;
}
MessageLocation location(factory->NewScript(source_),
position_,
position_ + 1);
- Handle<Object> error = factory->NewSyntaxError(message, array);
+ Handle<Object> error = factory->NewSyntaxError(message, argument);
return isolate()->template Throw<Object>(error, &location);
}
return result;
@@ -317,7 +311,7 @@ ParseElementResult JsonParser<seq_one_byte>::ParseElement(
} else {
do {
int d = c0_ - '0';
- if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
+ if (index > 429496729U - ((d + 3) >> 3)) break;
index = (index * 10) + d;
Advance();
} while (IsDecimalDigit(c0_));
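The new guard is exact where the old one was off by one at d == 5. The largest valid array index is 2^32 - 2 = 4294967294, so appending digit d is safe iff index <= (4294967294 - d) / 10, which is 429496729 for d <= 4 and 429496728 for d >= 5; (d + 3) >> 3 computes exactly that 0-or-1 correction, while the old (d > 5) test admitted index 429496729 with d = 5, i.e. 4294967295 = kMaxUInt32, which is not a valid index. A self-contained check:

#include <cassert>

int main() {
  for (int d = 0; d <= 9; d++) {
    unsigned limit = 429496729U - ((d + 3) >> 3);     // last safe index
    assert(limit * 10ULL + d <= 4294967294ULL);       // appending is safe
    assert((limit + 1) * 10ULL + d > 4294967294ULL);  // bound is tight
  }
  return 0;
}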
@@ -331,7 +325,8 @@ ParseElementResult JsonParser<seq_one_byte>::ParseElement(
AdvanceSkipWhitespace();
Handle<Object> value = ParseJsonValue();
if (!value.is_null()) {
- JSObject::SetOwnElement(json_object, index, value, SLOPPY).Assert();
+ JSObject::SetOwnElementIgnoreAttributes(json_object, index, value, NONE)
+ .Assert();
return kElementFound;
} else {
return kNullHandle;
@@ -439,7 +434,8 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
// Commit the intermediate state to the object and stop transitioning.
CommitStateToJsonObject(json_object, map, &properties);
- Runtime::DefineObjectProperty(json_object, key, value, NONE).Check();
+ JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
+ .Check();
} while (transitioning && MatchSkipWhiteSpace(','));
// If we transitioned until the very end, transition the map now.
@@ -475,7 +471,8 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- Runtime::DefineObjectProperty(json_object, key, value, NONE).Check();
+ JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key,
+ value).Check();
}
}
@@ -531,7 +528,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
fast_elements->set(i, *elements[i]);
}
Handle<Object> json_array = factory()->NewJSArrayWithElements(
- fast_elements, FAST_ELEMENTS, pretenure_);
+ fast_elements, FAST_ELEMENTS, Strength::WEAK, pretenure_);
return scope.CloseAndEscape(json_array);
}
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index efb71e5fb1..1ba99c1e9a 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -8,6 +8,7 @@
#include "src/v8.h"
#include "src/conversions.h"
+#include "src/messages.h"
#include "src/string-builder.h"
#include "src/utils.h"
@@ -272,16 +273,15 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
for (int i = 0; i < length; i++) {
if (elements->get(i) == *object) {
AllowHeapAllocation allow_to_return_error;
- Handle<Object> error = factory()->NewTypeError(
- "circular_structure", HandleVector<Object>(NULL, 0));
+ Handle<Object> error =
+ factory()->NewTypeError(MessageTemplate::kCircularStructure);
isolate_->Throw(*error);
return EXCEPTION;
}
}
}
- JSArray::EnsureSize(stack_, length + 1);
+ JSArray::SetLength(stack_, length + 1);
FixedArray::cast(stack_->elements())->set(length, *object);
- stack_->set_length(Smi::FromInt(length + 1));
return SUCCESS;
}
@@ -438,7 +438,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
Result stack_push = StackPush(object);
if (stack_push != SUCCESS) return stack_push;
uint32_t length = 0;
- CHECK(object->length()->ToArrayIndex(&length));
+ CHECK(object->length()->ToArrayLength(&length));
builder_.AppendCharacter('[');
switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS: {
@@ -579,12 +579,9 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
} else {
DCHECK(key->IsNumber());
key_handle = factory()->NumberToString(Handle<Object>(key, isolate_));
- uint32_t index;
if (key->IsSmi()) {
maybe_property = Object::GetElement(
isolate_, object, Smi::cast(key)->value());
- } else if (key_handle->AsArrayIndex(&index)) {
- maybe_property = Object::GetElement(isolate_, object, index);
} else {
maybe_property = Object::GetPropertyOrElement(object, key_handle);
}
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index f5ac6cb87a..e405f87bab 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -4,13 +4,27 @@
var $jsonSerializeAdapter;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalJSON = global.JSON;
+var InternalArray = utils.InternalArray;
+
+var MathMax;
+var MathMin;
+var ObjectHasOwnProperty;
+
+utils.Import(function(from) {
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
+ ObjectHasOwnProperty = from.ObjectHasOwnProperty;
+});
// -------------------------------------------------------------------
@@ -25,7 +39,7 @@ function Revive(holder, name, reviver) {
}
} else {
for (var p in val) {
- if (%_CallFunction(val, p, $objectHasOwnProperty)) {
+ if (HAS_OWN_PROPERTY(val, p)) {
var newElement = Revive(val, p, reviver);
if (IS_UNDEFINED(newElement)) {
delete val[p];
@@ -51,9 +65,7 @@ function JSONParse(text, reviver) {
function SerializeArray(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', []);
- }
+ if (!%PushIfAbsent(stack, value)) throw MakeTypeError(kCircularStructure);
var stepback = indent;
indent += gap;
var partial = new InternalArray();
@@ -82,16 +94,14 @@ function SerializeArray(value, replacer, stack, indent, gap) {
function SerializeObject(value, replacer, stack, indent, gap) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', []);
- }
+ if (!%PushIfAbsent(stack, value)) throw MakeTypeError(kCircularStructure);
var stepback = indent;
indent += gap;
var partial = new InternalArray();
if (IS_ARRAY(replacer)) {
var length = replacer.length;
for (var i = 0; i < length; i++) {
- if (%_CallFunction(replacer, i, $objectHasOwnProperty)) {
+ if (HAS_OWN_PROPERTY(replacer, i)) {
var p = replacer[i];
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
@@ -104,7 +114,7 @@ function SerializeObject(value, replacer, stack, indent, gap) {
}
} else {
for (var p in value) {
- if (%_CallFunction(value, p, $objectHasOwnProperty)) {
+ if (HAS_OWN_PROPERTY(value, p)) {
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
var member = %QuoteJSONString(p) + ":";
@@ -173,6 +183,30 @@ function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
return %BasicJSONStringify(value);
}
+ if (IS_ARRAY(replacer)) {
+ // Deduplicate replacer array items.
+ var property_list = new InternalArray();
+ var seen_properties = { __proto__: null };
+ var length = replacer.length;
+ for (var i = 0; i < length; i++) {
+ var v = replacer[i];
+ var item;
+ if (IS_STRING(v)) {
+ item = v;
+ } else if (IS_NUMBER(v)) {
+ item = %_NumberToString(v);
+ } else if (IS_STRING_WRAPPER(v) || IS_NUMBER_WRAPPER(v)) {
+ item = $toString(v);
+ } else {
+ continue;
+ }
+ if (!seen_properties[item]) {
+ property_list.push(item);
+ seen_properties[item] = true;
+ }
+ }
+ replacer = property_list;
+ }
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
@@ -183,7 +217,7 @@ function JSONStringify(value, replacer, space) {
}
var gap;
if (IS_NUMBER(space)) {
- space = $max(0, $min($toInteger(space), 10));
+ space = MathMax(0, MathMin($toInteger(space), 10));
gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
@@ -194,28 +228,6 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
- if (IS_ARRAY(replacer)) {
- // Deduplicate replacer array items.
- var property_list = new InternalArray();
- var seen_properties = { __proto__: null };
- var seen_sentinel = {};
- var length = replacer.length;
- for (var i = 0; i < length; i++) {
- var item = replacer[i];
- if (IS_STRING_WRAPPER(item)) {
- item = $toString(item);
- } else {
- if (IS_NUMBER_WRAPPER(item)) item = $toNumber(item);
- if (IS_NUMBER(item)) item = %_NumberToString(item);
- }
- if (IS_STRING(item) && seen_properties[item] != seen_sentinel) {
- property_list.push(item);
- // We cannot use true here because __proto__ needs to be an object.
- seen_properties[item] = seen_sentinel;
- }
- }
- replacer = property_list;
- }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
@@ -224,7 +236,7 @@ function JSONStringify(value, replacer, space) {
%AddNamedProperty(GlobalJSON, symbolToStringTag, "JSON", READ_ONLY | DONT_ENUM);
// Set up non-enumerable properties of the JSON object.
-$installFunctions(GlobalJSON, DONT_ENUM, [
+utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
"parse", JSONParse,
"stringify", JSONStringify
]);
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 78cd7df340..a02141d77a 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -12,6 +12,7 @@
#include "src/factory.h"
#include "src/jsregexp-inl.h"
#include "src/jsregexp.h"
+#include "src/messages.h"
#include "src/ostreams.h"
#include "src/parser.h"
#include "src/regexp-macro-assembler.h"
@@ -62,18 +63,17 @@ MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral(
MUST_USE_RESULT
static inline MaybeHandle<Object> ThrowRegExpException(
- Handle<JSRegExp> re,
- Handle<String> pattern,
- Handle<String> error_text,
- const char* message) {
+ Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text) {
Isolate* isolate = re->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, *pattern);
- elements->set(1, *error_text);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> regexp_err;
- THROW_NEW_ERROR(isolate, NewSyntaxError(message, array), Object);
+ THROW_NEW_ERROR(isolate, NewSyntaxError(MessageTemplate::kMalformedRegExp,
+ pattern, error_text),
+ Object);
+}
+
+
+inline void ThrowRegExpException(Handle<JSRegExp> re,
+ Handle<String> error_text) {
+ USE(ThrowRegExpException(re, Handle<String>(re->Pattern()), error_text));
}
@@ -159,10 +159,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
flags.is_multiline(), flags.is_unicode(),
&parse_result)) {
// Throw an exception if we fail to parse the pattern.
- return ThrowRegExpException(re,
- pattern,
- parse_result.error,
- "malformed_regexp");
+ return ThrowRegExpException(re, pattern, parse_result.error);
}
bool has_been_compiled = false;
@@ -352,19 +349,6 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
}
-static void CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
- Handle<String> error_message,
- Isolate* isolate) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, re->Pattern());
- elements->set(1, *error_message);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> error = factory->NewSyntaxError("malformed_regexp", array);
- isolate->Throw(*error);
-}
-
-
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
@@ -391,7 +375,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
DCHECK(error_string->IsString());
Handle<String> error_message(String::cast(error_string));
- CreateRegExpErrorObjectAndThrow(re, error_message, isolate);
+ ThrowRegExpException(re, error_message);
return false;
}
@@ -405,10 +389,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
flags.is_unicode(), &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- USE(ThrowRegExpException(re,
- pattern,
- compile_data.error,
- "malformed_regexp"));
+ USE(ThrowRegExpException(re, pattern, compile_data.error));
return false;
}
RegExpEngine::CompilationResult result = RegExpEngine::Compile(
@@ -419,7 +400,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
// Unable to compile regexp.
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
CStrVector(result.error_message)).ToHandleChecked();
- CreateRegExpErrorObjectAndThrow(re, error_message, isolate);
+ ThrowRegExpException(re, error_message);
return false;
}
@@ -637,14 +618,20 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
}
+static void EnsureSize(Handle<JSArray> array, uint32_t minimum_size) {
+ if (static_cast<uint32_t>(array->elements()->length()) < minimum_size) {
+ JSArray::SetLength(array, minimum_size);
+ }
+}
+
+
Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
Handle<String> subject,
int capture_count,
int32_t* match) {
DCHECK(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
- JSArray::EnsureSize(last_match_info,
- capture_register_count + kLastMatchOverhead);
+ EnsureSize(last_match_info, capture_register_count + kLastMatchOverhead);
DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
@@ -1110,7 +1097,10 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
node->set_on_work_list(false);
if (!node->label()->is_bound()) node->Emit(this, &new_trace);
}
- if (reg_exp_too_big_) return IrregexpRegExpTooBig(isolate_);
+ if (reg_exp_too_big_) {
+ macro_assembler_->AbortedCodeGeneration();
+ return IrregexpRegExpTooBig(isolate_);
+ }
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
heap->IncreaseTotalRegexpCodeGenerated(code->Size());
@@ -1582,7 +1572,7 @@ void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
// Returns the number of characters in the equivalence class, omitting those
-// that cannot occur in the source string because it is ASCII.
+// that cannot occur in the source string because it is Latin1.
static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
bool one_byte_subject,
unibrow::uchar* letters) {
@@ -1594,15 +1584,18 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
letters[0] = character;
length = 1;
}
- if (!one_byte_subject || character <= String::kMaxOneByteCharCode) {
- return length;
+
+ if (one_byte_subject) {
+ int new_length = 0;
+ for (int i = 0; i < length; i++) {
+ if (letters[i] <= String::kMaxOneByteCharCode) {
+ letters[new_length++] = letters[i];
+ }
+ }
+ length = new_length;
}
- // The standard requires that non-ASCII characters cannot have ASCII
- // character codes in their equivalence class.
- // TODO(dcarney): issue 3550 this is not actually true for Latin1 anymore,
- // is it? For example, \u00C5 is equivalent to \u212B.
- return 0;
+ return length;
}
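The deleted TODO is answered by filtering instead of bailing out: for a one-byte subject, case variants above the Latin1 range are dropped from the equivalence class rather than invalidating it. A worked fragment using the very pair the TODO cited (0x00C5 'Å' and 0x212B ANGSTROM SIGN are case-equivalent; kMaxOneByteCharCode is 0xFF):

uint16_t letters[] = {0x00E5, 0x00C5, 0x212B};  // 'å', 'Å', ANGSTROM SIGN
int length = 3;
int new_length = 0;
for (int i = 0; i < length; i++) {
  if (letters[i] <= 0xFF) letters[new_length++] = letters[i];
}
// new_length == 2: a one-byte subject can never contain 0x212B, so the
// matcher only needs to try the two Latin1 case variants.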
@@ -2281,14 +2274,12 @@ int ActionNode::EatsAtLeast(int still_to_find,
}
-void ActionNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
if (action_type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
} else if (action_type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2310,13 +2301,11 @@ int AssertionNode::EatsAtLeast(int still_to_find,
}
-void AssertionNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
if (assertion_type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2545,22 +2534,17 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
uc16 c = quarks[i];
- if (c > char_mask) {
- // If we expect a non-Latin1 character from an one-byte string,
- // there is no way we can match. Not even case-independent
- // matching can turn an Latin1 character into non-Latin1 or
- // vice versa.
- // TODO(dcarney): issue 3550. Verify that this works as expected.
- // For example, \u0178 is uppercase of \u00ff (y-umlaut).
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
if (compiler->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(isolate, c,
compiler->one_byte(), chars);
- DCHECK(length != 0); // Can only happen if c > char_mask (see above).
+ if (length == 0) {
+ // This can happen because all case variants are non-Latin1, but we
+ // know the input is Latin1.
+ details->set_cannot_match();
+ pos->determines_perfectly = false;
+ return;
+ }
if (length == 1) {
// This letter has no case equivalents, so it's nice and simple
// and the mask-compare will determine definitely whether we have
@@ -2591,6 +2575,11 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
// Don't ignore case. Nice simple case where the mask-compare will
// determine definitely whether we have a match at this character
// position.
+ if (c > char_mask) {
+ details->set_cannot_match();
+ pos->determines_perfectly = false;
+ return;
+ }
pos->mask = char_mask;
pos->value = c;
pos->determines_perfectly = true;
@@ -2945,16 +2934,14 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
-void LoopChoiceNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+void LoopChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
if (body_can_be_zero_length_ || budget <= 0) {
bm->SetRest(offset);
SaveBMInfo(bm, not_at_start, offset);
return;
}
- ChoiceNode::FillInBMInfo(offset, budget - 1, bm, not_at_start);
+ ChoiceNode::FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -3046,6 +3033,7 @@ static void EmitHat(RegExpCompiler* compiler,
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Isolate* isolate = assembler->isolate();
Trace::TriBool next_is_word_character = Trace::UNKNOWN;
bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
@@ -3057,7 +3045,7 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
- FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
+ FillInBMInfo(isolate, 0, kRecursionBudget, bm, not_at_start);
if (bm->at(0)->is_non_word())
next_is_word_character = Trace::FALSE_VALUE;
if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
@@ -4072,6 +4060,7 @@ int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
DCHECK(trace->is_trivial());
RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ Isolate* isolate = macro_assembler->isolate();
// At this point we know that we are at a non-greedy loop that will eat
// any character one at a time. Any non-anchored regexp has such a
// loop prepended to it in order to find where it starts. We look for
@@ -4090,7 +4079,7 @@ int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
compiler,
zone());
GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, false);
+ alt0.node()->FillInBMInfo(isolate, 0, kRecursionBudget, bm, false);
}
}
if (bm != NULL) {
@@ -4837,10 +4826,247 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
}
+int CompareFirstChar(RegExpTree* const* a, RegExpTree* const* b) {
+ RegExpAtom* atom1 = (*a)->AsAtom();
+ RegExpAtom* atom2 = (*b)->AsAtom();
+ uc16 character1 = atom1->data().at(0);
+ uc16 character2 = atom2->data().at(0);
+ if (character1 < character2) return -1;
+ if (character1 > character2) return 1;
+ return 0;
+}
+
+
+static unibrow::uchar Canonical(
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ unibrow::uchar c) {
+ unibrow::uchar chars[unibrow::Ecma262Canonicalize::kMaxWidth];
+ int length = canonicalize->get(c, '\0', chars);
+ DCHECK_LE(length, 1);
+ unibrow::uchar canonical = c;
+ if (length == 1) canonical = chars[0];
+ return canonical;
+}
+
+
+int CompareFirstCharCaseIndependent(
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ RegExpTree* const* a, RegExpTree* const* b) {
+ RegExpAtom* atom1 = (*a)->AsAtom();
+ RegExpAtom* atom2 = (*b)->AsAtom();
+ unibrow::uchar character1 = atom1->data().at(0);
+ unibrow::uchar character2 = atom2->data().at(0);
+ if (character1 == character2) return 0;
+ if (character1 >= 'a' || character2 >= 'a') {
+ character1 = Canonical(canonicalize, character1);
+ character2 = Canonical(canonicalize, character2);
+ }
+ return static_cast<int>(character1) - static_cast<int>(character2);
+}
+
+
+// We can stable sort runs of atoms, since the order does not matter if they
+// start with different characters.
+// Returns true if any consecutive atoms were found.
+bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ int length = alternatives->length();
+ bool found_consecutive_atoms = false;
+ for (int i = 0; i < length; i++) {
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (alternative->IsAtom()) break;
+ i++;
+ }
+ // i is length or it is the index of an atom.
+ if (i == length) break;
+ int first_atom = i;
+ i++;
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) break;
+ i++;
+ }
+ // Sort atoms to get ones with common prefixes together.
+ // This step is more tricky if we are in a case-independent regexp,
+ // because it would change /is|I/ to /I|is/, and order matters when
+ // the regexp parts don't match only disjoint starting points. To fix
+ // this we have a version of CompareFirstChar that uses case-
+ // independent character classes for comparison.
+ DCHECK_LT(first_atom, alternatives->length());
+ DCHECK_LE(i, alternatives->length());
+ DCHECK_LE(first_atom, i);
+ if (compiler->ignore_case()) {
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ compiler->isolate()->regexp_macro_assembler_canonicalize();
+ auto compare_closure =
+ [canonicalize](RegExpTree* const* a, RegExpTree* const* b) {
+ return CompareFirstCharCaseIndependent(canonicalize, a, b);
+ };
+ alternatives->StableSort(compare_closure, first_atom, i - first_atom);
+ } else {
+ alternatives->StableSort(CompareFirstChar, first_atom, i - first_atom);
+ }
+ if (i - first_atom > 1) found_consecutive_atoms = true;
+ }
+ return found_consecutive_atoms;
+}
+
+
+// Optimizes ab|ac|az to a(?:b|c|z).
+void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
+ Zone* zone = compiler->zone();
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ int length = alternatives->length();
+
+ int write_posn = 0;
+ int i = 0;
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) {
+ alternatives->at(write_posn++) = alternatives->at(i);
+ i++;
+ continue;
+ }
+ RegExpAtom* atom = alternative->AsAtom();
+ unibrow::uchar common_prefix = atom->data().at(0);
+ int first_with_prefix = i;
+ int prefix_length = atom->length();
+ i++;
+ while (i < length) {
+ alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) break;
+ atom = alternative->AsAtom();
+ unibrow::uchar new_prefix = atom->data().at(0);
+ if (new_prefix != common_prefix) {
+ if (!compiler->ignore_case()) break;
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ compiler->isolate()->regexp_macro_assembler_canonicalize();
+ new_prefix = Canonical(canonicalize, new_prefix);
+ common_prefix = Canonical(canonicalize, common_prefix);
+ if (new_prefix != common_prefix) break;
+ }
+ prefix_length = Min(prefix_length, atom->length());
+ i++;
+ }
+ if (i > first_with_prefix + 2) {
+ // Found worthwhile run of alternatives with common prefix of at least one
+ // character. The sorting function above did not sort on more than one
+ // character for reasons of correctness, but there may still be a longer
+ // common prefix if the terms were similar or presorted in the input.
+ // Find out how long the common prefix is.
+ int run_length = i - first_with_prefix;
+ atom = alternatives->at(first_with_prefix)->AsAtom();
+ for (int j = 1; j < run_length && prefix_length > 1; j++) {
+ RegExpAtom* old_atom =
+ alternatives->at(j + first_with_prefix)->AsAtom();
+ for (int k = 1; k < prefix_length; k++) {
+ if (atom->data().at(k) != old_atom->data().at(k)) {
+ prefix_length = k;
+ break;
+ }
+ }
+ }
+ RegExpAtom* prefix =
+ new (zone) RegExpAtom(atom->data().SubVector(0, prefix_length));
+ ZoneList<RegExpTree*>* pair = new (zone) ZoneList<RegExpTree*>(2, zone);
+ pair->Add(prefix, zone);
+ ZoneList<RegExpTree*>* suffixes =
+ new (zone) ZoneList<RegExpTree*>(run_length, zone);
+ for (int j = 0; j < run_length; j++) {
+ RegExpAtom* old_atom =
+ alternatives->at(j + first_with_prefix)->AsAtom();
+ int len = old_atom->length();
+ if (len == prefix_length) {
+ suffixes->Add(new (zone) RegExpEmpty(), zone);
+ } else {
+ RegExpTree* suffix = new (zone) RegExpAtom(
+ old_atom->data().SubVector(prefix_length, old_atom->length()));
+ suffixes->Add(suffix, zone);
+ }
+ }
+ pair->Add(new (zone) RegExpDisjunction(suffixes), zone);
+ alternatives->at(write_posn++) = new (zone) RegExpAlternative(pair);
+ } else {
+ // Just copy any non-worthwhile alternatives.
+ for (int j = first_with_prefix; j < i; j++) {
+ alternatives->at(write_posn++) = alternatives->at(j);
+ }
+ }
+ }
+ alternatives->Rewind(write_posn); // Trim end of array.
+}
+
+
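// Illustrative sketch, not part of the patch: the common-prefix computation
// above in miniature, over plain std::string atoms. For the sorted run
// {"ab", "ac", "az"} the shared prefix is "a", and the run is rewritten as
// a(?:b|c|z).
#include <algorithm>
#include <string>
#include <vector>

size_t CommonPrefixLength(const std::vector<std::string>& run) {
  size_t prefix = run.front().size();
  for (size_t j = 1; j < run.size() && prefix > 0; ++j) {
    size_t limit = std::min(prefix, run[j].size());
    size_t k = 0;
    while (k < limit && run[j][k] == run.front()[k]) ++k;
    prefix = k;  // shrink to the longest prefix shared by all atoms so far
  }
  return prefix;  // {"ab", "ac", "az"} -> 1, i.e. the prefix "a"
}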
+// Optimizes b|c|z to [bcz].
+void RegExpDisjunction::FixSingleCharacterDisjunctions(
+ RegExpCompiler* compiler) {
+ Zone* zone = compiler->zone();
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ int length = alternatives->length();
+
+ int write_posn = 0;
+ int i = 0;
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) {
+ alternatives->at(write_posn++) = alternatives->at(i);
+ i++;
+ continue;
+ }
+ RegExpAtom* atom = alternative->AsAtom();
+ if (atom->length() != 1) {
+ alternatives->at(write_posn++) = alternatives->at(i);
+ i++;
+ continue;
+ }
+ int first_in_run = i;
+ i++;
+ while (i < length) {
+ alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) break;
+ atom = alternative->AsAtom();
+ if (atom->length() != 1) break;
+ i++;
+ }
+ if (i > first_in_run + 1) {
+ // Found non-trivial run of single-character alternatives.
+ int run_length = i - first_in_run;
+ ZoneList<CharacterRange>* ranges =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ for (int j = 0; j < run_length; j++) {
+ RegExpAtom* old_atom = alternatives->at(j + first_in_run)->AsAtom();
+ DCHECK_EQ(old_atom->length(), 1);
+ ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
+ }
+ alternatives->at(write_posn++) =
+ new (zone) RegExpCharacterClass(ranges, false);
+ } else {
+ // Just copy any trivial alternatives.
+ for (int j = first_in_run; j < i; j++) {
+ alternatives->at(write_posn++) = alternatives->at(j);
+ }
+ }
+ }
+ alternatives->Rewind(write_posn); // Trim end of array.
+}
+
+
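// Illustrative sketch, not part of the patch: collapsing a run of
// single-character alternatives into one character class, as
// FixSingleCharacterDisjunctions above does for b|c|z -> [bcz].
#include <string>
#include <vector>

std::string ToCharacterClass(const std::vector<char>& run) {
  std::string char_class = "[";
  for (char c : run) char_class += c;  // each char becomes a singleton range
  return char_class + "]";             // {'b', 'c', 'z'} -> "[bcz]"
}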
RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
ZoneList<RegExpTree*>* alternatives = this->alternatives();
+
+ if (alternatives->length() > 2) {
+ bool found_consecutive_atoms = SortConsecutiveAtoms(compiler);
+ if (found_consecutive_atoms) RationalizeConsecutiveAtoms(compiler);
+ FixSingleCharacterDisjunctions(compiler);
+ if (alternatives->length() == 1) {
+ return alternatives->at(0)->ToNode(compiler, on_success);
+ }
+ }
+
int length = alternatives->length();
+
ChoiceNode* result =
new(compiler->zone()) ChoiceNode(length, compiler->zone());
for (int i = 0; i < length; i++) {
@@ -5825,8 +6051,7 @@ void Analysis::VisitAssertion(AssertionNode* that) {
}
-void BackReferenceNode::FillInBMInfo(int offset,
- int budget,
+void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Working out the set of characters that a backreference can match is too
@@ -5840,10 +6065,8 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
RegExpMacroAssembler::kTableSize);
-void ChoiceNode::FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+void ChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
ZoneList<GuardedAlternative>* alts = alternatives();
budget = (budget - 1) / alts->length();
for (int i = 0; i < alts->length(); i++) {
@@ -5853,16 +6076,14 @@ void ChoiceNode::FillInBMInfo(int offset,
SaveBMInfo(bm, not_at_start, offset);
return;
}
- alt.node()->FillInBMInfo(offset, budget, bm, not_at_start);
+ alt.node()->FillInBMInfo(isolate, offset, budget, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
-void TextNode::FillInBMInfo(int initial_offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
if (initial_offset >= bm->length()) return;
int offset = initial_offset;
int max_char = bm->max_char();
@@ -5883,9 +6104,7 @@ void TextNode::FillInBMInfo(int initial_offset,
if (bm->compiler()->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(
- Isolate::Current(),
- character,
- bm->max_char() == String::kMaxOneByteCharCode,
+ isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
chars);
for (int j = 0; j < length; j++) {
bm->Set(offset, chars[j]);
@@ -5915,9 +6134,7 @@ void TextNode::FillInBMInfo(int initial_offset,
if (initial_offset == 0) set_bm_info(not_at_start, bm);
return;
}
- on_success()->FillInBMInfo(offset,
- budget - 1,
- bm,
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm,
true); // Not at start after a text node.
if (initial_offset == 0) set_bm_info(not_at_start, bm);
}
@@ -6190,4 +6407,5 @@ bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
}
return too_much;
}
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
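// Illustrative sketch, not part of the patch: the shape of the refactor
// threaded through the FillInBMInfo overloads in this change. The isolate is
// passed as an explicit parameter down the recursion instead of being fetched
// from thread-local state via Isolate::Current(). Names here are generic.
struct Isolate;  // opaque in this sketch

struct Node {
  Node* next = nullptr;
  void FillInfo(Isolate* isolate, int budget) {
    if (budget <= 0 || next == nullptr) return;
    next->FillInfo(isolate, budget - 1);  // dependency travels with the call
  }
};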
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 5bd670c239..ff7759bfec 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -212,7 +212,7 @@ class RegExpImpl {
// and the total executable memory at any point.
static const int kRegExpExecutableMemoryLimit = 16 * MB;
static const int kRegExpCompiledLimit = 1 * MB;
- static const int kRegExpTooLargeToOptimize = 10 * KB;
+ static const int kRegExpTooLargeToOptimize = 20 * KB;
private:
static bool CompileIrregexp(Handle<JSRegExp> re,
@@ -619,10 +619,8 @@ class RegExpNode: public ZoneObject {
// the number of nodes we are willing to look at in order to create this data.
static const int kRecursionBudget = 200;
bool KeepRecursing(RegExpCompiler* compiler);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
UNREACHABLE();
}
@@ -731,11 +729,9 @@ class SeqRegExpNode: public RegExpNode {
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- on_success_->FillInBMInfo(offset, budget - 1, bm, not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
@@ -786,10 +782,8 @@ class ActionNode: public SeqRegExpNode {
return on_success()->GetQuickCheckDetails(
details, compiler, filled_in, not_at_start);
}
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start);
ActionType action_type() { return action_type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -855,10 +849,8 @@ class TextNode: public SeqRegExpNode {
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start);
void CalculateOffsets();
virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
@@ -915,10 +907,8 @@ class AssertionNode: public SeqRegExpNode {
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start);
AssertionType assertion_type() { return assertion_type_; }
private:
@@ -954,10 +944,8 @@ class BackReferenceNode: public SeqRegExpNode {
bool not_at_start) {
return;
}
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start);
private:
int start_reg_;
@@ -982,10 +970,8 @@ class EndNode: public RegExpNode {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
@@ -1077,10 +1063,8 @@ class ChoiceNode: public RegExpNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start);
bool being_calculated() { return being_calculated_; }
bool not_at_start() { return not_at_start_; }
@@ -1146,12 +1130,10 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- alternatives_->at(1).node()->FillInBMInfo(
- offset, budget - 1, bm, not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
+ not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
// For a negative lookahead we don't emit the quick check for the
@@ -1182,10 +1164,8 @@ class LoopChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(int offset,
- int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start);
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index ba76704d5f..77671328b4 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -21,8 +21,8 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
return handle(LayoutDescriptor::FromSmi(Smi::FromInt(0)), isolate);
}
length = GetSlowModeBackingStoreLength(length);
- return Handle<LayoutDescriptor>::cast(
- isolate->factory()->NewFixedTypedArray(length, kExternalUint32Array));
+ return Handle<LayoutDescriptor>::cast(isolate->factory()->NewFixedTypedArray(
+ length, kExternalUint32Array, true));
}
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 66a1f0fb07..25cece822a 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -284,5 +284,5 @@ bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) {
}
return true;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 1ac52f919f..b41c5852a8 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -41,19 +41,24 @@ DefaultPlatform::~DefaultPlatform() {
base::LockGuard<base::Mutex> guard(&lock_);
queue_.Terminate();
if (initialized_) {
- for (std::vector<WorkerThread*>::iterator i = thread_pool_.begin();
- i != thread_pool_.end(); ++i) {
+ for (auto i = thread_pool_.begin(); i != thread_pool_.end(); ++i) {
delete *i;
}
}
- for (std::map<v8::Isolate*, std::queue<Task*> >::iterator i =
- main_thread_queue_.begin();
- i != main_thread_queue_.end(); ++i) {
+ for (auto i = main_thread_queue_.begin(); i != main_thread_queue_.end();
+ ++i) {
while (!i->second.empty()) {
delete i->second.front();
i->second.pop();
}
}
+ for (auto i = main_thread_delayed_queue_.begin();
+ i != main_thread_delayed_queue_.end(); ++i) {
+ while (!i->second.empty()) {
+ delete i->second.top().second;
+ i->second.pop();
+ }
+ }
}
@@ -78,23 +83,56 @@ void DefaultPlatform::EnsureInitialized() {
}
+Task* DefaultPlatform::PopTaskInMainThreadQueue(v8::Isolate* isolate) {
+ auto it = main_thread_queue_.find(isolate);
+ if (it == main_thread_queue_.end() || it->second.empty()) {
+ return NULL;
+ }
+ Task* task = it->second.front();
+ it->second.pop();
+ return task;
+}
+
+
+Task* DefaultPlatform::PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate) {
+ auto it = main_thread_delayed_queue_.find(isolate);
+ if (it == main_thread_delayed_queue_.end() || it->second.empty()) {
+ return NULL;
+ }
+ double now = MonotonicallyIncreasingTime();
+ std::pair<double, Task*> deadline_and_task = it->second.top();
+ if (deadline_and_task.first > now) {
+ return NULL;
+ }
+ it->second.pop();
+ return deadline_and_task.second;
+}
+
+
bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
Task* task = NULL;
{
base::LockGuard<base::Mutex> guard(&lock_);
- std::map<v8::Isolate*, std::queue<Task*> >::iterator it =
- main_thread_queue_.find(isolate);
- if (it == main_thread_queue_.end() || it->second.empty()) {
+
+ // Move delayed tasks that hit their deadline to the main queue.
+ task = PopTaskInMainThreadDelayedQueue(isolate);
+ while (task != NULL) {
+ main_thread_queue_[isolate].push(task);
+ task = PopTaskInMainThreadDelayedQueue(isolate);
+ }
+
+ task = PopTaskInMainThreadQueue(isolate);
+
+ if (task == NULL) {
return false;
}
- task = it->second.front();
- it->second.pop();
}
task->Run();
delete task;
return true;
}
+
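// Illustrative sketch, not part of the patch: how an embedder drains this
// queue, assuming the v8::platform::PumpMessageLoop wrapper from
// include/libplatform/libplatform.h.
#include "include/libplatform/libplatform.h"
#include "include/v8.h"

void DrainForegroundTasks(v8::Platform* platform, v8::Isolate* isolate) {
  // Each call promotes due delayed tasks, runs at most one task, and
  // returns false once the queue for |isolate| is empty.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
  }
}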
void DefaultPlatform::CallOnBackgroundThread(Task *task,
ExpectedRuntime expected_runtime) {
EnsureInitialized();
@@ -108,6 +146,15 @@ void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
}
+void DefaultPlatform::CallDelayedOnForegroundThread(Isolate* isolate,
+ Task* task,
+ double delay_in_seconds) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ double deadline = MonotonicallyIncreasingTime() + delay_in_seconds;
+ main_thread_delayed_queue_[isolate].push(std::make_pair(deadline, task));
+}
+
+
double DefaultPlatform::MonotonicallyIncreasingTime() {
return base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 72b4d91aa8..fba5803f40 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
#define V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
+#include <functional>
#include <map>
#include <queue>
#include <vector>
@@ -37,11 +38,16 @@ class DefaultPlatform : public Platform {
Task* task, ExpectedRuntime expected_runtime) override;
virtual void CallOnForegroundThread(v8::Isolate* isolate,
Task* task) override;
+ virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) override;
double MonotonicallyIncreasingTime() override;
private:
static const int kMaxThreadPoolSize;
+ Task* PopTaskInMainThreadQueue(v8::Isolate* isolate);
+ Task* PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate);
+
base::Mutex lock_;
bool initialized_;
int thread_pool_size_;
@@ -49,6 +55,12 @@ class DefaultPlatform : public Platform {
TaskQueue queue_;
std::map<v8::Isolate*, std::queue<Task*> > main_thread_queue_;
+ typedef std::pair<double, Task*> DelayedEntry;
+ std::map<v8::Isolate*,
+ std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
+ std::greater<DelayedEntry> > >
+ main_thread_delayed_queue_;
+
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
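// Illustrative sketch, not part of the patch: the ordering behind the
// main_thread_delayed_queue_ declaration above. std::priority_queue is a
// max-heap by default; std::greater over (deadline, task) pairs flips it into
// a min-heap, so top() is always the entry with the earliest deadline (ties
// break on the pair's second member).
#include <functional>
#include <queue>
#include <utility>
#include <vector>

int EarliestTask() {
  typedef std::pair<double, int> DelayedEntry;  // (deadline, task id stand-in)
  std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
                      std::greater<DelayedEntry> > queue;
  queue.push(std::make_pair(5.0, 1));
  queue.push(std::make_pair(2.0, 2));
  return queue.top().second;  // 2: due first
}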
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 9b122fdbae..98f0343fa5 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -193,12 +193,19 @@ int List<T, P>::CountOccurrences(const T& elm, int start, int end) const {
}
-template<typename T, class P>
-void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
- ToVector().Sort(cmp);
+template <typename T, class P>
+template <typename CompareFunction>
+void List<T, P>::Sort(CompareFunction cmp) {
+ Sort(cmp, 0, length_);
+}
+
+
+template <typename T, class P>
+template <typename CompareFunction>
+void List<T, P>::Sort(CompareFunction cmp, size_t s, size_t l) {
+ ToVector().Sort(cmp, s, l);
#ifdef DEBUG
- for (int i = 1; i < length_; i++)
- DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0);
+ for (size_t i = s + 1; i < l; i++) DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0);
#endif
}
@@ -209,7 +216,30 @@ void List<T, P>::Sort() {
}
-template<typename T, class P>
+template <typename T, class P>
+template <typename CompareFunction>
+void List<T, P>::StableSort(CompareFunction cmp) {
+ StableSort(cmp, 0, length_);
+}
+
+
+template <typename T, class P>
+template <typename CompareFunction>
+void List<T, P>::StableSort(CompareFunction cmp, size_t s, size_t l) {
+ ToVector().StableSort(cmp, s, l);
+#ifdef DEBUG
+ for (size_t i = s + 1; i < l; i++) DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0);
+#endif
+}
+
+
+template <typename T, class P>
+void List<T, P>::StableSort() {
+ ToVector().StableSort();
+}
+
+
+template <typename T, class P>
void List<T, P>::Initialize(int capacity, P allocator) {
DCHECK(capacity >= 0);
data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 021cafe146..b636449c42 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -149,8 +149,16 @@ class List {
void Iterate(Visitor* visitor);
// Sort all list entries (using QuickSort)
- void Sort(int (*cmp)(const T* x, const T* y));
+ template <typename CompareFunction>
+ void Sort(CompareFunction cmp, size_t start, size_t length);
+ template <typename CompareFunction>
+ void Sort(CompareFunction cmp);
void Sort();
+ template <typename CompareFunction>
+ void StableSort(CompareFunction cmp, size_t start, size_t length);
+ template <typename CompareFunction>
+ void StableSort(CompareFunction cmp);
+ void StableSort();
INLINE(void Initialize(int capacity,
AllocationPolicy allocator = AllocationPolicy()));
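// Illustrative sketch, not part of the patch: what the new comparator
// templates allow. Sort/StableSort now take any callable and an optional
// subrange, so a caller can stable-sort just a run of elements with a lambda,
// as RegExpDisjunction::SortConsecutiveAtoms does earlier in this change.
// std::vector and std::stable_sort stand in for List<T> here; note the real
// List comparator returns an int rather than a less-than bool.
#include <algorithm>
#include <string>
#include <vector>

void StableSortRange(std::vector<std::string>* v, size_t start, size_t len) {
  std::stable_sort(v->begin() + start, v->begin() + start + len,
                   [](const std::string& a, const std::string& b) {
                     return a[0] < b[0];  // order by first character only
                   });
}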
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 36c8c7d90a..79d6cfe5f0 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -2190,4 +2190,5 @@ LAllocatorPhase::~LAllocatorPhase() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 89df2ce845..24c1301dc5 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -45,8 +45,7 @@ HGraph* LCodeGenBase::graph() const {
}
-LCodeGenBase::LCodeGenBase(LChunk* chunk,
- MacroAssembler* assembler,
+LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
CompilationInfo* info)
: chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
@@ -56,8 +55,8 @@ LCodeGenBase::LCodeGenBase(LChunk* chunk,
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- last_lazy_deopt_pc_(0) {
-}
+ deoptimization_literals_(8, info->zone()),
+ last_lazy_deopt_pc_(0) {}
bool LCodeGenBase::GenerateBody() {
@@ -190,6 +189,99 @@ void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
}
+int LCodeGenBase::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
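// Illustrative sketch, not part of the patch: the interning pattern used by
// DefineDeoptimizationLiteral above, with ints standing in for
// Handle<Object>. An equal entry's index is reused; otherwise the literal is
// appended and its new index returned.
#include <vector>

int DefineLiteral(std::vector<int>* pool, int literal) {
  for (size_t i = 0; i < pool->size(); ++i) {
    if ((*pool)[i] == literal) return static_cast<int>(i);  // reuse slot
  }
  pool->push_back(literal);
  return static_cast<int>(pool->size() - 1);  // index of the appended entry
}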
+void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
+ Translation* translation) {
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION: {
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginJSFrame(environment->ast_id(), shared_id, height);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
+ case JS_CONSTRUCT: {
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginConstructStubFrame(shared_id, translation_size);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
+ case JS_GETTER: {
+ DCHECK(translation_size == 1);
+ DCHECK(height == 0);
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginGetterStubFrame(shared_id);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
+ case JS_SETTER: {
+ DCHECK(translation_size == 2);
+ DCHECK(height == 0);
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginSetterStubFrame(shared_id);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
+ case ARGUMENTS_ADAPTOR: {
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginArgumentsAdaptorFrame(shared_id, translation_size);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
+ case STUB:
+ translation->BeginCompiledStubFrame(translation_size);
+ break;
+ }
+}
+
+
Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
@@ -198,4 +290,5 @@ Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
deopt_info.inlining_id = enter_inlined ? enter_inlined->inlining_id() : 0;
return deopt_info;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index 80afbaf235..fddc1b2599 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -14,6 +14,7 @@
namespace v8 {
namespace internal {
+class LEnvironment;
class LInstruction;
class LPlatformChunk;
@@ -50,6 +51,10 @@ class LCodeGenBase BASE_EMBEDDED {
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+ void WriteTranslationFrame(LEnvironment* environment,
+ Translation* translation);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
// Check that an environment assigned via AssignEnvironment is actually being
// used. Redundant assignments keep things alive longer than necessary, and
// consequently lead to worse code, so it's important to minimize this.
@@ -71,6 +76,7 @@ class LCodeGenBase BASE_EMBEDDED {
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
int last_lazy_deopt_pc_;
bool is_unused() const { return status_ == UNUSED; }
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 53ed919f2f..a9d7748ef3 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -272,9 +272,9 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
graph_(graph),
instructions_(32, info->zone()),
pointer_maps_(8, info->zone()),
- inlined_closures_(1, info->zone()),
- deprecation_dependencies_(MapLess(), MapAllocator(info->zone())),
- stability_dependencies_(MapLess(), MapAllocator(info->zone())) {}
+ inlined_functions_(1, info->zone()),
+ deprecation_dependencies_(32, info->zone()),
+ stability_dependencies_(8, info->zone()) {}
LLabel* LChunk::GetLabel(int block_id) const {
@@ -456,10 +456,6 @@ void LChunk::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const {
for (int i = 0; i < objects.length(); i++) {
AddWeakObjectToCodeDependency(isolate(), objects.at(i), code);
}
- if (FLAG_enable_ool_constant_pool) {
- code->constant_pool()->set_weak_object_state(
- ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE);
- }
code->set_can_have_weak_objects(true);
}
@@ -468,17 +464,13 @@ void LChunk::CommitDependencies(Handle<Code> code) const {
if (!code->is_optimized_code()) return;
HandleScope scope(isolate());
- for (MapSet::const_iterator it = deprecation_dependencies_.begin(),
- iend = deprecation_dependencies_.end(); it != iend; ++it) {
- Handle<Map> map = *it;
+ for (Handle<Map> map : deprecation_dependencies_) {
DCHECK(!map->is_deprecated());
DCHECK(map->CanBeDeprecated());
Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
}
- for (MapSet::const_iterator it = stability_dependencies_.begin(),
- iend = stability_dependencies_.end(); it != iend; ++it) {
- Handle<Map> map = *it;
+ for (Handle<Map> map : stability_dependencies_) {
DCHECK(map->is_stable());
DCHECK(map->CanTransition());
Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
@@ -726,4 +718,5 @@ LPhase::~LPhase() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index f5868823d1..046de19fd0 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -653,26 +653,26 @@ class LChunk : public ZoneObject {
int LookupDestination(int block_id) const;
Label* GetAssemblyLabel(int block_id) const;
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
+ const ZoneList<Handle<SharedFunctionInfo>>& inlined_functions() const {
+ return inlined_functions_;
}
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure, zone());
+ void AddInlinedFunction(Handle<SharedFunctionInfo> closure) {
+ inlined_functions_.Add(closure, zone());
}
void AddDeprecationDependency(Handle<Map> map) {
DCHECK(!map->is_deprecated());
if (!map->CanBeDeprecated()) return;
DCHECK(!info_->IsStub());
- deprecation_dependencies_.insert(map);
+ deprecation_dependencies_.Add(map, zone());
}
void AddStabilityDependency(Handle<Map> map) {
DCHECK(map->is_stable());
if (!map->CanTransition()) return;
DCHECK(!info_->IsStub());
- stability_dependencies_.insert(map);
+ stability_dependencies_.Add(map, zone());
}
Zone* zone() const { return info_->zone(); }
@@ -690,10 +690,6 @@ class LChunk : public ZoneObject {
int spill_slot_count_;
private:
- typedef std::less<Handle<Map> > MapLess;
- typedef zone_allocator<Handle<Map> > MapAllocator;
- typedef std::set<Handle<Map>, MapLess, MapAllocator> MapSet;
-
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const;
void CommitDependencies(Handle<Code> code) const;
@@ -702,9 +698,9 @@ class LChunk : public ZoneObject {
BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
- MapSet deprecation_dependencies_;
- MapSet stability_dependencies_;
+ ZoneList<Handle<SharedFunctionInfo>> inlined_functions_;
+ ZoneList<Handle<Map>> deprecation_dependencies_;
+ ZoneList<Handle<Map>> stability_dependencies_;
};
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index c03d8d3e93..150dcb8892 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -28,7 +28,7 @@ void SetElementSloppy(Handle<JSObject> object,
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
- JSObject::SetElement(object, index, value, NONE, SLOPPY).Assert();
+ JSObject::SetElement(object, index, value, SLOPPY).Assert();
}
@@ -764,7 +764,10 @@ class FunctionInfoListener {
ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
ZoneList<Variable*> context_list(
current_scope->ContextLocalCount(), zone);
- current_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+ ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
+ zone);
+ current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
+ &globals_list);
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
@@ -844,7 +847,7 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
{
// Creating verbose TryCatch from public API is currently the only way to
// force code save location. We do not use the object directly.
- v8::TryCatch try_catch;
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
try_catch.SetVerbose(true);
// A logical 'try' section.
@@ -1089,37 +1092,36 @@ class LiteralFixer {
};
+namespace {
+
// Check whether the code is natural function code (not a lazy-compile
// stub).
-static bool IsJSFunctionCode(Code* code) {
- return code->kind() == Code::FUNCTION;
-}
+bool IsJSFunctionCode(Code* code) { return code->kind() == Code::FUNCTION; }
// Returns true if an instance of candidate was inlined into function's code.
-static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
+bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
DisallowHeapAllocation no_gc;
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
- DeoptimizationInputData* data =
+ DeoptimizationInputData* const data =
DeoptimizationInputData::cast(function->code()->deoptimization_data());
-
- if (data == function->GetIsolate()->heap()->empty_fixed_array()) {
- return false;
- }
-
- FixedArray* literals = data->LiteralArray();
-
- int inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- if (inlined->shared() == candidate) return true;
+ if (data != function->GetIsolate()->heap()->empty_fixed_array()) {
+ FixedArray* const literals = data->LiteralArray();
+ int const inlined_count = data->InlinedFunctionCount()->value();
+ for (int i = 0; i < inlined_count; ++i) {
+ if (SharedFunctionInfo::cast(literals->get(i)) == candidate) {
+ return true;
+ }
+ }
}
return false;
}
+} // namespace
+
// Marks code that shares the same shared function info or has inlined
// code that shares the same function info.
@@ -1221,7 +1223,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<SharedFunctionInfo> shared_info =
UnwrapSharedFunctionInfoFromJSValue(function_wrapper);
CHECK(script_handle->IsScript() || script_handle->IsUndefined());
- shared_info->set_script(*script_handle);
+ SharedFunctionInfo::SetScript(shared_info, script_handle);
shared_info->DisableOptimization(kLiveEdit);
function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
@@ -2061,4 +2063,5 @@ bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
return isolate->active_function_info_listener() != NULL;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h
index 65fe1a61c7..495fcce64d 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/liveedit.h
@@ -249,7 +249,7 @@ class JSArrayBasedStruct {
protected:
void SetField(int field_position, Handle<Object> value) {
- JSObject::SetElement(array_, field_position, value, NONE, SLOPPY).Assert();
+ JSObject::SetElement(array_, field_position, value, SLOPPY).Assert();
}
void SetSmiValueField(int field_position, int value) {
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 278cff1fc8..835ed8e21e 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -240,4 +240,5 @@ void Log::MessageBuilder::WriteToLogFile() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 3856f6062f..afc3521c60 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -24,7 +24,7 @@ class Log {
static bool InitLogAtStart() {
return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp ||
- FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_jit_prof ||
+ FLAG_ll_prof || FLAG_perf_basic_prof ||
FLAG_log_internal_timer_events || FLAG_prof_cpp;
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 04a154450f..8f47e81f0e 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -19,7 +19,6 @@
#include "src/log-inl.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
-#include "src/perf-jit.h"
#include "src/runtime-profiler.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
@@ -48,12 +47,14 @@ for (int i = 0; i < listeners_.length(); ++i) { \
} \
} while (false);
-// ComputeMarker must only be used when SharedFunctionInfo is known.
-static const char* ComputeMarker(Code* code) {
+static const char* ComputeMarker(SharedFunctionInfo* shared, Code* code) {
switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
+ case Code::FUNCTION:
+ return shared->optimization_disabled() ? "" : "~";
+ case Code::OPTIMIZED_FUNCTION:
+ return "*";
+ default:
+ return "";
}
}
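// Illustrative sketch, not part of the patch: the marker ComputeMarker above
// now emits for function code, with optimizability read from the
// SharedFunctionInfo instead of the Code object. Assumes the code kind is
// FUNCTION or OPTIMIZED_FUNCTION; every other kind gets no marker.
#include <string>

std::string Marker(bool is_optimized_code, bool optimization_disabled) {
  if (is_optimized_code) return "*";       // Code::OPTIMIZED_FUNCTION
  if (!optimization_disabled) return "~";  // unoptimized but still optimizable
  return "";                               // optimization permanently disabled
}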
@@ -183,7 +184,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
CompilationInfo* info,
Name* name) {
name_buffer_->Init(tag);
- name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendName(name);
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
@@ -195,7 +196,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
CompilationInfo* info,
Name* source, int line, int column) {
name_buffer_->Init(tag);
- name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendString(shared->DebugName());
name_buffer_->AppendByte(' ');
if (source->IsString()) {
@@ -776,7 +777,6 @@ Logger::Logger(Isolate* isolate)
is_logging_(false),
log_(new Log(this)),
perf_basic_logger_(NULL),
- perf_jit_logger_(NULL),
ll_logger_(NULL),
jit_logger_(NULL),
listeners_(5),
@@ -953,57 +953,58 @@ TIMER_EVENTS_LIST(V)
#undef V
-void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
+namespace {
+// Emits the source code of a regexp. Used by regexp events.
+void LogRegExpSource(Handle<JSRegExp> regexp, Isolate* isolate,
+ Log::MessageBuilder* msg) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
- Log::MessageBuilder msg(log_);
- Handle<Object> source = Object::GetProperty(
- isolate_, regexp, "source").ToHandleChecked();
+ Handle<Object> source =
+ Object::GetProperty(isolate, regexp, "source").ToHandleChecked();
if (!source->IsString()) {
- msg.Append("no source");
+ msg->Append("no source");
return;
}
switch (regexp->TypeTag()) {
case JSRegExp::ATOM:
- msg.Append('a');
+ msg->Append('a');
break;
default:
break;
}
- msg.Append('/');
- msg.AppendDetailed(*Handle<String>::cast(source), false);
- msg.Append('/');
+ msg->Append('/');
+ msg->AppendDetailed(*Handle<String>::cast(source), false);
+ msg->Append('/');
// global flag
- Handle<Object> global = Object::GetProperty(
- isolate_, regexp, "global").ToHandleChecked();
+ Handle<Object> global =
+ Object::GetProperty(isolate, regexp, "global").ToHandleChecked();
if (global->IsTrue()) {
- msg.Append('g');
+ msg->Append('g');
}
// ignorecase flag
- Handle<Object> ignorecase = Object::GetProperty(
- isolate_, regexp, "ignoreCase").ToHandleChecked();
+ Handle<Object> ignorecase =
+ Object::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
if (ignorecase->IsTrue()) {
- msg.Append('i');
+ msg->Append('i');
}
// multiline flag
- Handle<Object> multiline = Object::GetProperty(
- isolate_, regexp, "multiline").ToHandleChecked();
+ Handle<Object> multiline =
+ Object::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
if (multiline->IsTrue()) {
- msg.Append('m');
+ msg->Append('m');
}
-
- msg.WriteToLogFile();
}
+} // namespace
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
if (!log_->IsEnabled() || !FLAG_log_regexp) return;
Log::MessageBuilder msg(log_);
msg.Append("regexp-compile,");
- LogRegExpSource(regexp);
+ LogRegExpSource(regexp, isolate_, &msg);
msg.Append(in_cache ? ",hit" : ",miss");
msg.WriteToLogFile();
}
@@ -1079,16 +1080,6 @@ void Logger::DeleteEvent(const char* name, void* object) {
}
-void Logger::NewEventStatic(const char* name, void* object, size_t size) {
- Isolate::Current()->logger()->NewEvent(name, object, size);
-}
-
-
-void Logger::DeleteEventStatic(const char* name, void* object) {
- Isolate::Current()->logger()->DeleteEvent(name, object);
-}
-
-
void Logger::CallbackEventInternal(const char* prefix, Name* name,
Address entry_point) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
@@ -1209,7 +1200,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
msg.Append(',');
msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
+ msg.Append(",%s", ComputeMarker(shared, code));
msg.WriteToLogFile();
}
@@ -1243,7 +1234,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
msg.Append(":%d:%d\",", line, column);
msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
+ msg.Append(",%s", ComputeMarker(shared, code));
msg.WriteToLogFile();
}
@@ -1827,11 +1818,6 @@ bool Logger::SetUp(Isolate* isolate) {
addCodeEventListener(perf_basic_logger_);
}
- if (FLAG_perf_jit_prof) {
- perf_jit_logger_ = new PerfJitLogger();
- addCodeEventListener(perf_jit_logger_);
- }
-
if (FLAG_ll_prof) {
ll_logger_ = new LowLevelLogger(log_file_name.str().c_str());
addCodeEventListener(ll_logger_);
@@ -1900,12 +1886,6 @@ FILE* Logger::TearDown() {
perf_basic_logger_ = NULL;
}
- if (perf_jit_logger_) {
- removeCodeEventListener(perf_jit_logger_);
- delete perf_jit_logger_;
- perf_jit_logger_ = NULL;
- }
-
if (ll_logger_) {
removeCodeEventListener(ll_logger_);
delete ll_logger_;
@@ -1921,4 +1901,5 @@ FILE* Logger::TearDown() {
return log_->Close();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index c0559e7895..33c1b29d96 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -146,7 +146,6 @@ struct TickSample;
class JitLogger;
class PerfBasicLogger;
class LowLevelLogger;
-class PerfJitLogger;
class Sampler;
class Logger {
@@ -188,11 +187,6 @@ class Logger {
void NewEvent(const char* name, void* object, size_t size);
void DeleteEvent(const char* name, void* object);
- // Static versions of the above, operate on current isolate's logger.
- // Used in TRACK_MEMORY(TypeName) defined in globals.h
- static void NewEventStatic(const char* name, void* object, size_t size);
- static void DeleteEventStatic(const char* name, void* object);
-
// Emits an event with a tag, and some resource usage information.
// -> (name, tag, <rusage information>).
// Currently, the resource usage information is a process time stamp
@@ -360,9 +354,6 @@ class Logger {
// Internal configurable move event.
void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
- // Emits the source code of a regexp. Used by regexp events.
- void LogRegExpSource(Handle<JSRegExp> regexp);
-
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
@@ -406,7 +397,6 @@ class Logger {
bool is_logging_;
Log* log_;
PerfBasicLogger* perf_basic_logger_;
- PerfJitLogger* perf_jit_logger_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
List<CodeEventListener*> listeners_;
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
index a801e3493e..3df2194d3f 100644
--- a/deps/v8/src/lookup-inl.h
+++ b/deps/v8/src/lookup-inl.h
@@ -7,13 +7,15 @@
#include "src/lookup.h"
+#include "src/elements.h"
+
namespace v8 {
namespace internal {
JSReceiver* LookupIterator::NextHolder(Map* map) {
DisallowHeapAllocation no_gc;
- if (map->prototype()->IsNull()) return NULL;
+ if (!map->prototype()->IsJSReceiver()) return NULL;
JSReceiver* next = JSReceiver::cast(map->prototype());
DCHECK(!next->map()->IsGlobalObjectMap() ||
@@ -42,36 +44,62 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
case NOT_FOUND:
if (map->IsJSProxyMap()) return JSPROXY;
if (map->is_access_check_needed() &&
- !isolate_->IsInternallyUsedPropertyName(name_)) {
+ (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
return ACCESS_CHECK;
}
// Fall through.
case ACCESS_CHECK:
- if (exotic_index_state_ != ExoticIndexState::kNoIndex &&
+ if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
IsIntegerIndexedExotic(holder)) {
return INTEGER_INDEXED_EXOTIC;
}
- if (check_interceptor() && map->has_named_interceptor() &&
+ if (check_interceptor() && HasInterceptor(map) &&
!SkipInterceptor(JSObject::cast(holder))) {
return INTERCEPTOR;
}
// Fall through.
case INTERCEPTOR:
- if (map->is_dictionary_map()) {
- NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
- number_ = dict->FindEntry(name_);
- if (number_ == NameDictionary::kNotFound) return NOT_FOUND;
- if (holder->IsGlobalObject()) {
- DCHECK(dict->ValueAt(number_)->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
- if (cell->value()->IsTheHole()) return NOT_FOUND;
+ if (IsElement()) {
+ // TODO(verwaest): Optimize.
+ if (holder->IsStringObjectWithCharacterAt(index_)) {
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kNoCell);
+ } else {
+ JSObject* js_object = JSObject::cast(holder);
+ if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
+ return NOT_FOUND;
+ }
+
+ ElementsAccessor* accessor = js_object->GetElementsAccessor();
+ FixedArrayBase* backing_store = js_object->elements();
+ number_ =
+ accessor->GetEntryForIndex(js_object, backing_store, index_);
+ if (number_ == kMaxUInt32) return NOT_FOUND;
+ property_details_ = accessor->GetDetails(backing_store, number_);
}
- property_details_ = dict->DetailsAt(number_);
- } else {
+ } else if (!map->is_dictionary_map()) {
DescriptorArray* descriptors = map->instance_descriptors();
- number_ = descriptors->SearchWithCache(*name_, map);
- if (number_ == DescriptorArray::kNotFound) return NOT_FOUND;
+ int number = descriptors->SearchWithCache(*name_, map);
+ if (number == DescriptorArray::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
property_details_ = descriptors->GetDetails(number_);
+ } else if (map->IsGlobalObjectMap()) {
+ GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ DCHECK(dict->ValueAt(number_)->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
+ if (cell->value()->IsTheHole()) return NOT_FOUND;
+ property_details_ = cell->property_details();
+ } else {
+ NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == NameDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = dict->DetailsAt(number_);
}
has_property_ = true;
switch (property_details_.kind()) {
@@ -97,7 +125,7 @@ LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
Map* const map, JSReceiver* const holder) {
switch (state_) {
case NOT_FOUND:
- if (check_interceptor() && map->has_named_interceptor() &&
+ if (check_interceptor() && HasInterceptor(map) &&
!SkipInterceptor(JSObject::cast(holder))) {
return INTERCEPTOR;
}
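// Illustrative sketch, not part of the patch: the element-lookup convention
// used in the new IsElement() branch above. An ElementsAccessor maps an
// element index to a backing-store entry and signals "absent" with the
// kMaxUInt32 sentinel; a vector with a hole marker stands in for V8's
// backing stores.
#include <cstdint>
#include <vector>

const uint32_t kMaxUInt32Sentinel = 0xFFFFFFFFu;
const int kTheHole = -1;  // stand-in for V8's hole value

uint32_t GetEntryForIndex(const std::vector<int>& backing, uint32_t index) {
  if (index >= backing.size() || backing[index] == kTheHole) {
    return kMaxUInt32Sentinel;  // not found
  }
  return index;  // fast elements: entry == index when present
}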
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 809fd0eb2e..81793d344e 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -87,11 +87,6 @@ Handle<JSObject> LookupIterator::GetStoreTarget() const {
}
-bool LookupIterator::IsBootstrapping() const {
- return isolate_->bootstrapper()->IsActive();
-}
-
-
bool LookupIterator::HasAccess() const {
DCHECK_EQ(ACCESS_CHECK, state_);
return isolate_->MayAccess(GetHolder<JSObject>());
@@ -106,13 +101,57 @@ void LookupIterator::ReloadPropertyInformation() {
}
+void LookupIterator::ReloadHolderMap() {
+ DCHECK_EQ(DATA, state_);
+ DCHECK(IsElement());
+ DCHECK(JSObject::cast(*holder_)->HasExternalArrayElements() ||
+ JSObject::cast(*holder_)->HasFixedTypedArrayElements());
+ if (*holder_map_ != holder_->map()) {
+ holder_map_ = handle(holder_->map(), isolate_);
+ }
+}
+
+
void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
- if (holder_map_->is_dictionary_map()) return;
- holder_map_ =
- Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
- JSObject::MigrateToMap(GetHolder<JSObject>(), holder_map_);
+
+ Handle<JSObject> holder = GetHolder<JSObject>();
+
+ if (IsElement()) {
+ ElementsKind old_kind = holder_map_->elements_kind();
+ holder_map_ = Map::PrepareForDataElement(holder_map_, value);
+ ElementsKind new_kind = holder_map_->elements_kind();
+ if (new_kind != old_kind) {
+ // TODO(verwaest): Handle element migration in MigrateToMap.
+ JSObject::UpdateAllocationSite(holder, new_kind);
+ if (IsFastDoubleElementsKind(old_kind) !=
+ IsFastDoubleElementsKind(new_kind)) {
+ uint32_t capacity = holder->elements()->length();
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(new_kind);
+ accessor->GrowCapacityAndConvert(holder, capacity);
+ // GrowCapacityAndConvert migrated the object. No reloading of property
+ // information is necessary for elements.
+ return;
+ } else if (FLAG_trace_elements_transitions) {
+ Handle<FixedArrayBase> elements(holder->elements());
+ JSObject::PrintElementsTransition(stdout, holder, old_kind, elements,
+ new_kind, elements);
+ }
+ }
+
+ // Copy the backing store if it is copy-on-write.
+ if (IsFastSmiOrObjectElementsKind(new_kind)) {
+ JSObject::EnsureWritableFastElements(holder);
+ }
+
+ } else {
+ if (holder_map_->is_dictionary_map()) return;
+ holder_map_ =
+ Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
+ }
+
+ JSObject::MigrateToMap(holder, holder_map_);
ReloadPropertyInformation();
}
@@ -122,7 +161,14 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
Handle<JSObject> holder = GetHolder<JSObject>();
- if (holder_map_->is_dictionary_map()) {
+ if (IsElement()) {
+ DCHECK(!holder->HasExternalArrayElements());
+ DCHECK(!holder->HasFixedTypedArrayElements());
+ DCHECK(attributes != NONE || !holder->HasFastElements());
+ Handle<FixedArrayBase> elements(holder->elements());
+ holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
+ attributes);
+ } else if (holder_map_->is_dictionary_map()) {
PropertyDetails details(attributes, v8::internal::DATA, 0,
PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(holder, name(), value, details);
@@ -142,8 +188,10 @@ void LookupIterator::PrepareTransitionToDataProperty(
Handle<Object> value, PropertyAttributes attributes,
Object::StoreFromKeyed store_mode) {
if (state_ == TRANSITION) return;
- DCHECK_NE(LookupIterator::ACCESSOR, state_);
- DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, state_);
+ DCHECK(state_ != LookupIterator::ACCESSOR ||
+ (GetAccessors()->IsAccessorInfo() &&
+ AccessorInfo::cast(*GetAccessors())->is_special_data_property()));
+ DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
@@ -167,7 +215,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
Handle<GlobalObject>::cast(receiver), name());
DCHECK(cell->value()->IsTheHole());
transition_ = cell;
- } else if (transition->GetBackPointer()->IsMap()) {
+ } else if (!transition->is_dictionary_map()) {
property_details_ = transition->GetLastDescriptorDetails();
has_property_ = true;
}
@@ -186,6 +234,28 @@ void LookupIterator::ApplyTransitionToDataProperty() {
}
+void LookupIterator::Delete() {
+ Handle<JSObject> holder = Handle<JSObject>::cast(holder_);
+ if (IsElement()) {
+ ElementsAccessor* accessor = holder->GetElementsAccessor();
+ accessor->Delete(holder, number_);
+ } else {
+ PropertyNormalizationMode mode = holder->map()->is_prototype_map()
+ ? KEEP_INOBJECT_PROPERTIES
+ : CLEAR_INOBJECT_PROPERTIES;
+
+ if (holder->HasFastProperties()) {
+ JSObject::NormalizeProperties(holder, mode, 0, "DeletingProperty");
+ holder_map_ = handle(holder->map(), isolate_);
+ ReloadPropertyInformation();
+ }
+ // TODO(verwaest): Get rid of the name_ argument.
+ JSObject::DeleteNormalizedProperty(holder, name_, number_);
+ JSObject::ReoptimizeIfPrototype(holder);
+ }
+}
+
+
void LookupIterator::TransitionToAccessorProperty(
AccessorComponent component, Handle<Object> accessor,
PropertyAttributes attributes) {
@@ -232,6 +302,10 @@ void LookupIterator::TransitionToAccessorProperty(
bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
+ return InternalHolderIsReceiverOrHiddenPrototype();
+}
+
+bool LookupIterator::InternalHolderIsReceiverOrHiddenPrototype() const {
// Optimization that only works if configuration_ is not mutable.
if (!check_prototype_chain()) return true;
DisallowHeapAllocation no_gc;
@@ -256,12 +330,23 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
Handle<Object> LookupIterator::FetchValue() const {
Object* result = NULL;
Handle<JSObject> holder = GetHolder<JSObject>();
- if (holder_map_->is_dictionary_map()) {
- result = holder->property_dictionary()->ValueAt(number_);
- if (holder_map_->IsGlobalObjectMap()) {
- DCHECK(result->IsPropertyCell());
- result = PropertyCell::cast(result)->value();
+ if (IsElement()) {
+ // TODO(verwaest): Optimize.
+ if (holder->IsStringObjectWithCharacterAt(index_)) {
+ Handle<JSValue> js_value = Handle<JSValue>::cast(holder);
+ Handle<String> string(String::cast(js_value->value()));
+ return factory()->LookupSingleCharacterStringFromCode(
+ String::Flatten(string)->Get(index_));
}
+
+ ElementsAccessor* accessor = holder->GetElementsAccessor();
+ return accessor->Get(holder, index_);
+ } else if (holder_map_->IsGlobalObjectMap()) {
+ result = holder->global_dictionary()->ValueAt(number_);
+ DCHECK(result->IsPropertyCell());
+ result = PropertyCell::cast(result)->value();
+ } else if (holder_map_->is_dictionary_map()) {
+ result = holder->property_dictionary()->ValueAt(number_);
} else if (property_details_.type() == v8::internal::DATA) {
FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
@@ -285,6 +370,7 @@ int LookupIterator::GetConstantIndex() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+ DCHECK(!IsElement());
return descriptor_number();
}
@@ -293,6 +379,7 @@ FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
DCHECK_EQ(v8::internal::DATA, property_details_.type());
+ DCHECK(!IsElement());
int index =
holder_map_->instance_descriptors()->GetFieldIndex(descriptor_number());
bool is_double = representation().IsDouble();
@@ -311,9 +398,10 @@ Handle<HeapType> LookupIterator::GetFieldType() const {
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
+ DCHECK(!IsElement());
Handle<JSObject> holder = GetHolder<JSObject>();
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Object* value = global->property_dictionary()->ValueAt(dictionary_entry());
+ Object* value = global->global_dictionary()->ValueAt(dictionary_entry());
DCHECK(value->IsPropertyCell());
return handle(PropertyCell::cast(value));
}
@@ -335,15 +423,17 @@ Handle<Object> LookupIterator::GetDataValue() const {
void LookupIterator::WriteDataValue(Handle<Object> value) {
DCHECK_EQ(DATA, state_);
Handle<JSObject> holder = GetHolder<JSObject>();
- if (holder_map_->is_dictionary_map()) {
- Handle<NameDictionary> property_dictionary =
- handle(holder->property_dictionary());
- if (holder->IsGlobalObject()) {
- PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
- property_details_);
- } else {
- property_dictionary->ValueAtPut(dictionary_entry(), *value);
- }
+ if (IsElement()) {
+ ElementsAccessor* accessor = holder->GetElementsAccessor();
+ accessor->Set(holder->elements(), index_, *value);
+ } else if (holder->IsGlobalObject()) {
+ Handle<GlobalDictionary> property_dictionary =
+ handle(holder->global_dictionary());
+ PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
+ property_details_);
+ } else if (holder_map_->is_dictionary_map()) {
+ NameDictionary* property_dictionary = holder->property_dictionary();
+ property_dictionary->ValueAtPut(dictionary_entry(), *value);
} else if (property_details_.type() == v8::internal::DATA) {
holder->WriteToField(descriptor_number(), *value);
} else {
@@ -353,21 +443,27 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
bool LookupIterator::IsIntegerIndexedExotic(JSReceiver* holder) {
- DCHECK(exotic_index_state_ != ExoticIndexState::kNoIndex);
+ DCHECK(exotic_index_state_ != ExoticIndexState::kNotExotic);
// Currently typed arrays are the only such objects.
if (!holder->IsJSTypedArray()) return false;
- if (exotic_index_state_ == ExoticIndexState::kIndex) return true;
+ if (exotic_index_state_ == ExoticIndexState::kExotic) return true;
+ if (!InternalHolderIsReceiverOrHiddenPrototype()) {
+ exotic_index_state_ = ExoticIndexState::kNotExotic;
+ return false;
+ }
DCHECK(exotic_index_state_ == ExoticIndexState::kUninitialized);
bool result = false;
// Compute and cache result.
- if (name()->IsString()) {
+ if (IsElement()) {
+ result = index_ >= JSTypedArray::cast(holder)->length_value();
+ } else if (name()->IsString()) {
Handle<String> name_string = Handle<String>::cast(name());
if (name_string->length() != 0) {
result = IsSpecialIndex(isolate_->unicode_cache(), *name_string);
}
}
exotic_index_state_ =
- result ? ExoticIndexState::kIndex : ExoticIndexState::kNoIndex;
+ result ? ExoticIndexState::kExotic : ExoticIndexState::kNotExotic;
return result;
}
@@ -378,8 +474,26 @@ void LookupIterator::InternalizeName() {
}
+bool LookupIterator::HasInterceptor(Map* map) const {
+ if (IsElement()) return map->has_indexed_interceptor();
+ return map->has_named_interceptor();
+}
+
+
+Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
+ DCHECK_EQ(INTERCEPTOR, state_);
+ return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
+}
+
+
+InterceptorInfo* LookupIterator::GetInterceptor(JSObject* holder) const {
+ if (IsElement()) return holder->GetIndexedInterceptor();
+ return holder->GetNamedInterceptor();
+}
+
+
bool LookupIterator::SkipInterceptor(JSObject* holder) {
- auto info = holder->GetNamedInterceptor();
+ auto info = GetInterceptor(holder);
// TODO(dcarney): check for symbol/can_intercept_symbols here as well.
if (info->non_masking()) {
switch (interceptor_state_) {
@@ -394,4 +508,5 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
}
return interceptor_state_ == InterceptorState::kProcessNonMasking;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index a574980c1f..90edd8b43d 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -26,7 +26,8 @@ class LookupIterator final BASE_EMBEDDED {
HIDDEN_SKIP_INTERCEPTOR = kHidden,
HIDDEN = kHidden | kInterceptor,
PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kHidden | kPrototypeChain,
- PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor
+ PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor,
+ DEFAULT = PROTOTYPE_CHAIN
};
enum State {
@@ -44,43 +45,136 @@ class LookupIterator final BASE_EMBEDDED {
};
LookupIterator(Handle<Object> receiver, Handle<Name> name,
- Configuration configuration = PROTOTYPE_CHAIN)
+ Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
exotic_index_state_(ExoticIndexState::kUninitialized),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
- name_(name),
+ name_(Name::Flatten(name)),
+ // kMaxUInt32 isn't a valid index.
+ index_(kMaxUInt32),
receiver_(receiver),
holder_(GetRoot(receiver_, isolate_)),
holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
+#ifdef DEBUG
+ uint32_t index; // Assert that the name is not an array index.
+ DCHECK(!name->AsArrayIndex(&index));
+#endif // DEBUG
Next();
}
LookupIterator(Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder,
- Configuration configuration = PROTOTYPE_CHAIN)
+ Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
exotic_index_state_(ExoticIndexState::kUninitialized),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
- name_(name),
+ name_(Name::Flatten(name)),
+ // kMaxUInt32 isn't a valid index.
+ index_(kMaxUInt32),
receiver_(receiver),
holder_(holder),
holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
+#ifdef DEBUG
+ uint32_t index; // Assert that the name is not an array index.
+ DCHECK(!name->AsArrayIndex(&index));
+#endif // DEBUG
Next();
}
+ LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
+ Configuration configuration = DEFAULT)
+ : configuration_(configuration),
+ state_(NOT_FOUND),
+ exotic_index_state_(ExoticIndexState::kUninitialized),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
+ isolate_(isolate),
+ name_(),
+ index_(index),
+ receiver_(receiver),
+ holder_(GetRoot(receiver_, isolate_)),
+ holder_map_(holder_->map(), isolate_),
+ initial_holder_(holder_),
+ number_(DescriptorArray::kNotFound) {
+ // kMaxUInt32 isn't a valid index.
+ DCHECK_NE(kMaxUInt32, index_);
+ Next();
+ }
+
+ LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
+ Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT)
+ : configuration_(configuration),
+ state_(NOT_FOUND),
+ exotic_index_state_(ExoticIndexState::kUninitialized),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
+ isolate_(isolate),
+ name_(),
+ index_(index),
+ receiver_(receiver),
+ holder_(holder),
+ holder_map_(holder_->map(), isolate_),
+ initial_holder_(holder_),
+ number_(DescriptorArray::kNotFound) {
+ // kMaxUInt32 isn't a valid index.
+ DCHECK_NE(kMaxUInt32, index_);
+ Next();
+ }
+
+ static LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Configuration configuration = DEFAULT) {
+ name = Name::Flatten(name);
+ uint32_t index;
+ LookupIterator it =
+ name->AsArrayIndex(&index)
+ ? LookupIterator(isolate, receiver, index, configuration)
+ : LookupIterator(receiver, name, configuration);
+ it.name_ = name;
+ return it;
+ }
+
+ static LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder, Configuration configuration = DEFAULT) {
+ name = Name::Flatten(name);
+ uint32_t index;
+ LookupIterator it =
+ name->AsArrayIndex(&index)
+ ? LookupIterator(isolate, receiver, index, holder, configuration)
+ : LookupIterator(receiver, name, holder, configuration);
+ it.name_ = name;
+ return it;
+ }
+
Isolate* isolate() const { return isolate_; }
State state() const { return state_; }
- Handle<Name> name() const { return name_; }
+
+ Handle<Name> name() const {
+ DCHECK(!IsElement());
+ return name_;
+ }
+ Handle<Name> GetName() {
+ if (name_.is_null()) {
+ DCHECK(IsElement());
+ name_ = isolate_->factory()->Uint32ToString(index_);
+ }
+ return name_;
+ }
+ uint32_t index() const { return index_; }
+
+ bool IsElement() const { return index_ != kMaxUInt32; }
bool IsFound() const { return state_ != NOT_FOUND; }
void Next();
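
Together the index constructors and the PropertyOrElement factories split every key into one of two paths: a key that is the canonical decimal form of a uint32 below 2^32 - 1 becomes an element index, everything else stays a named lookup, and kMaxUInt32 serves as the "not an element" sentinel checked by IsElement(). A standalone sketch of that classification (an assumed helper mirroring Name::AsArrayIndex, not V8's implementation):

#include <cstdint>
#include <cstdio>
#include <string>

// Illustrative stand-in for Name::AsArrayIndex: true iff `name` is the
// canonical decimal spelling of a uint32 strictly below kMaxUInt32.
bool AsArrayIndex(const std::string& name, uint32_t* index) {
  if (name.empty() || name.size() > 10) return false;   // uint32 has <= 10 digits
  if (name.size() > 1 && name[0] == '0') return false;  // no leading zeros
  uint64_t value = 0;
  for (char c : name) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + (c - '0');
  }
  if (value >= 0xFFFFFFFFull) return false;  // kMaxUInt32 is the sentinel
  *index = static_cast<uint32_t>(value);
  return true;
}

int main() {
  uint32_t i;
  std::printf("%d\n", AsArrayIndex("42", &i));        // 1: element path
  std::printf("%d\n", AsArrayIndex("042", &i));       // 0: named path
  std::printf("%d\n", AsArrayIndex("toString", &i));  // 0: named path
}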
@@ -116,11 +210,13 @@ class LookupIterator final BASE_EMBEDDED {
bool IsCacheableTransition() {
if (state_ != TRANSITION) return false;
return transition_->IsPropertyCell() ||
- transition_map()->GetBackPointer()->IsMap();
+ (!transition_map()->is_dictionary_map() &&
+ transition_map()->GetBackPointer()->IsMap());
}
void ApplyTransitionToDataProperty();
void ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes);
+ void Delete();
void TransitionToAccessorProperty(AccessorComponent component,
Handle<Object> accessor,
PropertyAttributes attributes);
@@ -139,11 +235,11 @@ class LookupIterator final BASE_EMBEDDED {
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
Handle<Object> GetAccessors() const;
+ Handle<InterceptorInfo> GetInterceptor() const;
Handle<Object> GetDataValue() const;
- // Usually returns the value that was passed in, but may perform
- // non-observable modifications on it, such as internalize strings.
void WriteDataValue(Handle<Object> value);
void InternalizeName();
+ void ReloadHolderMap();
private:
enum class InterceptorState {
@@ -161,12 +257,13 @@ class LookupIterator final BASE_EMBEDDED {
Handle<Object> FetchValue() const;
void ReloadPropertyInformation();
bool SkipInterceptor(JSObject* holder);
-
- bool IsBootstrapping() const;
+ bool HasInterceptor(Map* map) const;
+ bool InternalHolderIsReceiverOrHiddenPrototype() const;
+ InterceptorInfo* GetInterceptor(JSObject* holder) const;
bool check_hidden() const { return (configuration_ & kHidden) != 0; }
bool check_interceptor() const {
- return !IsBootstrapping() && (configuration_ & kInterceptor) != 0;
+ return (configuration_ & kInterceptor) != 0;
}
bool check_prototype_chain() const {
return (configuration_ & kPrototypeChain) != 0;
@@ -184,7 +281,7 @@ class LookupIterator final BASE_EMBEDDED {
static Configuration ComputeConfiguration(
Configuration configuration, Handle<Name> name) {
- if (name->IsOwn()) {
+ if (name->IsPrivate()) {
return static_cast<Configuration>(configuration &
HIDDEN_SKIP_INTERCEPTOR);
} else {
@@ -192,7 +289,7 @@ class LookupIterator final BASE_EMBEDDED {
}
}
- enum class ExoticIndexState { kUninitialized, kNoIndex, kIndex };
+ enum class ExoticIndexState { kUninitialized, kNotExotic, kExotic };
bool IsIntegerIndexedExotic(JSReceiver* holder);
// If configuration_ becomes mutable, update
@@ -205,12 +302,13 @@ class LookupIterator final BASE_EMBEDDED {
PropertyDetails property_details_;
Isolate* const isolate_;
Handle<Name> name_;
+ uint32_t index_;
Handle<Object> transition_;
const Handle<Object> receiver_;
Handle<JSReceiver> holder_;
Handle<Map> holder_map_;
const Handle<JSReceiver> initial_holder_;
- int number_;
+ uint32_t number_;
};
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index b59fd3b94d..5ea9657515 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -135,11 +135,11 @@ class FrameAndConstantPoolScope {
: masm_(masm),
type_(type),
old_has_frame_(masm->has_frame()),
- old_constant_pool_available_(FLAG_enable_ool_constant_pool &&
- masm->is_ool_constant_pool_available()) {
+ old_constant_pool_available_(FLAG_enable_embedded_constant_pool &&
+ masm->is_constant_pool_available()) {
masm->set_has_frame(true);
- if (FLAG_enable_ool_constant_pool) {
- masm->set_ool_constant_pool_available(true);
+ if (FLAG_enable_embedded_constant_pool) {
+ masm->set_constant_pool_available(true);
}
if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
masm->EnterFrame(type, !old_constant_pool_available_);
@@ -149,8 +149,8 @@ class FrameAndConstantPoolScope {
~FrameAndConstantPoolScope() {
masm_->LeaveFrame(type_);
masm_->set_has_frame(old_has_frame_);
- if (FLAG_enable_ool_constant_pool) {
- masm_->set_ool_constant_pool_available(old_constant_pool_available_);
+ if (FLAG_enable_embedded_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
}
}
@@ -178,15 +178,15 @@ class ConstantPoolUnavailableScope {
public:
explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
: masm_(masm),
- old_constant_pool_available_(FLAG_enable_ool_constant_pool &&
- masm->is_ool_constant_pool_available()) {
- if (FLAG_enable_ool_constant_pool) {
- masm_->set_ool_constant_pool_available(false);
+ old_constant_pool_available_(FLAG_enable_embedded_constant_pool &&
+ masm->is_constant_pool_available()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ masm_->set_constant_pool_available(false);
}
}
~ConstantPoolUnavailableScope() {
- if (FLAG_enable_ool_constant_pool) {
- masm_->set_ool_constant_pool_available(old_constant_pool_available_);
+ if (FLAG_enable_embedded_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
}
}
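
Both scopes follow the same save/flip/restore RAII shape around the renamed embedded-constant-pool flag. A reduced, compilable sketch of the pattern outside the MacroAssembler types (illustrative names):

#include <cstdio>

// Save the old value, override it for the scope, restore on exit.
class FlagScope {
 public:
  FlagScope(bool* flag, bool value) : flag_(flag), old_(*flag) { *flag = value; }
  ~FlagScope() { *flag_ = old_; }  // runs on every exit path
 private:
  bool* flag_;
  bool old_;
};

bool constant_pool_available = true;

int main() {
  {
    FlagScope scope(&constant_pool_available, false);
    std::printf("inside: %d\n", constant_pool_available);  // 0
  }
  std::printf("after:  %d\n", constant_pool_available);    // 1
}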
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 9fd5442758..5e28c66a83 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -95,13 +95,13 @@ macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_OBJECT(arg) = (%_IsObject(arg));
macro IS_ARRAY(arg) = (%_IsArray(arg));
+macro IS_DATE(arg) = (%_IsDate(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
-macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
@@ -112,10 +112,12 @@ macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
+macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
+macro IS_STRONG(arg) = (%IsStrong(arg));
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
@@ -152,15 +154,13 @@ macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : $nonStringToStrin
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : $nonNumberToNumber(arg));
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : $toObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
-macro HAS_OWN_PROPERTY(arg, index) = (%_CallFunction(arg, index, $objectHasOwnProperty));
+macro HAS_OWN_PROPERTY(arg, index) = (%_CallFunction(arg, index, ObjectHasOwnProperty));
macro SHOULD_CREATE_WRAPPER(functionName, receiver) = (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(functionName));
macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
# Private names.
-# GET_PRIVATE should only be used if the property is known to exists on obj
-# itself (it should really use %GetOwnProperty, but that would be way slower).
-macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateOwnSymbol(name));
-macro NEW_PRIVATE_OWN(name) = (%CreatePrivateOwnSymbol(name));
+macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
+macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
@@ -191,7 +191,7 @@ define MAX_TIME_BEFORE_UTC = 8640002592000000;
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
-macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') throw MakeTypeError(kDateType);
+macro CHECK_DATE(arg) = if (!%_IsDate(arg)) %_ThrowNotDateError();
macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
@@ -307,3 +307,7 @@ define NOT_FOUND = -1;
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
macro DEBUG_IS_STEPPING(function) = (%_DebugIsActive() != 0 && %DebugCallbackSupportsStepping(function));
macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (DEBUG_IS_STEPPING(function)) %DebugPrepareStepInIfStepping(function);
+
+# SharedFlag equivalents
+define kNotShared = false;
+define kShared = true;
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index af2d0acda0..fc3bf2fcda 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -4,19 +4,16 @@
var rngstate; // Initialized to a Uint32Array during genesis.
-var $abs;
-var $exp;
-var $floor;
-var $max;
-var $min;
-
-(function(global, shared, exports) {
-
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalObject = global.Object;
+var InternalArray = utils.InternalArray;
//-------------------------------------------------------------------
@@ -140,6 +137,15 @@ function MathRandom() {
return (x < 0 ? (x + 0x100000000) : x) * 2.3283064365386962890625e-10;
}
+function MathRandomRaw() {
+ var r0 = (MathImul(18030, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
+ rngstate[0] = r0;
+ var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
+ rngstate[1] = r1;
+ var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
+ return x & 0x3fffffff;
+}
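
MathRandomRaw pairs two 16-bit multiply-with-carry generators, splices their words, and masks to 30 bits, giving fast integer randomness without the float scaling that MathRandom performs. The same recurrence as a standalone C++ sketch (seeds are arbitrary, for illustration):

#include <cstdint>
#include <cstdio>

// Two multiply-with-carry streams, as in MathRandomRaw above.
// Each step: state = multiplier * low16(state) + high16(state).
uint32_t state0 = 0xdeadbeef;  // arbitrary nonzero seeds
uint32_t state1 = 0xcafef00d;

uint32_t RandomRaw30() {
  state0 = 18030 * (state0 & 0xFFFF) + (state0 >> 16);
  state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
  uint32_t x = (state0 << 16) + (state1 & 0xFFFF);
  return x & 0x3fffffff;  // keep 30 random bits
}

int main() {
  for (int i = 0; i < 3; i++) std::printf("%u\n", RandomRaw30());
}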
+
// ECMA 262 - 15.8.2.15
function MathRound(x) {
return %RoundNumber(TO_NUMBER_INLINE(x));
@@ -294,7 +300,7 @@ var Math = new MathConstructor();
%AddNamedProperty(Math, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
// Set up math constants.
-$installConstants(Math, [
+utils.InstallConstants(Math, [
// ECMA-262, section 15.8.1.1.
"E", 2.7182818284590452354,
// ECMA-262, section 15.8.1.2.
@@ -311,7 +317,7 @@ $installConstants(Math, [
// Set up non-enumerable functions of the Math object and
// set their names.
-$installFunctions(Math, DONT_ENUM, [
+utils.InstallFunctions(Math, DONT_ENUM, [
"random", MathRandom,
"abs", MathAbs,
"acos", MathAcosJS,
@@ -340,24 +346,29 @@ $installFunctions(Math, DONT_ENUM, [
"cbrt", MathCbrt
]);
-%SetInlineBuiltinFlag(MathAbs);
-%SetInlineBuiltinFlag(MathAcosJS);
-%SetInlineBuiltinFlag(MathAsinJS);
-%SetInlineBuiltinFlag(MathAtanJS);
-%SetInlineBuiltinFlag(MathAtan2JS);
-%SetInlineBuiltinFlag(MathCeil);
-%SetInlineBuiltinFlag(MathClz32JS);
-%SetInlineBuiltinFlag(MathFloorJS);
-%SetInlineBuiltinFlag(MathRandom);
-%SetInlineBuiltinFlag(MathSign);
-%SetInlineBuiltinFlag(MathSqrtJS);
-%SetInlineBuiltinFlag(MathTrunc);
-
-// Expose to the global scope.
-$abs = MathAbs;
-$exp = MathExp;
-$floor = MathFloorJS;
-$max = MathMax;
-$min = MathMin;
+%SetForceInlineFlag(MathAbs);
+%SetForceInlineFlag(MathAcosJS);
+%SetForceInlineFlag(MathAsinJS);
+%SetForceInlineFlag(MathAtanJS);
+%SetForceInlineFlag(MathAtan2JS);
+%SetForceInlineFlag(MathCeil);
+%SetForceInlineFlag(MathClz32JS);
+%SetForceInlineFlag(MathFloorJS);
+%SetForceInlineFlag(MathRandom);
+%SetForceInlineFlag(MathSign);
+%SetForceInlineFlag(MathSqrtJS);
+%SetForceInlineFlag(MathTrunc);
+
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.MathAbs = MathAbs;
+ to.MathExp = MathExp;
+ to.MathFloor = MathFloorJS;
+ to.IntRandom = MathRandomRaw;
+ to.MathMax = MathMax;
+ to.MathMin = MathMin;
+});
})
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 81fcdec51c..7193392d9d 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -35,20 +35,9 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
- Isolate* isolate,
- const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<JSArray> stack_frames) {
+ Isolate* isolate, MessageTemplate::Template message, MessageLocation* loc,
+ Handle<Object> argument, Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
- Handle<String> type_handle = factory->InternalizeUtf8String(type);
- Handle<FixedArray> arguments_elements =
- factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- arguments_elements->set(i, *args[i]);
- }
- Handle<JSArray> arguments_handle =
- factory->NewJSArrayWithElements(arguments_elements);
int start = 0;
int end = 0;
@@ -63,21 +52,15 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
- Handle<JSMessageObject> message =
- factory->NewJSMessageObject(type_handle,
- arguments_handle,
- start,
- end,
- script_handle,
- stack_frames_handle);
+ Handle<JSMessageObject> message_obj = factory->NewJSMessageObject(
+ message, argument, start, end, script_handle, stack_frames_handle);
- return message;
+ return message_obj;
}
-void MessageHandler::ReportMessage(Isolate* isolate,
- MessageLocation* loc,
- Handle<Object> message) {
+void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
+ Handle<JSMessageObject> message) {
// We are calling into embedder's code which can throw exceptions.
// Thus we need to save current exception state, reset it to the clean one
// and ignore scheduled exceptions callbacks can throw.
@@ -87,14 +70,29 @@ void MessageHandler::ReportMessage(Isolate* isolate,
if (isolate->has_pending_exception()) {
exception_object = isolate->pending_exception();
}
- Handle<Object> exception_handle(exception_object, isolate);
+ Handle<Object> exception(exception_object, isolate);
Isolate::ExceptionScope exception_scope(isolate);
isolate->clear_pending_exception();
isolate->set_external_caught_exception(false);
+ // Turn the exception on the message into a string if it is an object.
+ if (message->argument()->IsJSObject()) {
+ HandleScope scope(isolate);
+ Handle<Object> argument(message->argument(), isolate);
+ Handle<Object> args[] = {argument};
+ MaybeHandle<Object> maybe_stringified = Execution::TryCall(
+ isolate->to_detail_string_fun(), isolate->js_builtins_object(),
+ arraysize(args), args);
+ Handle<Object> stringified;
+ if (!maybe_stringified.ToHandle(&stringified)) {
+ stringified = isolate->factory()->NewStringFromAsciiChecked("exception");
+ }
+ message->set_argument(*stringified);
+ }
+
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception_handle);
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
v8::NeanderArray global_listeners(isolate->factory()->message_listeners());
int global_length = global_listeners.length();
@@ -114,7 +112,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
Handle<Object> callback_data(listener.get(1), isolate);
{
// Do not allow exceptions to propagate.
- v8::TryCatch try_catch;
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
callback(api_message_obj, callback_data->IsUndefined()
? api_exception_obj
: v8::Utils::ToLocal(callback_data));
@@ -129,29 +127,9 @@ void MessageHandler::ReportMessage(Isolate* isolate,
Handle<String> MessageHandler::GetMessage(Isolate* isolate,
Handle<Object> data) {
- Factory* factory = isolate->factory();
- Handle<String> fmt_str =
- factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("$formatMessage"));
- Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
- isolate->js_builtins_object(), fmt_str).ToHandleChecked());
Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
- Handle<Object> argv[] = { Handle<Object>(message->type(), isolate),
- Handle<Object>(message->arguments(), isolate) };
-
- MaybeHandle<Object> maybe_result = Execution::TryCall(
- fun, isolate->js_builtins_object(), arraysize(argv), argv);
- Handle<Object> result;
- if (!maybe_result.ToHandle(&result) || !result->IsString()) {
- return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
- }
- Handle<String> result_string = Handle<String>::cast(result);
- // A string that has been obtained from JS code in this way is
- // likely to be a complicated ConsString of some sort. We flatten it
- // here to improve the efficiency of converting it to a C string and
- // other operations that are likely to take place (see GetLocalizedMessage
- // for example).
- result_string = String::Flatten(result_string);
- return result_string;
+ Handle<Object> arg = Handle<Object>(message->argument(), isolate);
+ return MessageTemplate::FormatMessage(isolate, message->type(), arg);
}
@@ -197,10 +175,11 @@ Handle<Object> CallSite::GetScriptNameOrSourceUrl(Isolate* isolate) {
}
-bool CheckMethodName(Handle<JSObject> obj, Handle<Name> name,
+bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
Handle<JSFunction> fun,
LookupIterator::Configuration config) {
- LookupIterator iter(obj, name, config);
+ LookupIterator iter =
+ LookupIterator::PropertyOrElement(isolate, obj, name, config);
if (iter.state() == LookupIterator::DATA) {
return iter.GetDataValue().is_identical_to(fun);
} else if (iter.state() == LookupIterator::ACCESSOR) {
@@ -225,7 +204,7 @@ Handle<Object> CallSite::GetMethodName(Isolate* isolate) {
Handle<Object> function_name(fun_->shared()->name(), isolate);
if (function_name->IsName()) {
Handle<Name> name = Handle<Name>::cast(function_name);
- if (CheckMethodName(obj, name, fun_,
+ if (CheckMethodName(isolate, obj, name, fun_,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR))
return name;
}
@@ -244,7 +223,7 @@ Handle<Object> CallSite::GetMethodName(Isolate* isolate) {
HandleScope inner_scope(isolate);
if (!keys->get(i)->IsName()) continue;
Handle<Name> name_key(Name::cast(keys->get(i)), isolate);
- if (!CheckMethodName(current_obj, name_key, fun_,
+ if (!CheckMethodName(isolate, current_obj, name_key, fun_,
LookupIterator::OWN_SKIP_INTERCEPTOR))
continue;
// Return null in case of duplicates to avoid confusion.
@@ -306,16 +285,54 @@ bool CallSite::IsEval(Isolate* isolate) {
bool CallSite::IsConstructor(Isolate* isolate) {
if (!receiver_->IsJSObject()) return false;
Handle<Object> constructor =
- JSObject::GetDataProperty(Handle<JSObject>::cast(receiver_),
- isolate->factory()->constructor_string());
+ JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
+ isolate->factory()->constructor_string());
return constructor.is_identical_to(fun_);
}
+Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
+ int template_index,
+ Handle<Object> arg) {
+ Factory* factory = isolate->factory();
+ Handle<String> result_string;
+ if (arg->IsString()) {
+ result_string = Handle<String>::cast(arg);
+ } else {
+ Handle<String> fmt_str = factory->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("$noSideEffectToString"));
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(
+ Object::GetProperty(isolate->js_builtins_object(), fmt_str)
+ .ToHandleChecked());
+
+ MaybeHandle<Object> maybe_result =
+ Execution::TryCall(fun, isolate->js_builtins_object(), 1, &arg);
+ Handle<Object> result;
+ if (!maybe_result.ToHandle(&result) || !result->IsString()) {
+ return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
+ }
+ result_string = Handle<String>::cast(result);
+ }
+ MaybeHandle<String> maybe_result_string = MessageTemplate::FormatMessage(
+ template_index, result_string, factory->empty_string(),
+ factory->empty_string());
+ if (!maybe_result_string.ToHandle(&result_string)) {
+ return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
+ }
+ // A string that has been obtained from JS code in this way is
+ // likely to be a complicated ConsString of some sort. We flatten it
+ // here to improve the efficiency of converting it to a C string and
+ // other operations that are likely to take place (see GetLocalizedMessage
+ // for example).
+ return String::Flatten(result_string);
+}
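
The new FormatMessage overload shows the error-handling convention used throughout this file: conversions that can throw are attempted via Execution::TryCall, and any failure collapses to the literal placeholder "<error>" instead of propagating. A self-contained sketch of that attempt-or-placeholder shape (std::optional stands in for MaybeHandle):

#include <cstdio>
#include <optional>
#include <string>

// Stand-in for a TryCall-style conversion that may fail.
std::optional<std::string> TryStringify(bool ok) {
  if (!ok) return std::nullopt;  // models a thrown exception
  return std::string("TypeError: x is not a function");
}

std::string FormatOrPlaceholder(bool ok) {
  if (auto s = TryStringify(ok)) return *s;
  return "<error>";  // same placeholder FormatMessage falls back to
}

int main() {
  std::printf("%s\n", FormatOrPlaceholder(true).c_str());
  std::printf("%s\n", FormatOrPlaceholder(false).c_str());  // "<error>"
}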
+
+
MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
Handle<String> arg0,
Handle<String> arg1,
Handle<String> arg2) {
+ Isolate* isolate = arg0->GetIsolate();
const char* template_string;
switch (template_index) {
#define CASE(NAME, STRING) \
@@ -326,20 +343,25 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
#undef CASE
case kLastMessage:
default:
- UNREACHABLE();
- template_string = "";
- break;
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<String>();
}
- Isolate* isolate = arg0->GetIsolate();
IncrementalStringBuilder builder(isolate);
unsigned int i = 0;
Handle<String> args[] = {arg0, arg1, arg2};
for (const char* c = template_string; *c != '\0'; c++) {
if (*c == '%') {
- DCHECK(i < arraysize(args));
- builder.AppendString(args[i++]);
+ // %% results in verbatim %.
+ if (*(c + 1) == '%') {
+ c++;
+ builder.AppendCharacter('%');
+ } else {
+ DCHECK(i < arraysize(args));
+ Handle<String> arg = args[i++];
+ builder.AppendString(arg);
+ }
} else {
builder.AppendCharacter(*c);
}
@@ -347,4 +369,5 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
return builder.Finish();
}
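
The substitution loop now supports an escape: each bare '%' consumes the next argument, while "%%" emits a literal percent, which is what lets templates in messages.h spell a verbatim percent (e.g. "builtin %%IS_VAR"). A standalone re-implementation of just that loop (illustrative, outside the IncrementalStringBuilder machinery):

#include <string>
#include <vector>

// Each '%' consumes the next argument; "%%" emits a literal '%'.
std::string Format(const char* tmpl, const std::vector<std::string>& args) {
  std::string out;
  size_t i = 0;
  for (const char* c = tmpl; *c != '\0'; c++) {
    if (*c == '%') {
      if (*(c + 1) == '%') {         // "%%" -> verbatim '%'
        c++;
        out += '%';
      } else if (i < args.size()) {  // '%' -> next argument
        out += args[i++];
      }
    } else {
      out += *c;
    }
  }
  return out;
}

// Format("Cannot read property '%' of %", {"x", "undefined"})
//   -> "Cannot read property 'x' of undefined"
// Format("Internal % error. 100%% broken.", {"parser"})
//   -> "Internal parser error. 100% broken."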
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 930ea92b7b..4072300bf6 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -62,32 +62,6 @@ class MessageLocation {
};
-// A message handler is a convenience interface for accessing the list
-// of message listeners registered in an environment
-class MessageHandler {
- public:
- // Returns a message object for the API to use.
- static Handle<JSMessageObject> MakeMessageObject(
- Isolate* isolate,
- const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<JSArray> stack_frames);
-
- // Report a formatted message (needs JS allocation).
- static void ReportMessage(Isolate* isolate,
- MessageLocation* loc,
- Handle<Object> message);
-
- static void DefaultMessageReport(Isolate* isolate,
- const MessageLocation* loc,
- Handle<Object> message_obj);
- static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
- static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
- Handle<Object> data);
-};
-
-
class CallSite {
public:
CallSite(Handle<Object> receiver, Handle<JSFunction> fun, int pos)
@@ -115,8 +89,11 @@ class CallSite {
#define MESSAGE_TEMPLATES(T) \
/* Error */ \
+ T(None, "") \
T(CyclicProto, "Cyclic __proto__ value") \
+ T(DebuggerLoading, "Error loading debugger") \
T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
+ T(UncaughtException, "Uncaught %") \
T(Unsupported, "Not supported") \
T(WrongServiceType, "Internal error, wrong service type: %") \
T(WrongValueType, "Internal error. Wrong value type.") \
@@ -131,6 +108,10 @@ class CallSite {
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
+ T(CannotPreventExtExternalArray, \
+ "Cannot prevent extension of an object with external array elements") \
+ T(CircularStructure, "Converting circular structure to JSON") \
+ T(ConstAssign, "Assignment to constant variable.") \
T(ConstructorNonCallable, \
"Class constructors cannot be invoked without 'new'") \
T(ConstructorNotFunction, "Constructor % requires 'new'") \
@@ -139,12 +120,18 @@ class CallSite {
"First argument to DataView constructor must be an ArrayBuffer") \
T(DateType, "this is not a Date object.") \
T(DefineDisallowed, "Cannot define property:%, object is not extensible.") \
+ T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
+ T(ExtendsValueGenerator, \
+ "Class extends value % may not be a generator function") \
+ T(ExtendsValueNotFunction, \
+ "Class extends value % is not a function or null") \
T(FirstArgumentNotRegExp, \
"First argument to % must not be a regular expression") \
T(FlagsGetterNonObject, \
"RegExp.prototype.flags getter called on non-object %") \
T(FunctionBind, "Bind must be called on a function") \
T(GeneratorRunning, "Generator is already running") \
+ T(IllegalInvocation, "Illegal invocation") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
T(InstanceofFunctionExpected, \
"Expecting a function in instanceof check, but got %") \
@@ -160,15 +147,26 @@ class CallSite {
T(MethodInvokedOnNullOrUndefined, \
"Method invoked on undefined or null value.") \
T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \
+ T(NoAccess, "no access") \
+ T(NonCoercible, "Cannot match against 'undefined' or 'null'.") \
+ T(NonExtensibleProto, "% is not extensible") \
+ T(NonObjectPropertyLoad, "Cannot read property '%' of %") \
+ T(NonObjectPropertyStore, "Cannot set property '%' of %") \
+ T(NoSetterInCallback, "Cannot set property % of % which has only a getter") \
T(NotAnIterator, "% is not an iterator") \
T(NotAPromise, "% is not a promise") \
T(NotConstructor, "% is not a constructor") \
+ T(NotDateObject, "this is not a Date object.") \
+ T(NotIntlObject, "% is not an i18n object.") \
T(NotGeneric, "% is not generic") \
T(NotIterable, "% is not iterable") \
T(NotTypedArray, "this is not a typed array.") \
+ T(NotSharedTypedArray, "% is not a shared typed array.") \
+ T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
T(ObjectGetterExpectingFunction, \
"Object.prototype.__defineGetter__: Expecting function") \
T(ObjectGetterCallable, "Getter must be a function: %") \
+ T(ObjectNotExtensible, "Can't add property %, object is not extensible") \
T(ObjectSetterExpectingFunction, \
"Object.prototype.__defineSetter__: Expecting function") \
T(ObjectSetterCallable, "Setter must be a function: %") \
@@ -190,6 +188,10 @@ class CallSite {
T(PropertyDescObject, "Property description must be an object: %") \
T(PropertyNotFunction, "Property '%' of object % is not a function") \
T(ProtoObjectOrNull, "Object prototype may only be an Object or null: %") \
+ T(PrototypeParentNotAnObject, \
+ "Class extends value does not have valid prototype property %") \
+ T(ProxyHandlerDeleteFailed, \
+ "Proxy handler % did not return a boolean value from 'delete' trap") \
T(ProxyHandlerNonObject, "Proxy.% called with non-object as handler") \
T(ProxyHandlerReturned, "Proxy handler % returned % from '%' trap") \
T(ProxyHandlerTrapMissing, "Proxy handler % has no '%' trap") \
@@ -204,12 +206,38 @@ class CallSite {
T(ProxyTrapFunctionExpected, \
"Proxy.createFunction called with non-function for '%' trap") \
T(RedefineDisallowed, "Cannot redefine property: %") \
+ T(RedefineExternalArray, \
+ "Cannot redefine a property of an object with external array elements") \
T(ReduceNoInitial, "Reduce of empty array with no initial value") \
+ T(RegExpFlags, \
+ "Cannot supply flags when constructing one RegExp from another") \
T(ReinitializeIntl, "Trying to re-initialize % object.") \
T(ResolvedOptionsCalledOnNonObject, \
"resolvedOptions method called on a non-object or on a object that is " \
"not Intl.%.") \
T(ResolverNotAFunction, "Promise resolver % is not a function") \
+ T(RestrictedFunctionProperties, \
+ "'caller' and 'arguments' are restricted function properties and cannot " \
+ "be accessed in this context.") \
+ T(StaticPrototype, "Classes may not have static property named prototype") \
+ T(StrictCannotAssign, "Cannot assign to read only '%' in strict mode") \
+ T(StrictDeleteProperty, "Cannot delete property '%' of %") \
+ T(StrictPoisonPill, \
+ "'caller', 'callee', and 'arguments' properties may not be accessed on " \
+ "strict mode functions or the arguments objects for calls to them") \
+ T(StrictReadOnlyProperty, "Cannot assign to read only property '%' of %") \
+ T(StrongArity, \
+ "In strong mode, calling a function with too few arguments is deprecated") \
+ T(StrongDeleteProperty, \
+ "Deleting property '%' of strong object '%' is deprecated") \
+ T(StrongImplicitConversion, \
+ "In strong mode, implicit conversions are deprecated") \
+ T(StrongRedefineDisallowed, \
+ "On strong object %, redefining writable, non-configurable property '%' " \
+ "to be non-writable is deprecated") \
+ T(StrongSetProto, \
+ "On strong object %, redefining the internal prototype is deprecated") \
+ T(SymbolKeyFor, "% is not a symbol") \
T(SymbolToPrimitive, \
"Cannot convert a Symbol wrapper object to a primitive value") \
T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
@@ -218,14 +246,19 @@ class CallSite {
T(ValueAndAccessor, \
"Invalid property. A property cannot both have accessors and be " \
"writable or have a value, %") \
+ T(VarRedeclaration, "Identifier '%' has already been declared") \
T(WithExpression, "% has no properties") \
T(WrongArgs, "%: Arguments list has wrong type") \
/* ReferenceError */ \
T(NonMethod, "'super' is referenced from non-method") \
T(NotDefined, "% is not defined") \
+ T(StrongSuperCallMissing, \
+ "In strong mode, invoking the super constructor in a subclass is " \
+ "required") \
+ T(StrongUnboundGlobal, \
+ "In strong mode, using an undeclared global variable '%' is not allowed") \
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
- T(ArrayLengthOutOfRange, "defineProperty() array length out of range") \
T(DateRange, "Provided date is not in valid range.") \
T(ExpectedLocation, "Expected Area/Location for time zone, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
@@ -238,6 +271,8 @@ class CallSite {
T(InvalidDataViewLength, "Invalid data view length") \
T(InvalidDataViewOffset, "Start offset is outside the bounds of the buffer") \
T(InvalidLanguageTag, "Invalid language tag: %") \
+ T(InvalidWeakMapKey, "Invalid value used as weak map key") \
+ T(InvalidWeakSetValue, "Invalid value used in weak set") \
T(InvalidStringLength, "Invalid string length") \
T(InvalidTimeValue, "Invalid time value") \
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
@@ -255,7 +290,131 @@ class CallSite {
T(UnsupportedTimeZone, "Unsupported time zone specified %") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
+ T(BadGetterArity, "Getter must not have any formal parameters.") \
+ T(BadSetterArity, "Setter must have exactly one formal parameter.") \
+ T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
+ T(ConstructorIsGenerator, "Class constructor may not be a generator") \
+ T(DerivedConstructorReturn, \
+ "Derived constructors may only return object or undefined") \
+ T(DuplicateArrowFunFormalParam, \
+ "Arrow function may not have duplicate parameter names") \
+ T(DuplicateConstructor, "A class may only have one constructor") \
+ T(DuplicateExport, "Duplicate export of '%'") \
+ T(DuplicateProto, \
+ "Duplicate __proto__ fields are not allowed in object literals") \
+ T(ForInLoopInitializer, \
+ "for-in loop variable declaration may not have an initializer.") \
+ T(ForInOfLoopMultiBindings, \
+ "Invalid left-hand side in % loop: Must have a single binding.") \
+ T(ForOfLoopInitializer, \
+ "for-of loop variable declaration may not have an initializer.") \
+ T(IllegalAccess, "Illegal access") \
+ T(IllegalBreak, "Illegal break statement") \
+ T(IllegalContinue, "Illegal continue statement") \
+ T(IllegalReturn, "Illegal return statement") \
+ T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
+ T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
+ T(InvalidLhsInPostfixOp, \
+ "Invalid left-hand side expression in postfix operation") \
+ T(InvalidLhsInPrefixOp, \
+ "Invalid left-hand side expression in prefix operation") \
+ T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
+ T(LabelRedeclaration, "Label '%' has already been declared") \
+ T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
+ T(MalformedRegExp, "Invalid regular expression: /%/: %") \
+ T(MalformedRegExpFlags, "Invalid regular expression flags") \
+ T(MissingArrow, \
+ "Expected () to start arrow function, but got '%' instead of '=>'") \
+ T(ModuleExportUndefined, "Export '%' is not defined in module") \
+ T(MultipleDefaultsInSwitch, \
+ "More than one default clause in switch statement") \
+ T(NewlineAfterThrow, "Illegal newline after throw") \
+ T(NoCatchOrFinally, "Missing catch or finally after try") \
+ T(NotIsvar, "builtin %%IS_VAR: not a variable") \
+ T(ParamAfterRest, "Rest parameter must be last formal parameter") \
+ T(BadSetterRestParameter, \
+ "Setter function argument must not be a rest parameter") \
T(ParenthesisInArgString, "Function arg string contains parenthesis") \
+ T(SingleFunctionLiteral, "Single function literal required") \
+ T(SloppyLexical, \
+ "Block-scoped declarations (let, const, function, class) not yet " \
+ "supported outside strict mode") \
+ T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
+ T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
+ T(StrictFunction, \
+ "In strict mode code, functions can only be declared at top level or " \
+ "immediately within another function.") \
+ T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
+ T(StrictParamDupe, \
+ "Strict mode function may not have duplicate parameter names") \
+ T(StrictWith, "Strict mode code may not include a with statement") \
+ T(StrongArguments, \
+ "In strong mode, 'arguments' is deprecated, use '...args' instead") \
+ T(StrongConstructorReturnMisplaced, \
+ "In strong mode, returning from a constructor before its super " \
+ "constructor invocation or all assignments to 'this' is deprecated") \
+ T(StrongConstructorReturnValue, \
+ "In strong mode, returning a value from a constructor is deprecated") \
+ T(StrongConstructorSuper, \
+ "In strong mode, 'super' can only be used to invoke the super " \
+ "constructor, and cannot be nested inside another statement or " \
+ "expression") \
+ T(StrongConstructorThis, \
+ "In strong mode, 'this' can only be used to initialize properties, and " \
+ "cannot be nested inside another statement or expression") \
+ T(StrongDelete, \
+ "In strong mode, 'delete' is deprecated, use maps or sets instead") \
+ T(StrongDirectEval, "In strong mode, direct calls to eval are deprecated") \
+ T(StrongEllision, \
+ "In strong mode, arrays with holes are deprecated, use maps instead") \
+ T(StrongEmpty, \
+ "In strong mode, empty sub-statements are deprecated, make them explicit " \
+ "with '{}' instead") \
+ T(StrongEqual, \
+ "In strong mode, '==' and '!=' are deprecated, use '===' and '!==' " \
+ "instead") \
+ T(StrongForIn, \
+ "In strong mode, 'for'-'in' loops are deprecated, use 'for'-'of' instead") \
+ T(StrongPropertyAccess, \
+ "In strong mode, accessing missing property '%' of % is deprecated") \
+ T(StrongSuperCallDuplicate, \
+ "In strong mode, invoking the super constructor multiple times is " \
+ "deprecated") \
+ T(StrongSuperCallMisplaced, \
+ "In strong mode, the super constructor must be invoked before any " \
+ "assignment to 'this'") \
+ T(StrongSwitchFallthrough, \
+ "In strong mode, switch fall-through is deprecated, terminate each case " \
+ "with 'break', 'continue', 'return' or 'throw'") \
+ T(StrongUndefined, \
+ "In strong mode, binding or assigning to 'undefined' is deprecated") \
+ T(StrongUseBeforeDeclaration, \
+ "In strong mode, declaring variable '%' before its use is required") \
+ T(StrongVar, \
+ "In strong mode, 'var' is deprecated, use 'let' or 'const' instead") \
+ T(TemplateOctalLiteral, \
+ "Octal literals are not allowed in template strings.") \
+ T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
+ T(TooManyArguments, \
+ "Too many arguments in function call (only 65535 allowed)") \
+ T(TooManyParameters, \
+ "Too many parameters in function definition (only 65535 allowed)") \
+ T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
+ T(UnexpectedEOS, "Unexpected end of input") \
+ T(UnexpectedReserved, "Unexpected reserved word") \
+ T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
+ T(UnexpectedSuper, "'super' keyword unexpected here") \
+ T(UnexpectedNewTarget, "new.target expression is not allowed here") \
+ T(UnexpectedTemplateString, "Unexpected template string") \
+ T(UnexpectedToken, "Unexpected token %") \
+ T(UnexpectedTokenIdentifier, "Unexpected identifier") \
+ T(UnexpectedTokenNumber, "Unexpected number") \
+ T(UnexpectedTokenString, "Unexpected string") \
+ T(UnknownLabel, "Undefined label '%'") \
+ T(UnterminatedArgList, "missing ) after argument list") \
+ T(UnterminatedRegExp, "Invalid regular expression: missing /") \
+ T(UnterminatedTemplate, "Unterminated template literal") \
+ T(UnterminatedTemplateExpr, "Missing } in template expression") \
/* EvalError */ \
T(CodeGenFromStrings, "%") \
/* URIError */ \
@@ -274,6 +433,30 @@ class MessageTemplate {
Handle<String> arg0,
Handle<String> arg1,
Handle<String> arg2);
+
+ static Handle<String> FormatMessage(Isolate* isolate, int template_index,
+ Handle<Object> arg);
+};
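
MESSAGE_TEMPLATES above is an X-macro: the same list expands once into the Template enum and again into the string switch inside FormatMessage. A minimal compilable sketch of the idiom (demo names, not V8's):

#include <cstdio>

// One list, expanded twice: once for the enum, once for the strings.
#define DEMO_TEMPLATES(T)    \
  T(kGreeting, "Hello, %!")  \
  T(kFarewell, "Goodbye.")

enum Template {
#define ENUM_ENTRY(NAME, STRING) NAME,
  DEMO_TEMPLATES(ENUM_ENTRY)
#undef ENUM_ENTRY
  kLastTemplate
};

const char* TemplateString(Template t) {
  switch (t) {
#define CASE(NAME, STRING) \
  case NAME:               \
    return STRING;
    DEMO_TEMPLATES(CASE)
#undef CASE
    default:
      return "";
  }
}

int main() {
  std::printf("%s\n", TemplateString(kGreeting));  // "Hello, %!"
}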
+
+
+// A message handler is a convenience interface for accessing the list
+// of message listeners registered in an environment
+class MessageHandler {
+ public:
+ // Returns a message object for the API to use.
+ static Handle<JSMessageObject> MakeMessageObject(
+ Isolate* isolate, MessageTemplate::Template type, MessageLocation* loc,
+ Handle<Object> argument, Handle<JSArray> stack_frames);
+
+ // Report a formatted message (needs JS allocation).
+ static void ReportMessage(Isolate* isolate, MessageLocation* loc,
+ Handle<JSMessageObject> message);
+
+ static void DefaultMessageReport(Isolate* isolate, const MessageLocation* loc,
+ Handle<Object> message_obj);
+ static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
+ static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
+ Handle<Object> data);
};
} } // namespace v8::internal
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 20396f8b5a..d7ca7cd647 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -5,11 +5,11 @@
// -------------------------------------------------------------------
var $errorToString;
-var $formatMessage;
var $getStackTraceLine;
var $messageGetPositionInLine;
var $messageGetLineNumber;
var $messageGetSourceLine;
+var $noSideEffectToString;
var $stackOverflowBoilerplate;
var $stackTraceSymbol;
var $toDetailString;
@@ -27,15 +27,34 @@ var MakeReferenceError;
var MakeSyntaxError;
var MakeTypeError;
var MakeURIError;
-var MakeReferenceErrorEmbedded;
-var MakeSyntaxErrorEmbedded;
-var MakeTypeErrorEmbedded;
-(function(global, shared, exports) {
+(function(global, utils) {
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalObject = global.Object;
+var InternalArray = utils.InternalArray;
+var ObjectDefineProperty = utils.ObjectDefineProperty;
+
+var ArrayJoin;
+var ObjectToString;
+var StringCharAt;
+var StringIndexOf;
+var StringSubstring;
+
+utils.Import(function(from) {
+ ArrayJoin = from.ArrayJoin;
+ ObjectToString = from.ObjectToString;
+ StringCharAt = from.StringCharAt;
+ StringIndexOf = from.StringIndexOf;
+ StringSubstring = from.StringSubstring;
+});
+
+// -------------------------------------------------------------------
+
var GlobalError;
var GlobalTypeError;
var GlobalRangeError;
@@ -44,156 +63,6 @@ var GlobalSyntaxError;
var GlobalReferenceError;
var GlobalEvalError;
-// -------------------------------------------------------------------
-
-var kMessages = {
- // Error
- constructor_is_generator: ["Class constructor may not be a generator"],
- constructor_is_accessor: ["Class constructor may not be an accessor"],
- // TypeError
- unexpected_token: ["Unexpected token ", "%0"],
- unexpected_token_number: ["Unexpected number"],
- unexpected_token_string: ["Unexpected string"],
- unexpected_token_identifier: ["Unexpected identifier"],
- unexpected_reserved: ["Unexpected reserved word"],
- unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
- unexpected_eos: ["Unexpected end of input"],
- unexpected_template_string: ["Unexpected template string"],
- malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
- malformed_regexp_flags: ["Invalid regular expression flags"],
- unterminated_regexp: ["Invalid regular expression: missing /"],
- unterminated_template: ["Unterminated template literal"],
- unterminated_template_expr: ["Missing } in template expression"],
- unterminated_arg_list: ["missing ) after argument list"],
- regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
- multiple_defaults_in_switch: ["More than one default clause in switch statement"],
- newline_after_throw: ["Illegal newline after throw"],
- label_redeclaration: ["Label '", "%0", "' has already been declared"],
- var_redeclaration: ["Identifier '", "%0", "' has already been declared"],
- duplicate_template_property: ["Object template has duplicate property '", "%0", "'"],
- no_catch_or_finally: ["Missing catch or finally after try"],
- unknown_label: ["Undefined label '", "%0", "'"],
- uncaught_exception: ["Uncaught ", "%0"],
- undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
- non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
- non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- illegal_invocation: ["Illegal invocation"],
- no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
- value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- proto_object_or_null: ["Object prototype may only be an Object or null: ", "%0"],
- non_extensible_proto: ["%0", " is not extensible"],
- invalid_weakmap_key: ["Invalid value used as weak map key"],
- invalid_weakset_value: ["Invalid value used in weak set"],
- not_date_object: ["this is not a Date object."],
- not_a_symbol: ["%0", " is not a symbol"],
- // ReferenceError
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for: ["Invalid left-hand side in for-loop"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
- // SyntaxError
- not_isvar: ["builtin %IS_VAR: not a variable"],
- single_function_literal: ["Single function literal required"],
- invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
- invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
- illegal_break: ["Illegal break statement"],
- illegal_continue: ["Illegal continue statement"],
- illegal_return: ["Illegal return statement"],
- error_loading_debugger: ["Error loading debugger"],
- circular_structure: ["Converting circular structure to JSON"],
- array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
- object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
- illegal_access: ["Illegal access"],
- static_prototype: ["Classes may not have static property named prototype"],
- strict_mode_with: ["Strict mode code may not include a with statement"],
- strict_eval_arguments: ["Unexpected eval or arguments in strict mode"],
- too_many_arguments: ["Too many arguments in function call (only 65535 allowed)"],
- too_many_parameters: ["Too many parameters in function definition (only 65535 allowed)"],
- too_many_variables: ["Too many variables declared (only 4194303 allowed)"],
- strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
- strict_octal_literal: ["Octal literals are not allowed in strict mode."],
- template_octal_literal: ["Octal literals are not allowed in template strings."],
- strict_delete: ["Delete of an unqualified identifier in strict mode."],
- strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
- strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
- strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
- strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
- restricted_function_properties: ["'caller' and 'arguments' are restricted function properties and cannot be accessed in this context."],
- strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
- strict_caller: ["Illegal access to a strict mode caller function."],
- strong_ellision: ["In strong mode, arrays with holes are deprecated, use maps instead"],
- strong_arguments: ["In strong mode, 'arguments' is deprecated, use '...args' instead"],
- strong_undefined: ["In strong mode, binding or assigning to 'undefined' is deprecated"],
- strong_implicit_cast: ["In strong mode, implicit conversions are deprecated"],
- strong_direct_eval: ["In strong mode, direct calls to eval are deprecated"],
- strong_switch_fallthrough : ["In strong mode, switch fall-through is deprecated, terminate each case with 'break', 'continue', 'return' or 'throw'"],
- strong_equal: ["In strong mode, '==' and '!=' are deprecated, use '===' and '!==' instead"],
- strong_delete: ["In strong mode, 'delete' is deprecated, use maps or sets instead"],
- strong_var: ["In strong mode, 'var' is deprecated, use 'let' or 'const' instead"],
- strong_for_in: ["In strong mode, 'for'-'in' loops are deprecated, use 'for'-'of' instead"],
- strong_empty: ["In strong mode, empty sub-statements are deprecated, make them explicit with '{}' instead"],
- strong_use_before_declaration: ["In strong mode, declaring variable '", "%0", "' before its use is required"],
- strong_unbound_global: ["In strong mode, using an undeclared global variable '", "%0", "' is not allowed"],
- strong_super_call_missing: ["In strong mode, invoking the super constructor in a subclass is required"],
- strong_super_call_duplicate: ["In strong mode, invoking the super constructor multiple times is deprecated"],
- strong_super_call_misplaced: ["In strong mode, the super constructor must be invoked before any assignment to 'this'"],
- strong_constructor_super: ["In strong mode, 'super' can only be used to invoke the super constructor, and cannot be nested inside another statement or expression"],
- strong_constructor_this: ["In strong mode, 'this' can only be used to initialize properties, and cannot be nested inside another statement or expression"],
- strong_constructor_return_value: ["In strong mode, returning a value from a constructor is deprecated"],
- strong_constructor_return_misplaced: ["In strong mode, returning from a constructor before its super constructor invocation or all assignments to 'this' is deprecated"],
- sloppy_lexical: ["Block-scoped declarations (let, const, function, class) not yet supported outside strict mode"],
- malformed_arrow_function_parameter_list: ["Malformed arrow function parameter list"],
- cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
- redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
- const_assign: ["Assignment to constant variable."],
- module_export_undefined: ["Export '", "%0", "' is not defined in module"],
- duplicate_export: ["Duplicate export of '", "%0", "'"],
- unexpected_super: ["'super' keyword unexpected here"],
- extends_value_not_a_function: ["Class extends value ", "%0", " is not a function or null"],
- extends_value_generator: ["Class extends value ", "%0", " may not be a generator function"],
- prototype_parent_not_an_object: ["Class extends value does not have valid prototype property ", "%0"],
- duplicate_constructor: ["A class may only have one constructor"],
- super_constructor_call: ["A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported."],
- duplicate_proto: ["Duplicate __proto__ fields are not allowed in object literals"],
- param_after_rest: ["Rest parameter must be last formal parameter"],
- derived_constructor_return: ["Derived constructors may only return object or undefined"],
- for_in_loop_initializer: ["for-in loop variable declaration may not have an initializer."],
- for_of_loop_initializer: ["for-of loop variable declaration may not have an initializer."],
- for_inof_loop_multi_bindings: ["Invalid left-hand side in ", "%0", " loop: Must have a single binding."],
- bad_getter_arity: ["Getter must not have any formal parameters."],
- bad_setter_arity: ["Setter must have exactly one formal parameter."],
- this_formal_parameter: ["'this' is not a valid formal parameter name"],
- duplicate_arrow_function_formal_parameter: ["Arrow function may not have duplicate parameter names"]
-};
-
-
-function FormatString(format, args) {
- var result = "";
- var arg_num = 0;
- for (var i = 0; i < format.length; i++) {
- var str = format[i];
- if (str.length == 2 && %_StringCharCodeAt(str, 0) == 0x25) {
- // Two-char string starts with "%".
- var arg_num = (%_StringCharCodeAt(str, 1) - 0x30) >>> 0;
- if (arg_num < 4) {
- // str is one of %0, %1, %2 or %3.
- try {
- str = NoSideEffectToString(args[arg_num]);
- } catch (e) {
- if (%IsJSModule(args[arg_num]))
- str = "module";
- else if (IS_SPEC_OBJECT(args[arg_num]))
- str = "object";
- else
- str = "#<error>";
- }
- }
- }
- result += str;
- }
- return result;
-}
-
function NoSideEffectsObjectToString() {
if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
@@ -218,7 +87,7 @@ function NoSideEffectToString(obj) {
}
if (IS_SYMBOL(obj)) return %_CallFunction(obj, $symbolToString);
if (IS_OBJECT(obj)
- && %GetDataProperty(obj, "toString") === $objectToString) {
+ && %GetDataProperty(obj, "toString") === ObjectToString) {
var constructor = %GetDataProperty(obj, "constructor");
if (typeof constructor == "function") {
var constructorName = constructor.name;
@@ -270,7 +139,7 @@ function ToStringCheckErrorObject(obj) {
function ToDetailString(obj) {
- if (obj != null && IS_OBJECT(obj) && obj.toString === $objectToString) {
+ if (obj != null && IS_OBJECT(obj) && obj.toString === ObjectToString) {
var constructor = obj.constructor;
if (typeof constructor == "function") {
var constructorName = constructor.name;
@@ -303,20 +172,14 @@ function MakeGenericError(constructor, type, arg0, arg1, arg2) {
// Helper functions; called from the runtime system.
function FormatMessage(type, arg0, arg1, arg2) {
- if (IS_NUMBER(type)) {
- var arg0 = NoSideEffectToString(arg0);
- var arg1 = NoSideEffectToString(arg1);
- var arg2 = NoSideEffectToString(arg2);
- try {
- return %FormatMessageString(type, arg0, arg1, arg2);
- } catch (e) {
- return "";
- }
+ var arg0 = NoSideEffectToString(arg0);
+ var arg1 = NoSideEffectToString(arg1);
+ var arg2 = NoSideEffectToString(arg2);
+ try {
+ return %FormatMessageString(type, arg0, arg1, arg2);
+ } catch (e) {
+ return "<error>";
}
- // TODO(yangguo): remove this code path once we migrated all messages.
- var format = kMessages[type];
- if (!format) return "<unknown message " + type + ">";
- return FormatString(format, arg0);
}
@@ -394,7 +257,7 @@ function ScriptLocationFromPosition(position,
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- if (end > 0 && %_CallFunction(this.source, end - 1, $stringCharAt) == '\r') {
+ if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') {
end--;
}
var column = position - start;
@@ -517,7 +380,7 @@ function ScriptSourceLine(opt_line) {
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- return %_CallFunction(this.source, start, end, $stringSubstring);
+ return %_CallFunction(this.source, start, end, StringSubstring);
}
@@ -533,6 +396,16 @@ function ScriptLineCount() {
/**
+ * Returns the position of the nth line end.
+ * @return {number}
+ * Zero-based position of the nth line end in the script.
+ */
+function ScriptLineEnd(n) {
+ return this.line_ends[n];
+}
+
+
+/**
 * If a sourceURL comment is available, returns its contents.
 * Otherwise, the script name is returned. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
@@ -548,7 +421,7 @@ function ScriptNameOrSourceURL() {
}
-$setUpLockedPrototype(Script, [
+utils.SetUpLockedPrototype(Script, [
"source",
"name",
"source_url",
@@ -563,7 +436,8 @@ $setUpLockedPrototype(Script, [
"sourceSlice", ScriptSourceSlice,
"sourceLine", ScriptSourceLine,
"lineCount", ScriptLineCount,
- "nameOrSourceURL", ScriptNameOrSourceURL
+ "nameOrSourceURL", ScriptNameOrSourceURL,
+ "lineEnd", ScriptLineEnd
]
);
@@ -608,11 +482,11 @@ function SourceLocationSourceText() {
return %_CallFunction(this.script.source,
this.start,
this.end,
- $stringSubstring);
+ StringSubstring);
}
-$setUpLockedPrototype(SourceLocation,
+utils.SetUpLockedPrototype(SourceLocation,
["script", "position", "line", "column", "start", "end"],
["sourceText", SourceLocationSourceText]
);
@@ -653,10 +527,10 @@ function SourceSliceSourceText() {
return %_CallFunction(this.script.source,
this.from_position,
this.to_position,
- $stringSubstring);
+ StringSubstring);
}
-$setUpLockedPrototype(SourceSlice,
+utils.SetUpLockedPrototype(SourceSlice,
["script", "from_line", "to_line", "from_position", "to_position"],
["sourceText", SourceSliceSourceText]
);
@@ -680,10 +554,10 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-var CallSiteReceiverKey = NEW_PRIVATE_OWN("CallSite#receiver");
-var CallSiteFunctionKey = NEW_PRIVATE_OWN("CallSite#function");
-var CallSitePositionKey = NEW_PRIVATE_OWN("CallSite#position");
-var CallSiteStrictModeKey = NEW_PRIVATE_OWN("CallSite#strict_mode");
+var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
+var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
+var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
+var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
function CallSite(receiver, fun, pos, strict_mode) {
SET_PRIVATE(this, CallSiteReceiverKey, receiver);
@@ -828,12 +702,12 @@ function CallSiteToString() {
var methodName = this.getMethodName();
if (functionName) {
if (typeName &&
- %_CallFunction(functionName, typeName, $stringIndexOf) != 0) {
+ %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName &&
- (%_CallFunction(functionName, "." + methodName, $stringIndexOf) !=
+ (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
@@ -854,7 +728,7 @@ function CallSiteToString() {
return line;
}
-$setUpLockedPrototype(CallSite, ["receiver", "fun", "pos"], [
+utils.SetUpLockedPrototype(CallSite, ["receiver", "fun", "pos"], [
"getThis", CallSiteGetThis,
"getTypeName", CallSiteGetTypeName,
"isToplevel", CallSiteIsToplevel,
@@ -981,11 +855,12 @@ function FormatStackTrace(obj, raw_stack) {
}
lines.push(" at " + line);
}
- return %_CallFunction(lines, "\n", $arrayJoin);
+ return %_CallFunction(lines, "\n", ArrayJoin);
}
function GetTypeName(receiver, requireConstructor) {
+ if (IS_NULL_OR_UNDEFINED(receiver)) return null;
var constructor = receiver.constructor;
if (!constructor) {
return requireConstructor ? null :
@@ -999,7 +874,7 @@ function GetTypeName(receiver, requireConstructor) {
return constructorName;
}
-var formatted_stack_trace_symbol = NEW_PRIVATE_OWN("formatted stack trace");
+var formatted_stack_trace_symbol = NEW_PRIVATE("formatted stack trace");
// Format the stack trace if not yet done, and return it.
@@ -1041,13 +916,7 @@ var StackTraceSetter = function(v) {
// Use a dummy function since we do not actually want to capture a stack trace
// when constructing the initial Error prototypes.
-var captureStackTrace = function captureStackTrace(obj, cons_opt) {
- // Define accessors first, as this may fail and throw.
- $objectDefineProperty(obj, 'stack', { get: StackTraceGetter,
- set: StackTraceSetter,
- configurable: true });
- %CollectStackTrace(obj, cons_opt ? cons_opt : captureStackTrace);
-}
+var captureStackTrace = function() {};
// Define special error type constructors.
@@ -1102,9 +971,6 @@ GlobalSyntaxError = DefineError(global, function SyntaxError() { });
GlobalTypeError = DefineError(global, function TypeError() { });
GlobalURIError = DefineError(global, function URIError() { });
-
-GlobalError.captureStackTrace = captureStackTrace;
-
%AddNamedProperty(GlobalError.prototype, 'message', '', DONT_ENUM);
// Global list of error objects visited during ErrorToString. This is
@@ -1169,15 +1035,15 @@ function ErrorToString() {
}
}
-$installFunctions(GlobalError.prototype, DONT_ENUM,
- ['toString', ErrorToString]);
+utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
+ ['toString', ErrorToString]);
$errorToString = ErrorToString;
-$formatMessage = FormatMessage;
$getStackTraceLine = GetStackTraceLine;
$messageGetPositionInLine = GetPositionInLine;
$messageGetLineNumber = GetLineNumber;
$messageGetSourceLine = GetSourceLine;
+$noSideEffectToString = NoSideEffectToString;
$toDetailString = ToDetailString;
$Error = GlobalError;
@@ -1216,24 +1082,22 @@ MakeURIError = function() {
return MakeGenericError(GlobalURIError, kURIMalformed);
}
-// The embedded versions are called from unoptimized code, with embedded
-// arguments. Those arguments cannot be arrays, which are context-dependent.
-MakeSyntaxErrorEmbedded = function(type, arg) {
- return MakeGenericError(GlobalSyntaxError, type, [arg]);
-}
-
-MakeReferenceErrorEmbedded = function(type, arg) {
- return MakeGenericError(GlobalReferenceError, type, [arg]);
-}
-
-MakeTypeErrorEmbedded = function(type, arg) {
- return MakeGenericError(GlobalTypeError, type, [arg]);
-}
-
-//Boilerplate for exceptions for stack overflows. Used from
-//Isolate::StackOverflow().
+// Boilerplate for exceptions for stack overflows. Used from
+// Isolate::StackOverflow().
$stackOverflowBoilerplate = MakeRangeError(kStackOverflow);
%DefineAccessorPropertyUnchecked($stackOverflowBoilerplate, 'stack',
- StackTraceGetter, StackTraceSetter, DONT_ENUM);
+ StackTraceGetter, StackTraceSetter,
+ DONT_ENUM);
-})
+// Define actual captureStackTrace function after everything has been set up.
+captureStackTrace = function captureStackTrace(obj, cons_opt) {
+ // Define accessors first, as this may fail and throw.
+ ObjectDefineProperty(obj, 'stack', { get: StackTraceGetter,
+ set: StackTraceSetter,
+ configurable: true });
+ %CollectStackTrace(obj, cons_opt ? cons_opt : captureStackTrace);
+};
+
+GlobalError.captureStackTrace = captureStackTrace;
+
+});
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 1d38f3a3dc..2d1ac059a5 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -959,6 +959,20 @@ void Assembler::GenInstrImmediate(Opcode opcode,
}
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t j) {
+ DCHECK(rs.is_valid() && (is_uint21(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (j & kImm21Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26) {
+ DCHECK(is_int26(offset26));
+ Instr instr = opcode | (offset26 & kImm26Mask);
+ emit(instr);
+}
+
+
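The two new GenInstrImmediate overloads exist because the r6 compact branches carry immediates that do not fit the classic rs/rt/imm16 layout: one packs a 21-bit immediate below the rs field, the other a bare 26-bit offset. A minimal sketch of the bit packing, assuming the usual constants-mips.h values (kRsShift = 21, masks of (1 << n) - 1):

    #include <cstdint>

    const int kRsShift = 21;                    // Assumed field position.
    const uint32_t kImm21Mask = (1 << 21) - 1;  // Assumed mask values.
    const uint32_t kImm26Mask = (1 << 26) - 1;

    // opcode | rs | imm21, as in the first overload above.
    uint32_t EncodeImm21(uint32_t opcode, uint32_t rs_code, int32_t j) {
      return opcode | (rs_code << kRsShift) |
             (static_cast<uint32_t>(j) & kImm21Mask);
    }

    // opcode | imm26, as in the second overload (used by bc/balc).
    uint32_t EncodeImm26(uint32_t opcode, int32_t offset26) {
      return opcode | (static_cast<uint32_t>(offset26) & kImm26Mask);
    }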
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1156,6 +1170,19 @@ void Assembler::bal(int16_t offset) {
}
+void Assembler::bc(int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrImmediate(BC, offset);
+}
+
+
+void Assembler::balc(int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BALC, offset);
+}
+
+
void Assembler::beq(Register rs, Register rt, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
@@ -1355,7 +1382,7 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
- Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+ Instr instr = POP66 | (rs.code() << kRsShift) | (offset & kImm21Mask);
emit(instr);
}
@@ -1370,7 +1397,7 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
- Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+ Instr instr = POP76 | (rs.code() << kRsShift) | offset;
emit(instr);
}
@@ -1423,29 +1450,18 @@ void Assembler::jalr(Register rs, Register rd) {
}
-void Assembler::j_or_jr(int32_t target, Register rs) {
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- if (in_range) {
- j(target);
- } else {
- jr(t9);
- }
+void Assembler::jic(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = POP66 | (JIC << kRsShift) | (rt.code() << kRtShift) |
+ (offset & kImm16Mask);
+ emit(instr);
}
-void Assembler::jal_or_jalr(int32_t target, Register rs) {
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits+kImmFieldShift)) == 0;
- if (in_range) {
- jal(target);
- } else {
- jalr(t9);
- }
+void Assembler::jialc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(POP76, zero_reg, rt, offset);
}
@@ -1621,7 +1637,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1631,7 +1647,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1758,11 +1774,46 @@ void Assembler::lui(Register rd, int32_t j) {
void Assembler::aui(Register rs, Register rt, int32_t j) {
// This instruction uses the same opcode as 'lui'. The difference in encoding
// is that 'lui' has the zero register in the rs field.
+ DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
}
+// ---------PC-Relative instructions-----------
+
+void Assembler::addiupc(Register rs, int32_t imm19) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.is_valid() && is_int19(imm19));
+ int32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::lwpc(Register rs, int32_t offset19) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.is_valid() && is_int19(offset19));
+ int32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::auipc(Register rs, int16_t imm16) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.is_valid() && is_int16(imm16));
+ int32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::aluipc(Register rs, int16_t imm16) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.is_valid() && is_int16(imm16));
+ int32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
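All four PC-relative helpers share the single PCREL opcode; the specific operation is selected by extra bits folded into the top of the 21-bit immediate, leaving a 19-bit payload for addiupc/lwpc (2-bit selector) and a 16-bit payload for auipc/aluipc (5-bit selector). A sketch of the two layouts, with field widths assumed from constants-mips.h:

    #include <cstdint>

    const int kImm16Bits = 16;                  // Assumed widths.
    const int kImm19Bits = 19;
    const uint32_t kImm16Mask = (1 << 16) - 1;
    const uint32_t kImm19Mask = (1 << 19) - 1;

    // Selector above a 19-bit payload (addiupc, lwpc).
    uint32_t PcRelImm21From19(uint32_t selector, int32_t imm19) {
      return (selector << kImm19Bits) |
             (static_cast<uint32_t>(imm19) & kImm19Mask);
    }

    // Selector above a 16-bit payload (auipc, aluipc).
    uint32_t PcRelImm21From16(uint32_t selector, int16_t imm16) {
      return (selector << kImm16Bits) |
             (static_cast<uint32_t>(imm16) & kImm16Mask);
    }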
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -1904,45 +1955,12 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
}
-void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
-
- Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | SEL;
- emit(instr);
-}
-
-
void Assembler::seleqz(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
-void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
-}
-
-
-void Assembler::selnez(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
-}
-
-
-void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
-}
-
-
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (!IsMipsArchVariant(kMips32r6)) {
@@ -1970,6 +1988,12 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
+void Assembler::bitswap(Register rd, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
+}
+
+
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
@@ -1979,6 +2003,14 @@ void Assembler::pref(int32_t hint, const MemOperand& rs) {
}
+void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(is_uint3(bp));
+ uint16_t sa = (ALIGN << kBp2Bits) | bp;
+ GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
+}
+
+
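bitswap and align both dispatch through the SPECIAL3/BSHFL function slot; align distinguishes itself by packing an ALIGN selector and the 3-bit byte position bp into the 5-bit sa field. A one-line sketch of that packing, with kBp2Bits assumed to be 2:

    #include <cstdint>

    const int kBp2Bits = 2;  // Assumed width of the byte-position field.

    // sa = ALIGN selector above the byte position, as in align() above.
    uint16_t AlignSaField(uint16_t align_selector, uint8_t bp) {
      return static_cast<uint16_t>((align_selector << kBp2Bits) | (bp & 0x7));
    }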
// --------Coprocessor-instructions----------------
// Load, store, move.
@@ -2111,10 +2143,127 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
}
+void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
+}
+
+
+void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
+}
+
+
+void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
+}
+
+
+void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ sel(S, fd, fs, ft);
+}
+
+
+void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ sel(D, fd, fs, ft);
+}
+
+
+void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
+}
+
+
+void Assembler::selnez(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
+}
+
+
+void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
+}
+
+
+void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ seleqz(D, fd, fs, ft);
+}
+
+
+void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ seleqz(S, fd, fs, ft);
+}
+
+
+void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ selnez(D, fd, fs, ft);
+}
+
+
+void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ selnez(S, fd, fs, ft);
+}
+
+
+void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
+}
+
+
+void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
+}
+
+
+void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
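The movt/movf variants have no separate register operand for the FPU condition code, so they synthesize the ft field from cc plus a true/false bit: bit 0 selects movt (1) or movf (0), and the three cc bits sit above it. A sketch of that field construction:

    #include <cstdint>

    // ft field for MOVT.fmt / MOVF.fmt: cc in the upper bits, tf in bit 0
    // (tf = 1 selects movt, tf = 0 selects movf).
    int FtFieldForCc(uint16_t cc, int tf) {
      return ((cc & 0x0007) << 2) | (tf & 1);
    }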
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
+ GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
}
@@ -2124,7 +2273,7 @@ void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
+ GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
}
@@ -2134,7 +2283,7 @@ void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
+ GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
}
@@ -2151,7 +2300,7 @@ void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
- GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
+ GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
}
@@ -2161,7 +2310,7 @@ void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
+ GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
}
@@ -2171,12 +2320,17 @@ void Assembler::abs_d(FPURegister fd, FPURegister fs) {
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_S);
+}
+
+
+void Assembler::mov_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, MOV_D);
}
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
+ GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}
@@ -2186,7 +2340,7 @@ void Assembler::neg_d(FPURegister fd, FPURegister fs) {
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
+ GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
}
@@ -2195,6 +2349,30 @@ void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
+}
+
+
+void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
+}
+
+
+void Assembler::recip_d(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
+}
+
+
+void Assembler::recip_s(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
+}
+
+
// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
@@ -2252,6 +2430,7 @@ void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
@@ -2260,25 +2439,25 @@ void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -2313,6 +2492,18 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::class_s(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
+}
+
+
+void Assembler::class_d(FPURegister fd, FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
+}
+
+
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -2391,7 +2582,7 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -2407,7 +2598,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -2428,6 +2619,17 @@ void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
}
+void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, W, fd, fs, ft);
+}
+
+void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, L, fd, fs, ft);
+}
+
+
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
@@ -2446,6 +2648,7 @@ void Assembler::bc1nez(int16_t offset, FPURegister ft) {
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
+ DCHECK(fmt == S || fmt == D);
DCHECK((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
@@ -2453,6 +2656,18 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
}
+void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, S, fs, ft, cc);
+}
+
+
+void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, D, fs, ft, cc);
+}
+
+
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
DCHECK(src2 == 0.0);
@@ -2578,6 +2793,13 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dq(uint64_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
+}
+
+
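dq extends the existing db/dd raw-data emitters to 64-bit payloads for inline tables; each emitter stores its value at the current pc and advances it. A portable sketch of the behavior (the assembler writes through a reinterpret_cast; memcpy is the equivalent without alignment assumptions):

    #include <cstdint>
    #include <cstring>

    struct MiniBuffer {
      uint8_t* pc;
      void dd(uint32_t data) {   // 32-bit, as in Assembler::dd.
        std::memcpy(pc, &data, sizeof(data));
        pc += sizeof(data);
      }
      void dq(uint64_t data) {   // 64-bit, as in the new Assembler::dq.
        std::memcpy(pc, &data, sizeof(data));
        pc += sizeof(data);
      }
    };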
void Assembler::dd(Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -2875,20 +3097,7 @@ void Assembler::JumpToJumpRegister(Address pc) {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 69201dc32c..a44a16837b 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -500,19 +500,16 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
// On MIPS there is no Constant Pool so we skip that parameter.
- INLINE(static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool)) {
+ INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
}
- INLINE(static void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(pc, target, icache_flush_mode);
}
INLINE(static Address target_address_at(Address pc, Code* code)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
@@ -520,7 +517,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -609,6 +606,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -645,6 +645,10 @@ class Assembler : public AssemblerBase {
void b(Label* L) { b(branch_offset(L, false)>>2); }
void bal(int16_t offset);
void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+ void bc(int32_t offset);
+ void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+ void balc(int32_t offset);
+ void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
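As with the other branch helpers, the Label overloads shift the byte offset right by 2 before encoding, because MIPS branch immediates count 4-byte instructions rather than bytes; with the 26-bit field of bc/balc this gives a reach of roughly +/- 128 MB. A sketch of the range check, equivalent to is_int26 on the shifted offset:

    #include <cstdint>

    // True if a byte offset is reachable by a 26-bit compact branch.
    bool FitsCompactBranch(int32_t byte_offset) {
      int32_t imm26 = byte_offset >> 2;  // Instructions, not bytes.
      return imm26 >= -(1 << 25) && imm26 < (1 << 25);
    }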
void beq(Register rs, Register rt, int16_t offset);
void beq(Register rs, Register rt, Label* L) {
@@ -753,8 +757,8 @@ class Assembler : public AssemblerBase {
void jal(int32_t target);
void jalr(Register rs, Register rd = ra);
void jr(Register target);
- void j_or_jr(int32_t target, Register rs);
- void jal_or_jalr(int32_t target, Register rs);
+ void jic(Register rt, int16_t offset);
+ void jialc(Register rt, int16_t offset);
// -------Data-processing-instructions---------
@@ -819,6 +823,14 @@ class Assembler : public AssemblerBase {
void swr(Register rd, const MemOperand& rs);
+ // ---------PC-Relative-instructions-----------
+
+ void addiupc(Register rs, int32_t imm19);
+ void lwpc(Register rs, int32_t offset19);
+ void auipc(Register rs, int16_t imm16);
+ void aluipc(Register rs, int16_t imm16);
+
+
// ----------------Prefetch--------------------
void pref(int32_t hint, const MemOperand& rs);
@@ -853,17 +865,33 @@ class Assembler : public AssemblerBase {
void movf(Register rd, Register rs, uint16_t cc = 0);
void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
void seleqz(Register rd, Register rs, Register rt);
void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
void selnez(Register rd, Register rs, Register rt);
void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
-
+ void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);
+
+ void movz_s(FPURegister fd, FPURegister fs, Register rt);
+ void movz_d(FPURegister fd, FPURegister fs, Register rt);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movn_s(FPURegister fd, FPURegister fs, Register rt);
+ void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void bitswap(Register rd, Register rt);
+ void align(Register rd, Register rs, Register rt, uint8_t bp);
// --------Coprocessor-instructions----------------
@@ -896,10 +924,15 @@ class Assembler : public AssemblerBase {
void abs_s(FPURegister fd, FPURegister fs);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
+ void mov_s(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_s(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
+ void rsqrt_s(FPURegister fd, FPURegister fs);
+ void rsqrt_d(FPURegister fd, FPURegister fs);
+ void recip_d(FPURegister fd, FPURegister fs);
+ void recip_s(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
@@ -927,6 +960,9 @@ class Assembler : public AssemblerBase {
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void class_s(FPURegister fd, FPURegister fs);
+ void class_d(FPURegister fd, FPURegister fs);
+
void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
@@ -951,6 +987,8 @@ class Assembler : public AssemblerBase {
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
+ void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
+ void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
void bc1eqz(Label* L, FPURegister ft) {
@@ -964,6 +1002,8 @@ class Assembler : public AssemblerBase {
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
@@ -1054,6 +1094,8 @@ class Assembler : public AssemblerBase {
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
// Emits the address of the code stub's first instruction.
@@ -1138,11 +1180,12 @@ class Assembler : public AssemblerBase {
void CheckTrampolinePool();
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@@ -1317,6 +1360,8 @@ class Assembler : public AssemblerBase {
Register r1,
FPURegister r2,
int32_t j);
+ void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
+ void GenInstrImmediate(Opcode opcode, int32_t offset26);
void GenInstrJump(Opcode opcode,
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 90c5cd602b..b5a67b47dc 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -348,6 +348,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -379,9 +380,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(a2);
}
- // Preserve the two incoming parameters on the stack.
- __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
- __ MultiPushReversed(a0.bit() | a1.bit());
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ if (use_new_target) {
+ __ Push(a0, a1, a3);
+ } else {
+ __ Push(a0, a1);
+ }
Label rt_call, allocated, normal_new, count_incremented;
__ Branch(&normal_new, eq, a1, Operand(a3));
@@ -452,7 +457,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size (not including memento if create_memento)
+ // a3: object size (including memento if create_memento)
// t4: JSObject (not tagged)
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
__ mov(t5, t4);
@@ -532,7 +537,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Addu(t4, t4, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
+ // allocated object if not; allocate and initialize a FixedArray if yes.
// a1: constructor function
// t4: JSObject
// t5: start of next object (not tagged)
@@ -568,7 +573,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor
// a3: number of elements in properties array (untagged)
// t4: JSObject
- // t5: start of next object
+ // t5: start of FixedArray (untagged)
__ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
__ mov(a2, t5);
__ sw(t6, MemOperand(a2, JSObject::kMapOffset));
@@ -588,20 +593,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sll(t3, a3, kPointerSizeLog2);
__ addu(t6, a2, t3); // End of object.
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (!is_api_function || create_memento) {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2));
- }
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(a2));
- __ addiu(a2, a2, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, less, a2, Operand(t6));
+ if (!is_api_function || create_memento) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2));
}
+ __ InitializeFieldsWithFiller(a2, t6, t7);
// Store the initialized FixedArray into the properties field of
// the JSObject.
@@ -635,7 +633,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ lw(a2, MemOperand(sp, kPointerSize * 2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ lw(a2, MemOperand(sp, offset));
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
__ Branch(&count_incremented, eq, a2, Operand(t5));
// a2 is an AllocationSite. We are creating a memento from it, so we
@@ -648,22 +647,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- __ Push(t4, t4);
+ // Restore the parameters.
+ if (use_new_target) {
+ __ Pop(a3); // new.target
+ }
+ __ Pop(a1);
- // Reload the number of arguments from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
- __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+ // Retrieve smi-tagged arguments count from the stack.
+ __ lw(a0, MemOperand(sp));
+ __ SmiUntag(a0);
+
+ if (use_new_target) {
+ __ Push(a3, t4, t4);
+ } else {
+ __ Push(t4, t4);
+ }
// Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Set up number of arguments for function call below.
- __ srl(a0, a3, kSmiTagSize);
-
// Copy arguments and receiver to the expression stack.
// a0: number of arguments
// a1: constructor function
@@ -671,9 +673,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: new.target (if used)
+ // sp[2/3]: number of arguments (smi-tagged)
Label loop, entry;
+ __ SmiTag(a3, a0);
__ jmp(&entry);
__ bind(&loop);
__ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -698,7 +701,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -713,8 +718,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(v0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -732,9 +737,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ lw(a1, MemOperand(sp, offset));
// Leave construct frame.
}
@@ -748,12 +754,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
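Threading use_new_target through the helper adds one extra slot (new.target) to the construct frame, which is why the fixed sp-relative offsets above shift by a word when it is set. A sketch of the two offsets the stub computes, assuming 32-bit MIPS pointers:

    const int kPointerSize = 4;  // MIPS32.

    // Offset of the saved AllocationSite, as in the create_memento path.
    int AllocationSiteOffset(bool use_new_target) {
      return (use_new_target ? 3 : 2) * kPointerSize;
    }

    // Offset of the constructor function after the call returns.
    int ConstructorOffset(bool use_new_target) {
      return (use_new_target ? 2 : 1) * kPointerSize;
    }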
@@ -806,8 +817,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ Subu(t0, t0, Operand(2));
__ Branch(&loop, ge, t0, Operand(zero_reg));
- __ Addu(a0, a0, Operand(1));
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -830,9 +839,10 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// Restore context from the frame.
// v0: result
- // sp[0]: number of arguments (smi-tagged)
+ // sp[0]: new.target
+ // sp[1]: number of arguments (smi-tagged)
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ lw(a1, MemOperand(sp, 0));
+ __ lw(a1, MemOperand(sp, kPointerSize));
// Leave construct frame.
}
@@ -1406,6 +1416,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
__ lw(key, MemOperand(fp, indexOffset));
__ Branch(&entry);
@@ -1415,7 +1427,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ lw(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ li(slot, Operand(Smi::FromInt(index)));
+ __ li(vector, feedback_vector);
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
__ push(v0);
@@ -1742,6 +1761,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
+
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t2, FieldMemOperand(t1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t3, t2, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
+ kSmiTagSize)));
+ __ Branch(&no_strong_error, eq, t3, Operand(zero_reg));
+
+ // What we really care about is the required number of arguments.
+ __ lw(t2, FieldMemOperand(t1, SharedFunctionInfo::kLengthOffset));
+ __ SmiUntag(t2);
+ __ Branch(&no_strong_error, ge, a0, Operand(t2));
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Calculate copy start address into a0 and copy end address is fp.
@@ -1819,6 +1859,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e1cf6d6641..04aa17dfe2 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -93,9 +93,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc);
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cc, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -113,17 +112,17 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ a0.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments, adjust sp.
__ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sw(descriptor.GetEnvironmentParameterRegister(i),
+ __ sw(descriptor.GetRegisterParameter(i),
MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
__ CallExternalReference(miss, param_count);
@@ -276,9 +275,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc) {
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cc, Strength strength) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
@@ -293,14 +291,31 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Smis. If it's not a heap number, then return equal.
__ GetObjectType(a0, t4, t4);
if (cc == less || cc == greater) {
+ // Call runtime on identical JSObjects.
__ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics, since
+ // we need to throw a TypeError. Smis have already been ruled out.
+ __ Branch(&return_equal, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ __ And(t4, t4, Operand(kIsNotStringMask));
+ __ Branch(slow, ne, t4, Operand(zero_reg));
+ }
} else {
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
__ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ And(t4, t4, Operand(kIsNotStringMask));
+ __ Branch(slow, ne, t4, Operand(zero_reg));
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -585,7 +600,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc, strength());
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -713,7 +728,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- native = Builtins::COMPARE;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
ncr = GREATER;
@@ -1371,8 +1387,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = t1;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- !scratch.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
@@ -1596,9 +1611,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(t0, t1, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
t1, &miss);
@@ -1609,7 +1623,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1669,8 +1682,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[4] : receiver displacement
// sp[8] : function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1702,8 +1713,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// t2 : allocated object (tagged)
// t5 : mapped parameter count (tagged)
- CHECK(!has_new_target());
-
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
// a1 = parameter count (tagged)
@@ -1770,7 +1779,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
@@ -1961,13 +1970,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- Label skip_decrement;
- __ Branch(&skip_decrement, eq, a1, Operand(Smi::FromInt(0)));
- // Subtract 1 from smi-tagged arguments count.
- __ Subu(a1, a1, Operand(2));
- __ bind(&skip_decrement);
- }
__ sw(a1, MemOperand(sp, 0));
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(at));
@@ -2050,9 +2052,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // sp[0] : index of rest parameter
- // sp[4] : number of parameters
- // sp[8] : receiver displacement
+ // sp[0] : language mode
+ // sp[4] : index of rest parameter
+ // sp[8] : number of parameters
+ // sp[12] : receiver displacement
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -2063,16 +2066,16 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ sw(a1, MemOperand(sp, 2 * kPointerSize));
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(at));
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sw(a3, MemOperand(sp, 2 * kPointerSize));
+ __ sw(a3, MemOperand(sp, 3 * kPointerSize));
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@@ -2839,6 +2842,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, t1, Operand(at));
+ // Increment the call count for monomorphic function calls.
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(at, a2, Operand(at));
+ __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+ __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
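Both CallIC paths bump a per-slot call count kept in the feedback vector: the smi-tagged slot index is scaled to a byte offset (the smi tag absorbs one of the pointer-size shift bits), and the count lives one pointer past the slot itself. A sketch of the displacement arithmetic, ignoring the heap-object tag adjustment that FieldMemOperand applies, with the header size assumed:

    #include <cstdint>

    const int kPointerSize = 4;       // MIPS32.
    const int kPointerSizeLog2 = 2;
    const int kSmiTagSize = 1;        // One-bit smi tag.
    const int kHeaderSize = 8;        // Assumed FixedArray::kHeaderSize.

    // Byte displacement of the call count for a smi-tagged slot index.
    intptr_t CallCountOffset(intptr_t smi_slot) {
      intptr_t byte_offset = smi_slot << (kPointerSizeLog2 - kSmiTagSize);
      return byte_offset + kHeaderSize + kPointerSize;
    }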
__ mov(a2, t0);
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
@@ -2898,6 +2908,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(a1, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(at, a2, Operand(at));
+ __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+ __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2973,6 +2990,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(a2, with_types_offset));
+ // Initialize the call counter.
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(at, a2, Operand(at));
+ __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
// Store the function. Use a stub since we need a frame for allocation.
// a2 - vector
// a3 - slot
@@ -3077,9 +3100,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
// Consumed by runtime conversion function:
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Push(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
__ Push(object_, index_);
}
@@ -3094,9 +3117,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, v0);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
@@ -3743,7 +3766,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4569,15 +4592,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4596,12 +4619,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4696,11 +4717,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
- Register name = VectorLoadICDescriptor::NameRegister(); // a2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
+ Register name = LoadWithVectorDescriptor::NameRegister(); // a2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
Register feedback = t0;
Register receiver_map = t1;
Register scratch1 = t4;
@@ -4743,21 +4764,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
- Register key = VectorLoadICDescriptor::NameRegister(); // a2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
+ Register key = LoadWithVectorDescriptor::NameRegister(); // a2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
Register feedback = t0;
Register receiver_map = t1;
Register scratch1 = t4;
@@ -4792,7 +4813,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&try_poly_name, ne, at, Operand(feedback));
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4816,6 +4837,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -5504,6 +5577,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index a04b8cb0e7..23780d8b25 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -1262,6 +1262,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 7ecfe74295..0ef64f508d 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -141,6 +141,8 @@ bool Instruction::IsForbiddenInBranchDelay() const {
case BNEL:
case BLEZL:
case BGTZL:
+ case BC:
+ case BALC:
return true;
case REGIMM:
switch (RtFieldRaw()) {
@@ -173,6 +175,11 @@ bool Instruction::IsLinkingInstruction() const {
switch (op) {
case JAL:
return true;
+ case POP76:
+ if (RsFieldRawNoAssert() == JIALC)
+ return true; // JIALC
+ else
+ return false; // BNEZC
case REGIMM:
switch (RtFieldRaw()) {
case BGEZAL:
@@ -273,6 +280,24 @@ Instruction::Type Instruction::InstructionType() const {
case INS:
case EXT:
return kRegisterType;
+ case BSHFL: {
+ int sa = SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP:
+ return kRegisterType;
+ case WSBH:
+ case SEB:
+ case SEH:
+ return kUnsupported;
+ }
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ }
default:
return kUnsupported;
}
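The BSHFL case above decodes the sa field in two stages: the full 5-bit value distinguishes BITSWAP (and the unsupported WSBH/SEB/SEH), and only if none of those match are the low kBp2Bits bits shifted out so that ALIGN, which stores a 2-bit byte position in sa[1:0], can be matched on the remaining bits. A standalone sketch under those assumptions (helper name hypothetical; the explicit WSBH/SEB/SEH arm collapses into the default here):

    // Two-stage sa-field decode for BSHFL; kSaShift == 6, kBp2Bits == 2.
    bool IsSupportedBshfl(uint32_t instr_bits) {
      int sa = (instr_bits >> 6) & 0x1F;  // full 5-bit sa field
      if (sa == 0) return true;           // BITSWAP -> kRegisterType
      sa >>= 2;                           // drop the 2-bit byte position
      return sa == 2;                     // ALIGN -> kRegisterType
    }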
@@ -308,8 +333,8 @@ Instruction::Type Instruction::InstructionType() const {
case BNEL:
case BLEZL:
case BGTZL:
- case BEQZC:
- case BNEZC:
+ case POP66:
+ case POP76:
case LB:
case LH:
case LWL:
@@ -326,6 +351,9 @@ Instruction::Type Instruction::InstructionType() const {
case LDC1:
case SWC1:
case SDC1:
+ case PCREL:
+ case BC:
+ case BALC:
return kImmediateType;
// 26 bits immediate type instructions. e.g.: j imm26.
case J:
@@ -338,6 +366,7 @@ Instruction::Type Instruction::InstructionType() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 81f755b320..37ac2336bf 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -259,9 +259,15 @@ const int kSaBits = 5;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
+const int kBp2Shift = 6;
+const int kBp2Bits = 2;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
+const int kImm18Shift = 0;
+const int kImm18Bits = 18;
+const int kImm19Shift = 0;
+const int kImm19Bits = 19;
const int kImm21Shift = 0;
const int kImm21Bits = 21;
const int kImm26Shift = 0;
@@ -294,6 +300,9 @@ const int kFBtrueBits = 1;
// Instruction bit masks.
const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift;
+const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift;
+const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift;
const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
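Each mask follows the same pattern: a run of ones as wide as the field, shifted to the field's position. Worked out for the new constants (plain arithmetic, verifiable at compile time):

    static_assert(((1 << 19) - 1) == 0x7FFFF,  "kImm19Mask, shift 0");
    static_assert(((1 << 21) - 1) == 0x1FFFFF, "kImm21Mask, shift 0");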
@@ -311,60 +320,63 @@ const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
-
- BEQC = ((2 << 3) + 0) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
- BEQZC = ((6 << 3) + 6) << kOpcodeShift,
-
- PREF = ((6 << 3) + 3) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
- BNEZC = ((7 << 3) + 6) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
+ SPECIAL = 0 << kOpcodeShift,
+ REGIMM = 1 << kOpcodeShift,
+
+ J = ((0 << 3) + 2) << kOpcodeShift,
+ JAL = ((0 << 3) + 3) << kOpcodeShift,
+ BEQ = ((0 << 3) + 4) << kOpcodeShift,
+ BNE = ((0 << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0 << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0 << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1 << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1 << 3) + 1) << kOpcodeShift,
+ SLTI = ((1 << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1 << 3) + 3) << kOpcodeShift,
+ ANDI = ((1 << 3) + 4) << kOpcodeShift,
+ ORI = ((1 << 3) + 5) << kOpcodeShift,
+ XORI = ((1 << 3) + 6) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+
+ BEQC = ((2 << 3) + 0) << kOpcodeShift,
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2 << 3) + 4) << kOpcodeShift,
+ BNEL = ((2 << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2 << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2 << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
+ SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
+
+ LB = ((4 << 3) + 0) << kOpcodeShift,
+ LH = ((4 << 3) + 1) << kOpcodeShift,
+ LWL = ((4 << 3) + 2) << kOpcodeShift,
+ LW = ((4 << 3) + 3) << kOpcodeShift,
+ LBU = ((4 << 3) + 4) << kOpcodeShift,
+ LHU = ((4 << 3) + 5) << kOpcodeShift,
+ LWR = ((4 << 3) + 6) << kOpcodeShift,
+ SB = ((5 << 3) + 0) << kOpcodeShift,
+ SH = ((5 << 3) + 1) << kOpcodeShift,
+ SWL = ((5 << 3) + 2) << kOpcodeShift,
+ SW = ((5 << 3) + 3) << kOpcodeShift,
+ SWR = ((5 << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6 << 3) + 1) << kOpcodeShift,
+ BC = ((6 << 3) + 2) << kOpcodeShift,
+ LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ POP66 = ((6 << 3) + 6) << kOpcodeShift,
+
+ PREF = ((6 << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7 << 3) + 1) << kOpcodeShift,
+ BALC = ((7 << 3) + 2) << kOpcodeShift,
+ PCREL = ((7 << 3) + 3) << kOpcodeShift,
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+ POP76 = ((7 << 3) + 6) << kOpcodeShift,
+
+ COP1X = ((1 << 4) + 3) << kOpcodeShift
};
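Each enumerator is the 6-bit major opcode, written as ((row << 3) + column) to mirror the rows and columns of the MIPS32 opcode map, then shifted into bits 31..26 by kOpcodeShift (26). For example, the new r6 compact branch:

    // BC = ((6 << 3) + 2) << 26 = 50 << 26
    static_assert((((6u << 3) + 2u) << 26) == 0xC8000000u, "BC encoding");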
enum SecondaryField {
@@ -435,6 +447,14 @@ enum SecondaryField {
// SPECIAL3 Encoding of Function Field.
EXT = ((0 << 3) + 0),
INS = ((0 << 3) + 4),
+ BSHFL = ((4 << 3) + 0),
+
+ // SPECIAL3 Encoding of sa Field.
+ BITSWAP = ((0 << 3) + 0),
+ ALIGN = ((0 << 3) + 2),
+ WSBH = ((0 << 3) + 2),
+ SEB = ((2 << 3) + 0),
+ SEH = ((3 << 3) + 0),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
@@ -457,6 +477,15 @@ enum SecondaryField {
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+
+ ADD_S = ((0 << 3) + 0),
+ SUB_S = ((0 << 3) + 1),
+ MUL_S = ((0 << 3) + 2),
+ DIV_S = ((0 << 3) + 3),
+ ABS_S = ((0 << 3) + 5),
+ SQRT_S = ((0 << 3) + 4),
+ MOV_S = ((0 << 3) + 6),
+ NEG_S = ((0 << 3) + 7),
ROUND_L_S = ((1 << 3) + 0),
TRUNC_L_S = ((1 << 3) + 1),
CEIL_L_S = ((1 << 3) + 2),
@@ -465,10 +494,14 @@ enum SecondaryField {
TRUNC_W_S = ((1 << 3) + 5),
CEIL_W_S = ((1 << 3) + 6),
FLOOR_W_S = ((1 << 3) + 7),
+ RECIP_S = ((2 << 3) + 5),
+ RSQRT_S = ((2 << 3) + 6),
+ CLASS_S = ((3 << 3) + 3),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
+
// COP1 Encoding of Function Field When rs=D.
ADD_D = ((0 << 3) + 0),
SUB_D = ((0 << 3) + 1),
@@ -486,6 +519,9 @@ enum SecondaryField {
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
+ RECIP_D = ((2 << 3) + 5),
+ RSQRT_D = ((2 << 3) + 6),
+ CLASS_D = ((3 << 3) + 3),
MIN = ((3 << 3) + 4),
MINA = ((3 << 3) + 5),
MAX = ((3 << 3) + 6),
@@ -501,6 +537,7 @@ enum SecondaryField {
C_ULT_D = ((6 << 3) + 5),
C_OLE_D = ((6 << 3) + 6),
C_ULE_D = ((6 << 3) + 7),
+
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
@@ -544,12 +581,27 @@ enum SecondaryField {
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
SEL = ((2 << 3) + 0),
+ MOVZ_C = ((2 << 3) + 2),
+ MOVN_C = ((2 << 3) + 3),
SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
+ MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
+ // PCREL Encoding of rt Field.
+ ADDIUPC = ((0 << 2) + 0),
+ LWPC = ((0 << 2) + 1),
+ AUIPC = ((3 << 3) + 6),
+ ALUIPC = ((3 << 3) + 7),
+
+ // POP66 Encoding of rs Field.
+ JIC = ((0 << 5) + 0),
+
+ // POP76 Encoding of rs Field.
+ JIALC = ((0 << 5) + 0),
+
NULLSF = 0
};
@@ -683,14 +735,21 @@ inline Condition CommuteCondition(Condition cc) {
enum FPUCondition {
kNoFPUCondition = -1,
- F = 0, // False.
- UN = 1, // Unordered.
- EQ = 2, // Equal.
- UEQ = 3, // Unordered or Equal.
- OLT = 4, // Ordered or Less Than.
- ULT = 5, // Unordered or Less Than.
- OLE = 6, // Ordered or Less Than or Equal.
- ULE = 7 // Unordered or Less Than or Equal.
+ F = 0x00, // False.
+ UN = 0x01, // Unordered.
+ EQ = 0x02, // Equal.
+ UEQ = 0x03, // Unordered or Equal.
+ OLT = 0x04, // Ordered or Less Than, on Mips release < 6.
+ LT = 0x04, // Ordered or Less Than, on Mips release >= 6.
+ ULT = 0x05, // Unordered or Less Than.
+ OLE = 0x06, // Ordered or Less Than or Equal, on Mips release < 6.
+ LE = 0x06, // Ordered or Less Than or Equal, on Mips release >= 6.
+ ULE = 0x07, // Unordered or Less Than or Equal.
+
+  // The following constants are available on Mips release >= 6 only.
+  ORD = 0x11,  // Ordered, on Mips release >= 6.
+  UNE = 0x12,  // Not equal, on Mips release >= 6.
+  NE = 0x13,   // Ordered Greater Than or Less Than, on Mips release >= 6.
};
@@ -853,6 +912,11 @@ class Instruction {
return Bits(kFrShift + kFrBits -1, kFrShift);
}
+ inline int Bp2Value() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
+ }
+
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
@@ -896,7 +960,6 @@ class Instruction {
}
inline int SaFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType);
return InstructionBits() & kSaFieldMask;
}
@@ -925,13 +988,24 @@ class Instruction {
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
+ inline int32_t Imm18Value() const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
+ }
+
+ inline int32_t Imm19Value() const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
+ }
+
inline int32_t Imm21Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
inline int32_t Imm26Value() const {
- DCHECK(InstructionType() == kJumpType);
+ DCHECK((InstructionType() == kJumpType) ||
+ (InstructionType() == kImmediateType));
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index f2d50650b0..acc7af28e5 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -54,6 +54,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif // USE_SIMULATOR.
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index a14fac8d06..896309a36e 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -150,52 +150,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-mips.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for keyed IC load (from ic-mips.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC keyed store call (from ic-mips.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- a0 : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that v0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -292,6 +246,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 88dbcaf29c..69e8514f67 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -399,7 +399,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
+ // No embedded constant pool support.
UNREACHABLE();
}
@@ -407,4 +407,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index a6690883f0..48427c5455 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -81,13 +81,24 @@ class Decoder {
void PrintSs2(Instruction* instr);
void PrintBc(Instruction* instr);
void PrintCc(Instruction* instr);
+ void PrintBp2(Instruction* instr);
void PrintFunction(Instruction* instr);
void PrintSecondaryField(Instruction* instr);
void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr);
+ void PrintPCImm16(Instruction* instr, int delta_pc, int n_bits);
+ void PrintXImm18(Instruction* instr);
+ void PrintSImm18(Instruction* instr);
+ void PrintXImm19(Instruction* instr);
+ void PrintSImm19(Instruction* instr);
void PrintXImm21(Instruction* instr);
+ void PrintSImm21(Instruction* instr);
+ void PrintPCImm21(Instruction* instr, int delta_pc, int n_bits);
void PrintXImm26(Instruction* instr);
+ void PrintSImm26(Instruction* instr);
+ void PrintPCImm26(Instruction* instr, int delta_pc, int n_bits);
+ void PrintPCImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions.
void PrintFormat(Instruction* instr); // For floating format postfix.
// Printing of instruction name.
@@ -236,6 +247,12 @@ void Decoder::PrintCc(Instruction* instr) {
}
+void Decoder::PrintBp2(Instruction* instr) {
+ int bp2 = instr->Bp2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp2);
+}
+
+
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
@@ -257,6 +274,50 @@ void Decoder::PrintXImm16(Instruction* instr) {
}
+// Print absolute address for 16-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC + delta_pc + (offset << n_bits)
+void Decoder::PrintPCImm16(Instruction* instr, int delta_pc, int n_bits) {
+ int16_t offset = instr->Imm16Value();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) +
+ delta_pc + (offset << n_bits)));
+}
+
+
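For an ordinary conditional branch the caller passes delta_pc = 4 (the offset is relative to the instruction after the branch, i.e. the delay slot) and n_bits = 2 (the 16-bit field counts instructions, not bytes). A hypothetical worked example for a branch at 0x1000 with offset field 0x0010:

    // target = PC + delta_pc + (offset << n_bits)
    //        = 0x1000 + 4 + (0x0010 << 2) = 0x1044
    uint32_t Target16(uint32_t pc, int16_t offset) {
      return pc + 4 + (static_cast<int32_t>(offset) << 2);
    }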
+// Print 18-bit signed immediate value.
+void Decoder::PrintSImm18(Instruction* instr) {
+ int32_t imm =
+ ((instr->Imm18Value()) << (32 - kImm18Bits)) >> (32 - kImm18Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+
+// Print 18-bit hex immediate value.
+void Decoder::PrintXImm18(Instruction* instr) {
+ int32_t imm = instr->Imm18Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 19-bit hex immediate value.
+void Decoder::PrintXImm19(Instruction* instr) {
+ int32_t imm = instr->Imm19Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 19-bit signed immediate value.
+void Decoder::PrintSImm19(Instruction* instr) {
+ int32_t imm19 = instr->Imm19Value();
+ // set sign
+ imm19 <<= (32 - kImm19Bits);
+ imm19 >>= (32 - kImm19Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm19);
+}
+
+
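PrintSImm18/PrintSImm19 above (and the 21- and 26-bit variants below) all sign-extend the raw field with the same shift pair: move the field's sign bit up to bit 31, then arithmetic-shift back down. The idiom in isolation, on the usual two's-complement targets:

    // Sign-extend a bits-wide field to 32 bits.
    int32_t SignExtend(uint32_t field, int bits) {
      return static_cast<int32_t>(field << (32 - bits)) >> (32 - bits);
    }
    // SignExtend(0x7FFFF, 19) == -1;  SignExtend(0x3FFFF, 19) == 0x3FFFF.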
// Print 21-bit immediate value.
void Decoder::PrintXImm21(Instruction* instr) {
uint32_t imm = instr->Imm21Value();
@@ -264,13 +325,76 @@ void Decoder::PrintXImm21(Instruction* instr) {
}
-// Print 26-bit immediate value.
+// Print 21-bit signed immediate value.
+void Decoder::PrintSImm21(Instruction* instr) {
+ int32_t imm21 = instr->Imm21Value();
+ // set sign
+ imm21 <<= (32 - kImm21Bits);
+ imm21 >>= (32 - kImm21Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21);
+}
+
+
+// Print absolute address for 21-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC + delta_pc + (offset << n_bits)
+void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
+ int32_t imm21 = instr->Imm21Value();
+ // set sign
+ imm21 <<= (32 - kImm21Bits);
+ imm21 >>= (32 - kImm21Bits);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) +
+ delta_pc + (imm21 << n_bits)));
+}
+
+
+// Print 26-bit hex immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
+// Print 26-bit signed immediate value.
+void Decoder::PrintSImm26(Instruction* instr) {
+ int32_t imm26 = instr->Imm26Value();
+ // set sign
+ imm26 <<= (32 - kImm26Bits);
+ imm26 >>= (32 - kImm26Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm26);
+}
+
+
+// Print absolute address for 26-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC + delta_pc + (offset << n_bits)
+void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
+ int32_t imm26 = instr->Imm26Value();
+ // set sign
+ imm26 <<= (32 - kImm26Bits);
+ imm26 >>= (32 - kImm26Bits);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) +
+ delta_pc + (imm26 << n_bits)));
+}
+
+
+// Print absolute address for 26-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC[GPRLEN-1 .. 28] || instr_index26 || 00
+void Decoder::PrintPCImm26(Instruction* instr) {
+ int32_t imm26 = instr->Imm26Value();
+ uint32_t pc_mask = ~0xfffffff;
+ uint32_t pc = ((uint32_t)(instr + 1) & pc_mask) | (imm26 << 2);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress((reinterpret_cast<byte*>(pc))));
+}
+
+
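Unlike the PC-relative forms above, the J/JAL target keeps the top four bits of the delay slot's PC and replaces the low 28 bits with the 26-bit instruction index shifted left by two, which is what "instr + 1" and "pc_mask = ~0xfffffff" compute. In plain form:

    // PC[31:28] || instr_index26 || 00
    uint32_t JumpTarget(uint32_t delay_slot_pc, uint32_t instr_index26) {
      return (delay_slot_pc & 0xF0000000u) | (instr_index26 << 2);
    }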
// Print the code field of break and trap instructions.
void Decoder::PrintCode(Instruction* instr) {
if (instr->OpcodeFieldRaw() != SPECIAL)
@@ -389,25 +513,134 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "imm16"));
- if (format[5] == 's') {
- DCHECK(STRING_STARTS_WITH(format, "imm16s"));
- PrintSImm16(instr);
- } else if (format[5] == 'u') {
- DCHECK(STRING_STARTS_WITH(format, "imm16u"));
- PrintSImm16(instr);
- } else {
- DCHECK(STRING_STARTS_WITH(format, "imm16x"));
- PrintXImm16(instr);
+ if (format[4] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "imm16"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm16s"));
+ PrintSImm16(instr);
+ break;
+ case 'u':
+ DCHECK(STRING_STARTS_WITH(format, "imm16u"));
+ PrintSImm16(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm16x"));
+ PrintXImm16(instr);
+ break;
+ case 'p': { // The PC relative address.
+ DCHECK(STRING_STARTS_WITH(format, "imm16p"));
+ int delta_pc = 0;
+ int n_bits = 0;
+ switch (format[6]) {
+ case '4': {
+ DCHECK(STRING_STARTS_WITH(format, "imm16p4"));
+ delta_pc = 4;
+ switch (format[8]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "imm16p4s2"));
+ n_bits = 2;
+ PrintPCImm16(instr, delta_pc, n_bits);
+ return 9;
+ }
+ }
+ }
+ }
+ }
+ return 6;
+ } else if (format[4] == '8') {
+ DCHECK(STRING_STARTS_WITH(format, "imm18"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm18s"));
+ PrintSImm18(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm18x"));
+ PrintXImm18(instr);
+ break;
+ }
+ return 6;
+ } else if (format[4] == '9') {
+ DCHECK(STRING_STARTS_WITH(format, "imm19"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm19s"));
+ PrintSImm19(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm19x"));
+ PrintXImm19(instr);
+ break;
+ }
+ return 6;
}
- return 6;
} else if (format[3] == '2' && format[4] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "imm21x"));
- PrintXImm21(instr);
+ DCHECK(STRING_STARTS_WITH(format, "imm21"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm21s"));
+ PrintSImm21(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm21x"));
+ PrintXImm21(instr);
+ break;
+ case 'p': { // The PC relative address.
+ DCHECK(STRING_STARTS_WITH(format, "imm21p"));
+ int delta_pc = 0;
+ int n_bits = 0;
+ switch (format[6]) {
+ case '4': {
+ DCHECK(STRING_STARTS_WITH(format, "imm21p4"));
+ delta_pc = 4;
+ switch (format[8]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "imm21p4s2"));
+ n_bits = 2;
+ PrintPCImm21(instr, delta_pc, n_bits);
+ return 9;
+ }
+ }
+ }
+ }
+ }
return 6;
} else if (format[3] == '2' && format[4] == '6') {
- DCHECK(STRING_STARTS_WITH(format, "imm26x"));
- PrintXImm26(instr);
+ DCHECK(STRING_STARTS_WITH(format, "imm26"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm26s"));
+ PrintSImm26(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm26x"));
+ PrintXImm26(instr);
+ break;
+ case 'p': { // The PC relative address.
+ DCHECK(STRING_STARTS_WITH(format, "imm26p"));
+ int delta_pc = 0;
+ int n_bits = 0;
+ switch (format[6]) {
+ case '4': {
+ DCHECK(STRING_STARTS_WITH(format, "imm26p4"));
+ delta_pc = 4;
+ switch (format[8]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "imm26p4s2"));
+ n_bits = 2;
+ PrintPCImm26(instr, delta_pc, n_bits);
+ return 9;
+ }
+ }
+ }
+ }
+ case 'j': { // Absolute address for jump instructions.
+ DCHECK(STRING_STARTS_WITH(format, "imm26j"));
+ PrintPCImm26(instr);
+ break;
+ }
+ }
return 6;
}
}
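The reworked parser consumes one token per call and returns its length so the caller can advance. Tracing the new PC-relative token: for "imm16p4s2", format[3..4] = "16" selects the 16-bit family, 'p' selects PC-relative printing, '4' sets delta_pc = 4, "s2" sets n_bits = 2, and the function returns 9, the token's full length; the plain forms still consume 6. Summarized:

    // Token layout: imm <width> <mode> [ <delta_pc> s <n_bits> ]
    //   "imm18x"    -> 18-bit hex immediate,              consumes 6
    //   "imm21s"    -> 21-bit signed immediate,           consumes 6
    //   "imm26p4s2" -> PC-relative, delta_pc=4, n_bits=2, consumes 9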
@@ -442,10 +675,23 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
}
}
- case 'b': { // 'bc - Special for bc1 cc field.
- DCHECK(STRING_STARTS_WITH(format, "bc"));
- PrintBc(instr);
- return 2;
+ case 'b': {
+ switch (format[1]) {
+ case 'c': { // 'bc - Special for bc1 cc field.
+ DCHECK(STRING_STARTS_WITH(format, "bc"));
+ PrintBc(instr);
+ return 2;
+ }
+ case 'p': {
+ switch (format[2]) {
+ case '2': { // 'bp2
+ DCHECK(STRING_STARTS_WITH(format, "bp2"));
+ PrintBp2(instr);
+ return 3;
+ }
+ }
+ }
+ }
}
case 'C': { // 'Cc - Special for c.xx.d cc field.
DCHECK(STRING_STARTS_WITH(format, "Cc"));
@@ -511,6 +757,19 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
case SELNEZ_C:
Format(instr, "selnez.'t 'fd, 'fs, 'ft");
break;
+ case MOVZ_C:
+ Format(instr, "movz.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVN_C:
+ Format(instr, "movn.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVF:
+ if (instr->Bit(16)) {
+ Format(instr, "movt.'t 'fd, 'fs, 'Cc");
+ } else {
+ Format(instr, "movf.'t 'fd, 'fs, 'Cc");
+ }
+ break;
case ADD_D:
Format(instr, "add.'t 'fd, 'fs, 'ft");
break;
@@ -535,6 +794,12 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
case SQRT_D:
Format(instr, "sqrt.'t 'fd, 'fs");
break;
+ case RECIP_D:
+ Format(instr, "recip.'t 'fd, 'fs");
+ break;
+ case RSQRT_D:
+ Format(instr, "rsqrt.'t 'fd, 'fs");
+ break;
case CVT_W_D:
Format(instr, "cvt.w.'t 'fd, 'fs");
break;
@@ -550,12 +815,24 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
case ROUND_W_D:
Format(instr, "round.w.'t 'fd, 'fs");
break;
+ case ROUND_L_D:
+ Format(instr, "round.l.'t 'fd, 'fs");
+ break;
case FLOOR_W_D:
Format(instr, "floor.w.'t 'fd, 'fs");
break;
+ case FLOOR_L_D:
+ Format(instr, "floor.l.'t 'fd, 'fs");
+ break;
case CEIL_W_D:
Format(instr, "ceil.w.'t 'fd, 'fs");
break;
+ case CLASS_D:
+ Format(instr, "class.'t 'fd, 'fs");
+ break;
+ case CEIL_L_D:
+ Format(instr, "ceil.l.'t 'fd, 'fs");
+ break;
case CVT_S_D:
Format(instr, "cvt.s.'t 'fd, 'fs");
break;
@@ -619,6 +896,9 @@ void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) {
case CVT_S_L:
Format(instr, "cvt.s.l 'fd, 'fs");
break;
+ case CMP_AF:
+ Format(instr, "cmp.af.d 'fd, 'fs, 'ft");
+ break;
case CMP_UN:
Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
break;
@@ -907,7 +1187,7 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case INS: {
- if (IsMipsArchVariant(kMips32r2)) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
@@ -915,13 +1195,48 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
break;
}
case EXT: {
- if (IsMipsArchVariant(kMips32r2)) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);
}
break;
}
+ case BSHFL: {
+ int sa = instr->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP: {
+ if (IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "bitswap 'rd, 'rt");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case SEB:
+ case SEH:
+ case WSBH:
+ UNREACHABLE();
+ break;
+ default: {
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN: {
+ if (IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "align 'rd, 'rs, 'rt, 'bp2");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1003,16 +1318,16 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1:
if (instr->FBtrueValue()) {
- Format(instr, "bc1t 'bc, 'imm16u");
+ Format(instr, "bc1t 'bc, 'imm16u -> 'imm16p4s2");
} else {
- Format(instr, "bc1f 'bc, 'imm16u");
+ Format(instr, "bc1f 'bc, 'imm16u -> 'imm16p4s2");
}
break;
case BC1EQZ:
- Format(instr, "bc1eqz 'ft, 'imm16u");
+ Format(instr, "bc1eqz 'ft, 'imm16u -> 'imm16p4s2");
break;
case BC1NEZ:
- Format(instr, "bc1nez 'ft, 'imm16u");
+ Format(instr, "bc1nez 'ft, 'imm16u -> 'imm16p4s2");
break;
default:
UNREACHABLE();
@@ -1023,19 +1338,19 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
+ Format(instr, "bltz 'rs, 'imm16u -> 'imm16p4s2");
break;
case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
+ Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2");
break;
case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
+ Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2");
break;
case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
+ Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2");
break;
case BGEZALL:
- Format(instr, "bgezall 'rs, 'imm16u");
+ Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2");
break;
default:
UNREACHABLE();
@@ -1043,81 +1358,83 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
+ Format(instr, "beq 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ break;
+ case BC:
+ Format(instr, "bc 'imm26s -> 'imm26p4s2");
+ break;
+ case BALC:
+ Format(instr, "balc 'imm26s -> 'imm26p4s2");
break;
case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
+ Format(instr, "bne 'rs, 'rt, 'imm16u -> 'imm16p4s2");
break;
case BLEZ:
- if ((instr->RtFieldRaw() == 0)
- && (instr->RsFieldRaw() != 0)) {
- Format(instr, "blez 'rs, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgeuc 'rs, 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgezalc 'rs, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "blezalc 'rs, 'imm16u");
+ if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) {
+ Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() == instr->RsValue()) &&
+ (instr->RtValue() != 0)) {
+ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
case BGTZ:
- if ((instr->RtFieldRaw() == 0)
- && (instr->RsFieldRaw() != 0)) {
- Format(instr, "bgtz 'rs, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltuc 'rs, 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltzalc 'rt, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgtzalc 'rt, 'imm16u");
+ if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) {
+ Format(instr, "bgtz 'rs, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bltuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() == instr->RsValue()) &&
+ (instr->RtValue() != 0)) {
+ Format(instr, "bltzalc 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgtzalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
case BLEZL:
- if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgezc 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgec 'rs, 'rt, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "blezc 'rt, 'imm16u");
+ if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) {
+ Format(instr, "bgezc 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgec 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "blezc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
case BGTZL:
- if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltzc 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltc 'rs, 'rt, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgtzc 'rt, 'imm16u");
+ if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) {
+ Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
- case BEQZC:
- if (instr->RsFieldRaw() != 0) {
- Format(instr, "beqzc 'rs, 'imm21x");
+ case POP66:
+ if (instr->RsValue() == JIC) {
+ Format(instr, "jic 'rt, 'imm16s");
+ } else {
+ Format(instr, "beqzc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
- case BNEZC:
- if (instr->RsFieldRaw() != 0) {
- Format(instr, "bnezc 'rs, 'imm21x");
+ case POP76:
+ if (instr->RsValue() == JIALC) {
+ Format(instr, "jialc 'rt, 'imm16x");
+ } else {
+ Format(instr, "bnezc 'rs, 'imm21x -> 'imm21p4s2");
}
break;
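On r6 the old BEQZC/BNEZC major opcodes become the shared POP66/POP76 encodings decoded above: rs == 0 selects the compact jumps JIC/JIALC, whose 16-bit signed immediate is added to rt, while any nonzero rs keeps the 21-bit PC-relative BEQZC/BNEZC. A hypothetical discriminator, using the standard rs field position (bits 25..21):

    // rs == 0 -> JIC (POP66) / JIALC (POP76); otherwise BEQZC / BNEZC.
    bool IsCompactJump(uint32_t instr_bits) {
      return ((instr_bits >> 21) & 0x1F) == 0;
    }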
// ------------- Arithmetic instructions.
@@ -1126,10 +1443,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
// Check if BOVC or BEQC instruction.
- if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
- Format(instr, "bovc 'rs, 'rt, 'imm16s");
- } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
- Format(instr, "beqc 'rs, 'rt, 'imm16s");
+ if (instr->RsValue() >= instr->RtValue()) {
+ Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ } else if (instr->RsValue() < instr->RtValue()) {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1138,10 +1455,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case DADDI:
if (IsMipsArchVariant(kMips32r6)) {
// Check if BNVC or BNEC instruction.
- if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
- Format(instr, "bnvc 'rs, 'rt, 'imm16s");
- } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
- Format(instr, "bnec 'rs, 'rt, 'imm16s");
+ if (instr->RsValue() >= instr->RtValue()) {
+ Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ } else if (instr->RsValue() < instr->RtValue()) {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1228,6 +1545,35 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
+ case PCREL: {
+ int32_t imm21 = instr->Imm21Value();
+      // rt field: check all 5 bits.
+ uint8_t rt = (imm21 >> kImm16Bits);
+ switch (rt) {
+ case ALUIPC:
+ Format(instr, "aluipc 'rs, 'imm16s");
+ break;
+ case AUIPC:
+ Format(instr, "auipc 'rs, 'imm16s");
+ break;
+ default: {
+        // rt field: check the most significant 2 bits.
+ rt = (imm21 >> kImm19Bits);
+ switch (rt) {
+ case LWPC:
+ Format(instr, "lwpc 'rs, 'imm19s");
+ break;
+ case ADDIUPC:
+ Format(instr, "addiupc 'rs, 'imm19s");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ break;
+ }
default:
printf("a 0x%x \n", instr->OpcodeFieldRaw());
UNREACHABLE();
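The PCREL arm above packs its minor opcode into the rt field at the top of the 21-bit immediate: AUIPC (0x1E) and ALUIPC (0x1F) need all five bits, while LWPC (1) and ADDIUPC (0) are identified by just the top two, leaving 19 bits of offset. The same selection in isolation (field widths taken from the diff; variables hypothetical):

    // rt occupies imm21[20:16]; kImm16Bits == 16, kImm19Bits == 19.
    uint8_t rt5 = (imm21 >> 16) & 0x1F;  // full 5 bits: AUIPC / ALUIPC
    uint8_t rt2 = (imm21 >> 19) & 0x3;   // top 2 bits:  LWPC / ADDIUPC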
@@ -1239,10 +1585,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
- Format(instr, "j 'imm26x");
+ Format(instr, "j 'imm26x -> 'imm26j");
break;
case JAL:
- Format(instr, "jal 'imm26x");
+ Format(instr, "jal 'imm26x -> 'imm26j");
break;
default:
UNREACHABLE();
@@ -1279,8 +1625,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
-} } // namespace v8::internal
-
+} // namespace internal
+} // namespace v8
//------------------------------------------------------------------------------
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index b65f1bff95..3e6293e2e5 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 633a887b5b..f0b734db1c 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -169,36 +169,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -6 * kPointerSize;
- static const int kConstructorOffset = -5 * kPointerSize;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 5ac885a93c..841ee4b995 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -113,10 +113,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-mips.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -135,7 +131,7 @@ void FullCodeGenerator::Generate() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ info->MayUseThis() && info->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ lw(at, MemOperand(sp, receiver_offset));
@@ -200,17 +196,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -225,8 +221,9 @@ void FullCodeGenerator::Generate() {
__ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -250,10 +247,50 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+  // Possibly set up a local binding to the this-function, which is used
+  // in derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+      // The write barrier clobbers the register again; keep it marked as such.
+ }
+ SetVar(this_function_var, a1, a2, a3);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+
+ // Get the frame pointer for the calling frame.
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne, a1,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+
+ Label non_construct_frame, done;
+ __ Branch(&non_construct_frame, ne, a1,
+ Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+
+ __ lw(v0,
+ MemOperand(a2, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ Branch(&done);
+
+ __ bind(&non_construct_frame);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+
+ SetVar(new_target_var, v0, a2, a3);
+ }
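A C-style restatement of the frame walk just emitted may help; offsets and helpers here are symbolic, a sketch rather than V8 code. Load the caller's frame pointer, skip one more level if the caller is an arguments-adaptor frame, then inspect the frame marker: only a CONSTRUCT frame carries the original constructor (new.target); everything else yields undefined.

    // Sketch: recover new.target from the calling frame.
    Address caller_fp = Load(fp + kCallerFPOffset);
    if (Load(caller_fp + kContextOffset) == Smi(ARGUMENTS_ADAPTOR))
      caller_fp = Load(caller_fp + kCallerFPOffset);  // skip adaptor frame
    Object* new_target =
        (Load(caller_fp + kMarkerOffset) == Smi(CONSTRUCT))
            ? Load(caller_fp + kOriginalConstructorOffset)
            : undefined_value;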
// Possibly allocate RestParameters
int rest_index;
@@ -263,16 +300,13 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ Addu(a3, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ li(a2, Operand(Smi::FromInt(num_parameters)));
__ li(a1, Operand(Smi::FromInt(rest_index)));
- __ Push(a3, a2, a1);
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
+ __ Push(a3, a2, a1, a0);
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -310,7 +344,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, v0, a1, a2);
@@ -335,7 +369,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -471,11 +505,8 @@ void FullCodeGenerator::EmitReturnSequence() {
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int32_t sp_delta = arg_count * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
masm_->mov(sp, fp);
int no_frame_start = masm_->pc_offset();
@@ -844,7 +875,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
@@ -852,8 +884,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -861,7 +893,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -872,7 +904,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
@@ -904,25 +936,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ sw(result_register(), StackOperand(variable));
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -941,7 +974,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ li(a2, Operand(variable->name()));
__ li(a1, Operand(Smi::FromInt(NONE)));
@@ -959,20 +992,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -1051,9 +1085,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1097,8 +1131,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1106,7 +1141,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1206,7 +1241,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
// Load the current count to a0, load the length to a1.
__ lw(a0, MemOperand(sp, 0 * kPointerSize));
@@ -1240,10 +1275,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(a1, a3); // Enumerable and current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(a3, result_register());
- __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(loop_statement.continue_label(), eq, a3, Operand(at));
// Update the 'each' property or variable from the possibly filtered
// entry in register a3.
@@ -1251,7 +1287,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), a3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1312,38 +1348,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ lw(LoadDescriptor::ReceiverRegister(),
- MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ li(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- Label done;
- __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1394,17 +1408,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
- CallLoadIC(mode);
+ // All extension objects were empty, so it is safe to use the normal
+ // global load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1473,30 +1479,43 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
// Record position before possible IC call.
- SetSourcePosition(proxy->position());
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(v0);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1567,16 +1586,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(v0);
}
@@ -1647,7 +1670,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1667,13 +1689,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in v0.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1701,7 +1722,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1709,6 +1735,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(), MemOperand(sp));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1722,7 +1751,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1760,9 +1790,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1797,7 +1831,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1843,6 +1878,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(v0);
}
+
+ // Verify that compilation exactly consumed the number of store IC slots
+ // that the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1883,8 +1922,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1898,7 +1940,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
__ sw(result_register(), FieldMemOperand(a1, offset));
@@ -1907,14 +1949,40 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kRAHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ li(a3, Operand(Smi::FromInt(i)));
+ __ li(a3, Operand(Smi::FromInt(array_index)));
__ mov(a0, result_register());
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
+
+ // In case the array literal contains spread expressions it has two parts.
+ // The first part is the "static" array with literal indices and is handled
+ // above. The second part starts at the first spread expression (inclusive);
+ // these elements get appended to the array. Note that the number of
+ // elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ Pop(); // literal index
+ __ Pop(v0);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(v0);
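+ // v0 holds the array; push it as the first argument to the append or
+ // concat call below.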
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
if (result_saved) {
__ Pop(); // literal index
context()->PlugTOS();
@@ -1928,9 +1996,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1947,8 +2016,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = a1;
@@ -1958,8 +2029,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_SUPER_PROPERTY: {
const Register scratch = a1;
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
__ Push(scratch, result_register());
@@ -2017,7 +2090,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(v0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
@@ -2034,14 +2106,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
break;
@@ -2065,6 +2136,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2147,7 +2220,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(a0); // result
- EnterTryBlock(expr->index(), &l_catch);
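+ // Allocate the handler table entry at code generation time instead of
+ // reusing an index stored on the AST node.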
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(a0); // result
__ jmp(&l_suspend);
@@ -2158,7 +2232,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ lw(a0, MemOperand(sp, generator_object_depth));
__ push(a0); // g
- __ Push(Smi::FromInt(expr->index())); // handler-index
+ __ Push(Smi::FromInt(handler_index)); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
__ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
@@ -2172,7 +2246,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EmitReturnSequence();
__ mov(a0, v0);
__ bind(&l_resume); // received in a0
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2185,11 +2259,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ lw(load_receiver, MemOperand(sp, kPointerSize));
__ lw(load_name, MemOperand(sp, 2 * kPointerSize));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
@@ -2205,10 +2277,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
@@ -2218,10 +2288,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
context()->DropAndPlug(2, v0); // drop iter and g
break;
@@ -2357,51 +2425,45 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ li(LoadDescriptor::NameRegister(), Operand(key->value()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
+
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
@@ -2427,8 +2489,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2498,7 +2560,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in v0.
DCHECK(lit != NULL);
__ push(v0);
@@ -2532,7 +2595,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2571,8 +2635,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
__ pop(a1);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2580,17 +2644,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2600,13 +2665,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(v0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; v0: home_object
Register scratch = a2;
Register scratch2 = a3;
@@ -2621,9 +2688,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ Push(v0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = a2;
Register scratch2 = a3;
@@ -2646,6 +2713,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(StoreDescriptor::NameRegister(), result_register());
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2669,12 +2737,14 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
__ lw(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2763,13 +2833,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2808,9 +2881,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
// Call keyed store IC.
// The arguments are:
// - a0 is the value,
@@ -2822,7 +2892,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2831,6 +2906,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2839,9 +2916,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), v0);
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2852,9 +2929,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2906,22 +2983,23 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ SetExpressionPosition(expr);
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = a1;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
__ mov(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(scratch, v0, v0, scratch);
__ Push(key->value());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2929,7 +3007,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2971,15 +3050,16 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
const Register scratch = a1;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
__ Move(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(scratch, v0, v0, scratch);
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2987,7 +3067,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -3003,14 +3084,12 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
// Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -3036,32 +3115,20 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// t2: the receiver of the enclosing function.
__ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // t1: the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
+ // t1: the language mode.
+ __ li(t1, Operand(Smi::FromInt(language_mode())));
- // t0: the language mode.
- __ li(t0, Operand(Smi::FromInt(language_mode())));
-
- // a1: the start position of the scope the calls resides in.
- __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+ // t0: the start position of the scope the call resides in.
+ __ li(t0, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ Push(t3);
- __ Push(t2, t1, t0, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(a0);
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ Push(t3, t2, t1, t0);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
+ SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
Variable* this_var = super_ref->this_var()->var();
GetVar(a1, this_var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -3072,7 +3139,52 @@ void FullCodeGenerator::EmitInitializeThisAfterSuper(
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in v0)
+ // and the object holding it (returned in v1).
+ DCHECK(!context_register().is(a2));
+ __ li(a2, Operand(callee->name()));
+ __ Push(context_register(), a2);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(v0, v1); // Function, receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ Branch(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(v0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing undefined to the call function stub.
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ push(a1);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2); // Reserved receiver slot.
+ }
}
@@ -3089,16 +3201,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
-
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ push(a2); // Reserved receiver slot.
+ PushCalleeAndWithBaseObject(expr);
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
@@ -3111,15 +3218,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(a1);
EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in v0 (function) and
- // v1 (receiver). Touch up the stack with the right values.
+ // Touch up the stack with the resolved function.
__ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3131,43 +3235,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in v0)
- // and the object holding it (returned in v1).
- DCHECK(!context_register().is(a2));
- __ li(a2, Operand(proxy->name()));
- __ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(v0, v1); // Function, receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ Branch(&call);
- __ bind(&done);
- // Push function.
- __ push(v0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
@@ -3179,10 +3247,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
- }
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3194,9 +3259,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
- }
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
// Emit function call.
@@ -3219,7 +3282,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3231,7 +3294,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3255,11 +3318,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor();
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
+
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3271,7 +3337,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3297,7 +3363,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(v0);
}
@@ -3586,6 +3652,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
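+ // Smis are never typed arrays; for heap objects compare the instance type
+ // against JS_TYPED_ARRAY_TYPE.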
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_TYPED_ARRAY_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3832,6 +3920,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
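+ // A date is a non-Smi heap object with instance type JS_DATE_TYPE.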
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_DATE_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3840,20 +3950,15 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = v0;
Register result = v0;
Register scratch0 = t5;
Register scratch1 = a1;
- __ JumpIfSmi(object, &not_date_object);
- __ GetObjectType(object, scratch1, scratch1);
- __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
-
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ li(scratch1, Operand(stamp));
@@ -3869,13 +3974,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ li(a1, Operand(index));
__ Move(a0, object);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(v0);
+ context()->Plug(result);
}
@@ -4183,11 +4285,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
- EmitLoadSuperConstructor();
+ // new.target
+ VisitForStackValue(args->at(0));
+
+ // .this_function
+ VisitForStackValue(args->at(1));
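+ // The super constructor is the prototype of the active function.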
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -4206,8 +4312,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(a1, a1);
- // Subtract 1 from arguments count, for new.target.
- __ Addu(a1, a1, Operand(-1));
__ mov(a0, a1);
// Get arguments pointer in a2.
@@ -4589,11 +4693,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr == CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4603,8 +4710,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ push(at);
__ sw(v0, MemOperand(sp, kPointerSize));
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4620,7 +4727,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, v0);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4633,13 +4741,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4647,8 +4751,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4673,6 +4776,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4698,6 +4802,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(v0);
}
@@ -4722,10 +4827,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
__ li(a0, Operand(Smi::FromInt(SLOPPY)));
@@ -4735,7 +4841,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4823,10 +4929,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4849,8 +4954,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
const Register scratch = a1;
__ lw(scratch, MemOperand(sp, kPointerSize));
@@ -4860,8 +4966,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
const Register scratch = a1;
const Register scratch1 = t0;
__ Move(scratch, result_register());
@@ -4942,9 +5049,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4976,22 +5085,25 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(count_value)));
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
+
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), Token::ADD, language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in v0.
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(v0);
}
@@ -5002,7 +5114,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
}
@@ -5012,7 +5124,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5051,7 +5168,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5066,45 +5188,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(proxy->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(v0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ li(a0, Operand(proxy->name()));
- __ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(v0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5182,7 +5265,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5233,9 +5316,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Split(cc, a1, Operand(a0), if_true, if_false, NULL);
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -5348,6 +5431,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ li(at, Operand(pending_message_obj));
__ lw(a1, MemOperand(at));
__ push(a1);
+
+ ClearPendingMessage();
}
@@ -5372,6 +5457,23 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(a1));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
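+ // Clear the pending message by overwriting it with the hole value.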
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ li(at, Operand(pending_message_obj));
+ __ sw(a1, MemOperand(at));
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
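+ // The vector store IC trampoline expects the slot index as a Smi in its
+ // slot register.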
+ __ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(slot)));
+}
+
+
#undef __
@@ -5451,6 +5553,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 01db51672a..0379da0ad8 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
+const Register LoadDescriptor::SlotRegister() { return a0; }
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return a0; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return a3; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
const Register StoreDescriptor::ReceiverRegister() { return a1; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return t0; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
@@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return a2; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3, a2, a1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a3, a2, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3, a2, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a3, a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a2, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a3, a1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a2, a3, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a3, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a3, a2};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a3, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
@@ -166,210 +162,183 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, a0, a1, a2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a0, a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a0, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
- Register registers[] = {cp};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// a0 -- number of arguments
// a1 -- function
// a2 -- allocation site with elements kind
- Register registers[] = {cp, a1, a2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {cp, a1, a2, a0};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a2, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// a0 -- number of arguments
// a1 -- constructor function
- Register registers[] = {cp, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {cp, a1, a0};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a2, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a2, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a0, // receiver
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a1, // JSFunction
a0, // actual number of arguments
a2, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a0, // callee
t0, // call_data
a2, // holder
a1, // api_function_address
a3, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a0, // callee
t0, // call_data
a2, // holder
a1, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // math rounding function
+ a3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
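
Every initializer in this file follows the same migration: the explicit context register cp and the per-parameter Representation table disappear, leaving each platform to declare only its register assignments. A hedged sketch of the post-migration shape (MyStubDescriptor is hypothetical; the call pattern matches the hunks above):

void MyStubDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // cp is now implicit; list only the stub's explicit parameter registers.
  Register registers[] = {a1, a0};
  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}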
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index b173e30a2b..5c26001ef0 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -143,8 +143,8 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
+ !info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -217,8 +217,9 @@ bool LCodeGen::GeneratePrologue() {
__ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -593,52 +594,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@@ -927,28 +893,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1723,20 +1672,16 @@ void LCodeGen::DoSubI(LSubI* instr) {
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() || right->IsConstantOperand()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ SubuAndCheckForOverflow(ToRegister(result),
ToRegister(left),
right_reg,
overflow); // Reg at also used as scratch.
} else {
- DCHECK(right->IsRegister());
- // Due to overflow check macros not supporting constant operands,
- // handling the IsConstantOperand case was moved to prev if clause.
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- ToRegister(right),
- overflow); // Reg at also used as scratch.
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
+ ToOperand(right), overflow, scratch);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
Operand(zero_reg));
@@ -1800,21 +1745,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(a0));
DCHECK(result.is(v0));
DCHECK(!scratch.is(scratch0()));
DCHECK(!scratch.is(object));
- __ SmiTst(object, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- __ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch,
- Operand(JS_DATE_TYPE));
-
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ li(scratch, Operand(stamp));
@@ -1929,21 +1868,16 @@ void LCodeGen::DoAddI(LAddI* instr) {
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsConstantOperand()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ AdduAndCheckForOverflow(ToRegister(result),
ToRegister(left),
right_reg,
overflow); // Reg at also used as scratch.
} else {
- DCHECK(right->IsRegister());
- // Due to overflow check macros not supporting constant operands,
- // handling the IsConstantOperand case was moved to prev if clause.
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- ToRegister(right),
- overflow); // Reg at also used as scratch.
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
+ ToOperand(right), overflow, scratch);
}
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
Operand(zero_reg));
@@ -2059,8 +1993,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
@@ -2542,7 +2476,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2844,7 +2779,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2906,10 +2842,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(a0));
AllowDeferredHandleDereference vector_structure_check;
@@ -2922,6 +2857,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ li(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ li(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
@@ -2929,11 +2878,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3032,12 +2979,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3203,7 +3149,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3362,9 +3309,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3951,30 +3898,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
-
- Register scratch = t0;
- Register extra = t1;
- Register extra2 = t2;
- Register extra3 = t5;
-
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(
- masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Tail call to miss if we ended up here.
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
@@ -4244,10 +4167,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4357,7 +4284,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4478,6 +4406,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4485,6 +4417,101 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = v0;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ jmp(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ Branch(deferred->entry(), le, ToRegister(current_capacity),
+ Operand(constant_key));
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ Branch(deferred->entry(), ge, ToRegister(key),
+ Operand(constant_capacity));
+ } else {
+ __ Branch(deferred->entry(), ge, ToRegister(key),
+ Operand(ToRegister(current_capacity)));
+ }
+
+ if (instr->elements()->IsRegister()) {
+ __ mov(result, ToRegister(instr->elements()));
+ } else {
+ __ lw(result, ToMemOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = v0;
+ __ mov(result, zero_reg);
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ mov(result, ToRegister(instr->object()));
+ } else {
+ __ lw(result, ToMemOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+ } else {
+ __ mov(a3, ToRegister(key));
+ __ SmiTag(a3);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ mov(a0, result);
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ SmiTst(result, at);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+}
+
+
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register scratch = scratch0();
@@ -6006,4 +6033,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
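
The new DoMaybeGrowElements above is a textbook instance of the Lithium deferred-code pattern: the fast-path capacity checks stay inline, and the slow path lives in an LDeferredCode subclass that the inline code branches to and returns from via exit(). The skeleton in isolation (DeferredFoo and LFoo are hypothetical stand-ins):

class DeferredFoo final : public LDeferredCode {
 public:
  DeferredFoo(LCodeGen* codegen, LFoo* instr)
      : LDeferredCode(codegen), instr_(instr) {}
  // Emitted out of line after the main body; the inline fast path jumps
  // to entry() only when the slow case is hit, then resumes at exit().
  void Generate() override { codegen()->DoDeferredFoo(instr_); }
  LInstruction* instr() override { return instr_; }

 private:
  LFoo* instr_;
};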
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 6d75c4c1f6..6c5b695c28 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -26,7 +26,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -110,6 +109,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -239,7 +239,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -355,10 +354,11 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 1bec0c8cda..df92ab9f5c 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -296,4 +296,5 @@ void LGapResolver::EmitMove(int index) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 06b8d51f5b..fd90584ea9 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -1097,10 +1097,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@@ -1110,20 +1118,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
@@ -1816,7 +1810,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2095,7 +2089,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2144,7 +2138,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2218,7 +2212,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2283,8 +2277,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- return MarkAsCall(
- new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result =
+ new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+ return MarkAsCall(result, instr);
}
@@ -2316,6 +2318,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, v0);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2354,8 +2371,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
- LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2432,7 +2456,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2549,7 +2573,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2619,6 +2643,7 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
return MarkAsCall(DefineFixed(result, cp), instr);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
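
The DoCallWithDescriptor rewrite fixes the operand layout so the two implicit parameters come first and everything the descriptor names follows. Summarized as a sketch (comment form, mirroring the loop above):

// Operand layout for LCallWithDescriptor after this change:
//   ops[0]   target, via UseRegisterOrConstantAtStart
//   ops[1]   context, pinned to cp via UseFixed
//   ops[2+]  descriptor.GetRegisterParameter(i - kImplicitRegisterParameterCount)
// kImplicitRegisterParameterCount == 2 accounts for the target and context.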
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 667487014a..82c5ffd29d 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -117,6 +117,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -151,7 +152,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -471,26 +471,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1174,6 +1154,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1530,7 +1512,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1828,8 +1810,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1839,6 +1825,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2151,17 +2141,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2204,22 +2199,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* obj,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = obj;
+ inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2275,6 +2272,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
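
The template arguments in LTemplateInstruction<R, I, T> encode result, input, and temp counts, so widening LStoreNamedGeneric from <0, 3, 0> to <0, 3, 2> (and LStoreKeyedGeneric from <0, 4, 0> to <0, 4, 2>) is what creates the temps_ slots that now carry the slot and vector registers. A sketch of the convention (LFrob is hypothetical):

// LTemplateInstruction<R, I, T>:
//   R: result count (0 or 1), I: inputs_[] size, T: temps_[] size.
class LFrob final : public LTemplateInstruction<0, 1, 2> {
 public:
  LFrob(LOperand* value, LOperand* slot, LOperand* vector) {
    inputs_[0] = value;  // I = 1 input
    temps_[0] = slot;    // T = 2 temps, fixed to the IC registers
    temps_[1] = vector;
  }
};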
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 0c3e0ceb2e..f554b0c1ef 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -523,6 +523,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = hash ^ (hash >> 16);
srl(at, reg0, 16);
xor_(reg0, reg0, at);
+ And(reg0, reg0, Operand(0x3fffffff));
}
@@ -3803,7 +3804,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
sw(filler, MemOperand(start_offset));
Addu(start_offset, start_offset, kPointerSize);
bind(&entry);
- Branch(&loop, lt, start_offset, Operand(end_offset));
+ Branch(&loop, ult, start_offset, Operand(end_offset));
}
@@ -4451,17 +4452,17 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
} else {
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
- addiu(dst, left, right.immediate()); // Left is overwritten.
+ Addu(dst, left, right.immediate()); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
// Load right since xori takes uint16 as immediate.
- addiu(t9, zero_reg, right.immediate());
+ Addu(t9, zero_reg, right);
xor_(overflow_dst, dst, t9);
and_(overflow_dst, overflow_dst, scratch);
} else {
- addiu(dst, left, right.immediate());
+ Addu(dst, left, right.immediate());
xor_(overflow_dst, dst, left);
// Load right since xori takes uint16 as immediate.
- addiu(t9, zero_reg, right.immediate());
+ Addu(t9, zero_reg, right);
xor_(scratch, dst, t9);
and_(overflow_dst, scratch, overflow_dst);
}
@@ -4519,17 +4520,17 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
} else {
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
- addiu(dst, left, -(right.immediate())); // Left is overwritten.
+ Subu(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch); // scratch is original left.
// Load right since xori takes uint16 as immediate.
- addiu(t9, zero_reg, right.immediate());
+ Addu(t9, zero_reg, right);
xor_(scratch, scratch, t9); // scratch is original left.
and_(overflow_dst, scratch, overflow_dst);
} else {
- addiu(dst, left, -(right.immediate()));
+ Subu(dst, left, right);
xor_(overflow_dst, dst, left);
// Load right since xori takes uint16 as immediate.
- addiu(t9, zero_reg, right.immediate());
+ Addu(t9, zero_reg, right);
xor_(scratch, left, t9);
and_(overflow_dst, scratch, overflow_dst);
}
@@ -5566,7 +5567,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
- Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+ Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
@@ -6128,6 +6129,7 @@ void MacroAssembler::TruncatingDiv(Register result,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
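
The Addu/SubuAndCheckForOverflow sequences above detect signed overflow branchlessly with the sign-bit identity: for dst = left + right, overflow occurred exactly when left and right agree in sign and dst does not, i.e. (dst ^ left) & (dst ^ right) is negative. A self-contained restatement in portable C++ (a sketch of the identity, not the MIPS macro itself):

#include <cstdint>

// True when left + right overflows int32_t. The sum is taken in uint32_t,
// where wraparound is well defined, and the xor-and isolates the sign bit.
bool AddOverflows(int32_t left, int32_t right) {
  uint32_t sum = static_cast<uint32_t>(left) + static_cast<uint32_t>(right);
  uint32_t ovf = (sum ^ static_cast<uint32_t>(left)) &
                 (sum ^ static_cast<uint32_t>(right));
  return (ovf >> 31) != 0;  // sign bit set means the signs disagree
}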
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index cd14e38070..f29802130d 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1241,6 +1241,7 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 68fbdae96e..83b5905cd0 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -989,8 +989,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
-Simulator::~Simulator() {
-}
+Simulator::~Simulator() { free(stack_); }
// When the generated code calls an external reference we need to catch that in
@@ -1036,7 +1035,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
@@ -1046,6 +1045,14 @@ class Redirection {
return redirection->external_function();
}
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -1054,6 +1061,19 @@ class Redirection {
};
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+ Redirection::DeleteChain(first);
+ if (i_cache != nullptr) {
+ for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@@ -1314,6 +1334,102 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
}
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+ bool ret = false;
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
void Simulator::round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1355,6 +1471,129 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
}
+void Simulator::round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to zero. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int32_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ }
+}
+
+
+void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
+ int64_t& rounded_int, double fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to zero. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+  // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (FCSR_ & 3) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
+
+
+void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RD (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (FCSR_ & 3) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
+
+
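+
The three helpers above share one dispatch shape; the following condensed sketch spells out the mode handling and the ties-to-even fixup. Mode values 0..3 mirror kRoundToNearest through kRoundToMinusInf, and the helper name is illustrative, not V8's.

  #include <cmath>
  #include <cstdint>

  int32_t RoundPerMode(float fs, int mode) {
    switch (mode & 3) {
      case 0: {  // RN: round to nearest, ties to even.
        int32_t r = static_cast<int32_t>(std::floor(fs + 0.5f));
        if ((r & 1) != 0 && r - fs == 0.5f) r--;  // Halfway: take the even one.
        return r;
      }
      case 1: return static_cast<int32_t>(std::trunc(fs));   // RZ.
      case 2: return static_cast<int32_t>(std::ceil(fs));    // RP.
      default: return static_cast<int32_t>(std::floor(fs));  // RD.
    }
  }
  // E.g. RoundPerMode(2.5f, 0) == 2 while RoundPerMode(3.5f, 0) == 4.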
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
@@ -1381,6 +1620,40 @@ int32_t Simulator::get_pc() const {
// executed in the simulator. Since the host is typically IA32 we will not
// get the correct MIPS-like behaviour on unaligned accesses.
+void Simulator::TraceRegWr(int32_t value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, "%08x", value);
+ }
+}
+
+
+// TODO(plind): consider making icount_ printing a flag option.
+void Simulator::TraceMemRd(int32_t addr, int32_t value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, "%08x <-- [%08x] (%d)", value, addr, icount_);
+ }
+}
+
+
+void Simulator::TraceMemWr(int32_t addr, int32_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (t) {
+ case BYTE:
+ SNPrintF(trace_buf_, " %02x --> [%08x]",
+ static_cast<int8_t>(value), addr);
+ break;
+ case HALF:
+ SNPrintF(trace_buf_, " %04x --> [%08x]", static_cast<int16_t>(value),
+ addr);
+ break;
+ case WORD:
+ SNPrintF(trace_buf_, "%08x --> [%08x]", value, addr);
+ break;
+ }
+ }
+}
+
+
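+
With --trace_sim enabled, the helpers above annotate each disassembled instruction with the value that moved. A standalone equivalent of the word-store formatting, using snprintf in place of V8's SNPrintF (the buffer size mirrors trace_buf_; the function name is illustrative):

  #include <cstdint>
  #include <cstdio>

  // Produces e.g. "deadbeef --> [7fff5c10]" for a traced 32-bit store.
  void FormatWordStore(char (&buf)[128], uint32_t addr, uint32_t value) {
    std::snprintf(buf, sizeof buf, "%08x --> [%08x]", value, addr);
  }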
int Simulator::ReadW(int32_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
@@ -1391,6 +1664,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
}
if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
}
PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
@@ -1412,6 +1686,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
}
if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ TraceMemWr(addr, value, WORD);
*ptr = value;
return;
}
@@ -1452,6 +1727,7 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
}
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
@@ -1465,6 +1741,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
}
PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
@@ -1478,6 +1755,7 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ TraceMemWr(addr, value, HALF);
*ptr = value;
return;
}
@@ -1491,6 +1769,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ TraceMemWr(addr, value, HALF);
*ptr = value;
return;
}
@@ -1503,24 +1782,28 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
uint32_t Simulator::ReadBU(int32_t addr) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr & 0xff;
}
int32_t Simulator::ReadB(int32_t addr) {
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
}
void Simulator::WriteB(int32_t addr, uint8_t value) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ TraceMemWr(addr, value, BYTE);
*ptr = value;
}
void Simulator::WriteB(int32_t addr, int8_t value) {
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ TraceMemWr(addr, value, BYTE);
*ptr = value;
}
@@ -1930,6 +2213,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
const uint32_t rt_u = static_cast<uint32_t>(rt);
const int32_t rd_reg = instr->RdValue();
const uint32_t sa = instr->SaValue();
+ const uint8_t bp = instr->Bp2Value();
const int32_t fs_reg = instr->FsValue();
@@ -2172,6 +2456,60 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
*alu_out = (rs_u & (mask << lsb)) >> lsb;
break;
}
+ case BSHFL: {
+ int sa = instr->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP: {
+ uint32_t input = static_cast<uint32_t>(rt);
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse the bits in a byte.
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte =
+ static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ *alu_out = static_cast<int32_t>(output);
+ break;
+ }
+ case SEB:
+ case SEH:
+ case WSBH:
+ UNREACHABLE();
+ break;
+ default: {
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN: {
+ if (bp == 0) {
+ *alu_out = static_cast<int32_t>(rt);
+ } else {
+ uint32_t rt_hi = rt << (8 * bp);
+ uint32_t rs_lo = rs >> (8 * (4 - bp));
+ *alu_out = static_cast<int32_t>(rt_hi | rs_lo);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ break;
+ }
default:
UNREACHABLE();
}
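The BITSWAP case above relies on Sean Anderson's three-multiply byte reversal; here is a self-contained sketch of just that step, with an illustrative helper name.

  #include <cstdint>
  #include <cstdio>

  // Mirror one byte's bits: the multiplies spread the bits out, the masks
  // pick the reversed positions, and the final multiply/shift gathers them.
  uint8_t ReverseByte(uint8_t b) {
    return static_cast<uint8_t>(((b * 0x0802LU & 0x22110LU) |
                                 (b * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16);
  }

  int main() {
    std::printf("%02x %02x\n", ReverseByte(0x01), ReverseByte(0xF0));  // 80 0f
  }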
@@ -2191,7 +2529,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
uint32_t cc, fcsr_cc;
int64_t i64;
fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
+ ft = (instr->FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg)
+ : 0.0;
+ fd = get_fpu_register_double(fd_reg);
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
cc = instr->FCccValue();
@@ -2246,6 +2586,37 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
DCHECK(IsMipsArchVariant(kMips32r6));
set_fpu_register_double(fd_reg, (ft_int & 0x1) != 0 ? fs : 0.0);
break;
+ case MOVZ_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.D and MOVF.D
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.D
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ } else {
+ // MOVF.D
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
fs = get_fpu_register_double(fs_reg);
@@ -2259,6 +2630,48 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
set_fpu_register_double(fd_reg, (fs >= ft) ? ft : fs);
}
break;
+ case MINA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
fs = get_fpu_register_double(fs_reg);
@@ -2297,6 +2710,18 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
case SQRT_D:
set_fpu_register_double(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_D: {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
+ case RECIP_D: {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double result = 1.0 / fs;
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
@@ -2372,59 +2797,151 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
set_fpu_register_float(fd_reg, static_cast<float>(fs));
break;
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
- double rounded = trunc(fs);
- i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ int64_t result;
+ double rounded;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
- i64 = static_cast<int64_t>(rounded);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double rounded = std::floor(fs + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = result;
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
}
- case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(std::floor(fs));
+ case FLOOR_L_D: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double rounded = std::floor(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
- case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(std::ceil(fs));
+ }
+ case CEIL_L_D: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ double rounded = std::ceil(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
} else {
- set_fpu_register_word(fd_reg, i64 & 0xffffffff);
- set_fpu_register_word(fd_reg + 1, i64 >> 32);
+ UNSUPPORTED();
}
break;
- case C_F_D:
- UNIMPLEMENTED_MIPS();
+ }
+ case CLASS_D: { // Mips32r6 instruction
+ // Convert double input to uint64_t for easier bit manipulation
+ uint64_t classed = bit_cast<uint64_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input double
+ uint32_t sign = (classed >> 63) & 1;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
+ uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint64_t result;
+ double dResult;
+
+ // Setting flags if input double is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFFF0000000000000);
+ bool posInf = (classed == 0x7FF0000000000000);
+ bool negZero = (classed == 0x8000000000000000);
+ bool posZero = (classed == 0x0000000000000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if double is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && exponent == 0x7ff) {
+ quietNan = ((mantissa & 0x0008000000000000) != 0) &&
+ ((mantissa & (0x0008000000000000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if double is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if double is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.D instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ dResult = bit_cast<double>(result);
+ set_fpu_register_double(fd_reg, dResult);
+
+ break;
+ }
+ case C_F_D: {
+ set_fcsr_bit(fcsr_cc, false);
break;
+ }
default:
UNREACHABLE();
}
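For reference, the CLASS.D result above is a 10-bit one-hot mask. A sketch of the bit layout with a spot check for the two zero classes follows; the helper name and the memcpy stand-in for bit_cast are illustrative.

  //   bit 0 sNaN, 1 qNaN, 2 -inf, 3 -norm, 4 -subnorm, 5 -zero,
  //   bit 6 +inf, 7 +norm, 8 +subnorm, 9 +zero.
  #include <cstdint>
  #include <cstring>

  uint64_t ClassMaskOfZero(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);  // Same role as bit_cast above.
    if (bits == 0x8000000000000000ULL) return 1u << 5;  // -0.0 -> negZero.
    if (bits == 0) return 1u << 9;                      // +0.0 -> posZero.
    return 0;  // The remaining classes are omitted from this sketch.
  }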
@@ -2433,7 +2950,10 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
void Simulator::DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
const int32_t& fd_reg,
- const int32_t& fs_reg) {
+ const int32_t& fs_reg,
+ const int32_t& ft_reg) {
+ float fs = get_fpu_register_float(fs_reg);
+ float ft = get_fpu_register_float(ft_reg);
switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg);
@@ -2443,7 +2963,80 @@ void Simulator::DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default: // Mips64r6 CMP.S instructions unimplemented.
+ case CMP_AF:
+ set_fpu_register_word(fd_reg, 0);
+ break;
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
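Every CMP.S case above follows the same r6 convention: write an all-ones word when the predicate holds and zero otherwise, so the result can feed SEL/SELEQZ/SELNEZ as a mask. A condensed sketch of that pattern, with illustrative helper names:

  #include <cmath>
  #include <cstdint>

  int32_t CmpMask(bool predicate) { return predicate ? -1 : 0; }

  // CMP_ULT: true when fs < ft or either operand is NaN (unordered).
  int32_t CmpUltS(float fs, float ft) {
    return CmpMask(fs < ft || std::isnan(fs) || std::isnan(ft));
  }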
@@ -2453,38 +3046,93 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
const int32_t& ft_reg,
const int32_t& fs_reg,
const int32_t& fd_reg) {
- float fs, ft;
+ float fs, ft, fd;
fs = get_fpu_register_float(fs_reg);
ft = get_fpu_register_float(ft_reg);
- int64_t ft_int = static_cast<int64_t>(get_fpu_register_double(ft_reg));
+ fd = get_fpu_register_float(fd_reg);
+ int32_t ft_int = bit_cast<int32_t>(ft);
+ int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case ADD_D:
+ case RINT: {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fs);
+ float lower = std::floor(fs);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ if (upper - fs < fs - lower) {
+ result = upper;
+ } else if (upper - fs > fs - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+ float remainder = modf(temp_result, &temp);
+ if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fs > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ set_fpu_register_float(fd_reg, result);
+ if (result != fs) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case ADD_S:
set_fpu_register_float(fd_reg, fs + ft);
break;
- case SUB_D:
+ case SUB_S:
set_fpu_register_float(fd_reg, fs - ft);
break;
- case MUL_D:
+ case MUL_S:
set_fpu_register_float(fd_reg, fs * ft);
break;
- case DIV_D:
+ case DIV_S:
set_fpu_register_float(fd_reg, fs / ft);
break;
- case ABS_D:
+ case ABS_S:
set_fpu_register_float(fd_reg, fabs(fs));
break;
- case MOV_D:
+ case MOV_S:
set_fpu_register_float(fd_reg, fs);
break;
- case NEG_D:
+ case NEG_S:
set_fpu_register_float(fd_reg, -fs);
break;
- case SQRT_D:
+ case SQRT_S:
set_fpu_register_float(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_S: {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_float(fd_reg, result);
+ break;
+ }
+ case RECIP_S: {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float result = 1.0 / fs;
+ set_fpu_register_float(fd_reg, result);
+ break;
+ }
+ case C_F_D:
+ set_fcsr_bit(fcsr_cc, false);
+ break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
@@ -2509,18 +3157,314 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
case CVT_D_S:
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
+ case SEL:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ break;
+ case CLASS_S: { // Mips32r6 instruction
+ // Convert float input to uint32_t for easier bit manipulation
+ float fs = get_fpu_register_float(fs_reg);
+ uint32_t classed = bit_cast<uint32_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input float
+ uint32_t sign = (classed >> 31) & 1;
+ uint32_t exponent = (classed >> 23) & 0x000000ff;
+ uint32_t mantissa = classed & 0x007fffff;
+ uint32_t result;
+ float fResult;
+
+ // Setting flags if input float is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFF800000);
+ bool posInf = (classed == 0x7F800000);
+ bool negZero = (classed == 0x80000000);
+ bool posZero = (classed == 0x00000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if float is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && (exponent == 0xff)) {
+ quietNan = ((mantissa & 0x00400000) != 0) &&
+ ((mantissa & (0x00400000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if float is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if float is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.S instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ fResult = bit_cast<float>(result);
+ set_fpu_register_float(fd_reg, fResult);
+
+ break;
+ }
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(
- fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_double(fs_reg) : 0.0);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg) : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(
- fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_double(fs_reg) : 0.0);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ break;
+ case MOVZ_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(IsMipsArchVariant(kMips32r2));
+ int32_t rt_reg = instr->RtValue();
+ int32_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.S and MOVF.S.
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.S
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ } else {
+ // MOVF.S
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case TRUNC_W_S: { // Truncate single to word (round towards 0).
+ float rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case TRUNC_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = trunc(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
break;
+ }
+ case FLOOR_W_S: // Round single to word towards negative infinity.
+ {
+ float rounded = std::floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case FLOOR_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = std::floor(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case ROUND_W_S: {
+ float rounded = std::floor(fs + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ROUND_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = std::floor(fs + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = result;
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case CEIL_W_S: // Round single to word towards positive infinity.
+ {
+ float rounded = std::ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case CEIL_L_S: { // Mips32r2 instruction.
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ float rounded = std::ceil(fs);
+ int64_t i64 = static_cast<int64_t>(rounded);
+ if (IsFp64Mode()) {
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case MIN:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs >= ft) ? ft : fs);
+ }
+ break;
+ case MAX:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs <= ft) ? ft : fs);
+ }
+ break;
+ case MINA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case CVT_L_S: {
+ if (IsFp64Mode()) {
+ int64_t result;
+ float rounded;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ } else {
+ UNSUPPORTED();
+ }
+ break;
+ }
+ case CVT_W_S: {
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
default:
- // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+ // CVT_PS_S is unimplemented.
UNREACHABLE();
}
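MINA/MAXA above select by magnitude and fall back to the non-NaN operand when exactly one input is NaN; here is a sketch of MINA's decision table as an illustrative standalone helper, not V8's.

  #include <cmath>

  float MinMagnitude(float fs, float ft) {
    if (std::isnan(fs)) return std::isnan(ft) ? fs : ft;
    if (std::isnan(ft)) return fs;
    if (std::fabs(fs) > std::fabs(ft)) return ft;
    if (std::fabs(fs) < std::fabs(ft)) return fs;
    return fs > ft ? fs : ft;  // Equal magnitudes: the code above takes max.
  }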
@@ -2547,10 +3491,16 @@ void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
- UNIMPLEMENTED_MIPS();
+ if (IsFp64Mode()) {
+ i64 = get_fpu_register(fs_reg);
+ } else {
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
+ i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg + 1)) << 32;
+ }
+ set_fpu_register_float(fd_reg, static_cast<float>(i64));
break;
case CMP_AF: // Mips64r6 CMP.D instructions.
- UNIMPLEMENTED_MIPS();
+ set_fpu_register(fd_reg, 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
@@ -2601,7 +3551,28 @@ void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
set_fpu_register(fd_reg, 0);
}
break;
- default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED.
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
@@ -2645,7 +3616,7 @@ void Simulator::DecodeTypeRegisterCOP1(
DecodeTypeRegisterDRsType(instr, fr_reg, fs_reg, ft_reg, fd_reg);
break;
case W:
- DecodeTypeRegisterWRsType(instr, alu_out, fd_reg, fs_reg);
+ DecodeTypeRegisterWRsType(instr, alu_out, fd_reg, fs_reg, ft_reg);
break;
case L:
DecodeTypeRegisterLRsType(instr, ft_reg, fs_reg, fd_reg);
@@ -2821,7 +3792,10 @@ void Simulator::DecodeTypeRegisterSPECIAL(
break;
// Conditional moves.
case MOVN:
- if (rt) set_register(rd_reg, rs);
+ if (rt) {
+ set_register(rd_reg, rs);
+ TraceRegWr(rs);
+ }
break;
case MOVCI: {
uint32_t cc = instr->FBccValue();
@@ -2834,10 +3808,14 @@ void Simulator::DecodeTypeRegisterSPECIAL(
break;
}
case MOVZ:
- if (!rt) set_register(rd_reg, rs);
+ if (!rt) {
+ set_register(rd_reg, rs);
+ TraceRegWr(rs);
+ }
break;
default: // For other special opcodes we do the default operation.
set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
}
}
@@ -2848,27 +3826,35 @@ void Simulator::DecodeTypeRegisterSPECIAL2(Instruction* instr,
switch (instr->FunctionFieldRaw()) {
case MUL:
set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
// HI and LO are UNPREDICTABLE after the operation.
set_register(LO, Unpredictable);
set_register(HI, Unpredictable);
break;
default: // For other special2 opcodes we do the default operation.
set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
}
}
void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
const int32_t& rt_reg,
+ const int32_t& rd_reg,
int32_t& alu_out) {
switch (instr->FunctionFieldRaw()) {
case INS:
// Ins instr leaves result in Rt, rather than Rd.
set_register(rt_reg, alu_out);
+ TraceRegWr(alu_out);
break;
case EXT:
- // Ext instr leaves result in Rt, rather than Rd.
set_register(rt_reg, alu_out);
+ TraceRegWr(alu_out);
+ break;
+ case BSHFL:
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
break;
default:
UNREACHABLE();
@@ -2937,7 +3923,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
DecodeTypeRegisterSPECIAL2(instr, rd_reg, alu_out);
break;
case SPECIAL3:
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
@@ -2952,11 +3938,15 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
+ int32_t rs_reg = instr->RsValue();
int32_t rs = get_register(instr->RsValue());
uint32_t rs_u = static_cast<uint32_t>(rs);
int32_t rt_reg = instr->RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
+ int32_t imm19 = instr->Imm19Value();
+ int32_t imm21 = instr->Imm21Value();
+ int32_t imm26 = instr->Imm26Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
int64_t ft;
@@ -2964,12 +3954,17 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
- int32_t se_imm16 = imm16;
+ int32_t se_imm16 = imm16;
+ int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfff80000 : 0);
+ int32_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfc000000 : 0);
+
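+
The se_imm19/se_imm26 lines above sign-extend by OR-ing a high mask in when the field's sign bit is set. An equivalent width-generic idiom, shown only for illustration:

  #include <cstdint>

  // Valid for 1 <= bits <= 31; e.g. SignExtend(0x40000, 19) == -262144.
  int32_t SignExtend(int32_t value, int bits) {
    const int32_t sign = 1 << (bits - 1);
    return (value ^ sign) - sign;
  }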
// Get current pc.
int32_t current_pc = get_pc();
// Next pc.
int32_t next_pc = bad_ra;
+ // PC increment.
+ int16_t pc_increment;
// Used for conditional branch instructions.
bool do_branch = false;
@@ -3083,6 +4078,33 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGTZ:
do_branch = rs > 0;
break;
+ case POP66: {
+ if (rs_reg) { // BEQZC
+ int32_t se_imm21 =
+ static_cast<int32_t>(imm21 << (kOpcodeBits + kRsBits));
+ se_imm21 = se_imm21 >> (kOpcodeBits + kRsBits);
+ if (rs == 0)
+ next_pc = current_pc + 4 + (se_imm21 << 2);
+ else
+ next_pc = current_pc + 4;
+ } else { // JIC
+ next_pc = rt + imm16;
+ }
+ break;
+ }
+ case BC: {
+ next_pc = current_pc + 4 + (se_imm26 << 2);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case BALC: {
+ set_register(31, current_pc + 4);
+ next_pc = current_pc + 4 + (se_imm26 << 2);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
// ------------- Arithmetic instructions.
case ADDI:
if (HaveSameSign(rs, se_imm16)) {
@@ -3197,6 +4219,55 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
addr = rs + se_imm16;
break;
+ // ------------- JIALC and BNEZC instructions.
+ case POP76:
+ // Next pc.
+ next_pc = rt + se_imm16;
+ // The instruction after the jump is NOT executed.
+ pc_increment = Instruction::kInstrSize;
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + pc_increment);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ // ------------- PC-Relative instructions.
+ case PCREL: {
+ // rt field: checking 5-bits.
+ uint8_t rt = (imm21 >> kImm16Bits);
+ switch (rt) {
+ case ALUIPC:
+ addr = current_pc + (se_imm16 << 16);
+ alu_out = static_cast<int32_t>(~0x0FFFF) & addr;
+ break;
+ case AUIPC:
+ alu_out = current_pc + (se_imm16 << 16);
+ break;
+ default: {
+ // rt field: checking the most significant 2-bits.
+ rt = (imm21 >> kImm19Bits);
+ switch (rt) {
+ case LWPC: {
+ int32_t offset = imm19;
+ // Sign-extend the 19-bit offset.
+ offset <<= (kOpcodeBits + kRsBits + 2);
+ offset >>= (kOpcodeBits + kRsBits + 2);
+ addr = current_pc + (offset << 2);
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ alu_out = *ptr;
+ break;
+ }
+ case ADDIUPC:
+ alu_out = current_pc + (se_imm19 << 2);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ break;
+ }
default:
UNREACHABLE();
}
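A worked sketch of the AUIPC/ALUIPC arithmetic above, as standalone helpers; the shift mirrors the code's se_imm16 << 16, and the names are illustrative.

  #include <cstdint>

  int32_t Auipc(int32_t pc, int16_t imm16) {
    return pc + (static_cast<int32_t>(imm16) << 16);
  }

  int32_t Aluipc(int32_t pc, int16_t imm16) {
    return ~0x0FFFF & Auipc(pc, imm16);  // Clear the low 16 bits of the sum.
  }
  // E.g. with pc = 0x00401234, Aluipc(pc, 0) == 0x00400000.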
@@ -3233,6 +4304,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case XORI:
case LUI:
set_register(rt_reg, alu_out);
+ TraceRegWr(alu_out);
break;
// ------------- Memory instructions.
case LB:
@@ -3274,6 +4346,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
addr = rs + se_imm16;
WriteD(addr, get_fpu_register_double(ft_reg), instr);
break;
+ case PCREL:
+ set_register(rs_reg, alu_out);
+ break;
default:
break;
}
@@ -3327,14 +4401,12 @@ void Simulator::InstructionDecode(Instruction* instr) {
CheckICache(isolate_->simulator_i_cache(), instr);
}
pc_modified_ = false;
+ v8::internal::EmbeddedVector<char, 256> buffer;
if (::v8::internal::FLAG_trace_sim) {
+ SNPrintF(trace_buf_, "%s", "");
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // Use a reasonably large buffer.
- v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start());
}
switch (instr->InstructionType()) {
@@ -3350,6 +4422,10 @@ void Simulator::InstructionDecode(Instruction* instr) {
default:
UNSUPPORTED();
}
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(" 0x%08x %-44s %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start(), trace_buf_.start());
+ }
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) +
Instruction::kInstrSize);
@@ -3536,7 +4612,8 @@ uintptr_t Simulator::PopAddress() {
#undef UNSUPPORTED
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // USE_SIMULATOR
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index a135f8d71d..00b79b3cfe 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -178,8 +178,17 @@ class Simulator {
void set_fcsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
bool set_fcsr_round_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
void round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs);
+ void round64_according_to_fcsr(double toRound, double& rounded,
+ int64_t& rounded_int, double fs);
+ void round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
@@ -197,6 +206,8 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
+ static void TearDown(HashMap* i_cache, Redirection* first);
+
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
@@ -257,6 +268,20 @@ class Simulator {
inline double ReadD(int32_t addr, Instruction* instr);
inline void WriteD(int32_t addr, double value, Instruction* instr);
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD
+ // DWORD,
+ // DFLOAT - Floats may have printing issues due to paired lwc1's
+ };
+
+ void TraceRegWr(int32_t value);
+ void TraceMemWr(int32_t addr, int32_t value, TraceType t);
+ void TraceMemRd(int32_t addr, int32_t value);
+ EmbeddedVector<char, 128> trace_buf_;
+
// Operations depending on endianness.
// Get Double Higher / Lower word.
inline int32_t GetDoubleHIW(double* addr);
@@ -273,7 +298,8 @@ class Simulator {
const int32_t& fs_reg, const int32_t& ft_reg,
const int32_t& fd_reg);
void DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
- const int32_t& fd_reg, const int32_t& fs_reg);
+ const int32_t& fd_reg, const int32_t& fs_reg,
+ const int32_t& ft_reg);
void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t& ft_reg,
const int32_t& fs_reg, const int32_t& fd_reg);
void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t& ft_reg,
@@ -307,7 +333,7 @@ class Simulator {
int32_t& alu_out);
void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int32_t& rt_reg,
- int32_t& alu_out);
+ const int32_t& rd_reg, int32_t& alu_out);
// Helper function for DecodeTypeRegister.
void ConfigureTypeRegister(Instruction* instr,
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 7f18335b59..bfeb3002eb 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -196,24 +196,17 @@ Address Assembler::break_address_from_return_address(Address pc) {
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
- // Encoded internal references are lui/ori load of 48-bit absolute address.
- Instr instr_lui = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr_ori = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
- Instr instr_ori2 = Assembler::instr_at(pc + 3 * Assembler::kInstrSize);
- DCHECK(Assembler::IsLui(instr_lui));
- DCHECK(Assembler::IsOri(instr_ori));
- DCHECK(Assembler::IsOri(instr_ori2));
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
- instr_ori2 &= ~kImm16Mask;
- int64_t imm = reinterpret_cast<int64_t>(target);
- DCHECK((imm & 3) == 0);
- Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> 32) & kImm16Mask));
- Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | ((imm >> 16) & kImm16Mask));
- Assembler::instr_at_put(pc + 3 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ // Encoded internal references are j/jal instructions.
+ Instr instr = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
+
+ uint64_t imm28 =
+ (reinterpret_cast<uint64_t>(target) & static_cast<uint64_t>(kImm28Mask));
+
+ instr &= ~kImm26Mask;
+ uint64_t imm26 = imm28 >> 2;
+ DCHECK(is_uint26(imm26));
+
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
// Currently used only by deserializer, and all code will be flushed
// after complete deserialization, no need to flush on each reference.
}
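The J/JAL encoding used here is region-relative: the low 28 bits of the target travel as a 26-bit word index, and decoding re-attaches the 256MB-region bits of the referencing pc. A sketch of both directions, assuming kImm28Mask is 0x0FFFFFFF as in the code above:

  #include <cstdint>

  uint32_t EncodeImm26(uint64_t target) {
    return static_cast<uint32_t>((target & 0x0FFFFFFF) >> 2);  // imm28 >> 2.
  }

  uint64_t DecodeTarget(uint64_t pc, uint32_t imm26) {
    uint64_t imm28 = static_cast<uint64_t>(imm26) << 2;
    return (pc & ~UINT64_C(0x0FFFFFFF)) | imm28;  // Same 256MB segment as pc.
  }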
@@ -222,7 +215,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- DCHECK(IsLui(instr_at(pc)));
+ DCHECK(IsJ(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
} else {
DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
@@ -270,18 +263,14 @@ Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
return Memory::Address_at(pc_);
} else {
- // Encoded internal references are lui/ori load of 48-bit absolute address.
+ // Encoded internal references are j/jal instructions.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
- Instr instr_lui = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
- Instr instr_ori = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
- Instr instr_ori2 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize);
- DCHECK(Assembler::IsLui(instr_lui));
- DCHECK(Assembler::IsOri(instr_ori));
- DCHECK(Assembler::IsOri(instr_ori2));
- int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 32;
- imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 16;
- imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask));
- return reinterpret_cast<Address>(imm);
+ Instr instr = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
+ instr &= kImm26Mask;
+ uint64_t imm28 = instr << 2;
+ uint64_t segment =
+ (reinterpret_cast<uint64_t>(pc_) & ~static_cast<uint64_t>(kImm28Mask));
+ return reinterpret_cast<Address>(segment | imm28);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 685100f59a..ea497509c6 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -211,13 +211,14 @@ Operand::Operand(Handle<Object> handle) {
}
-MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
-MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
- OffsetAddend offset_addend) : Operand(rm) {
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend)
+ : Operand(rm) {
offset_ = unit * multiplier + offset_addend;
}
@@ -290,7 +291,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->reloc_size =
+ static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
}
@@ -637,7 +639,7 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
// Check we have a branch or jump instruction.
- DCHECK(IsBranch(instr) || IsLui(instr));
+ DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
@@ -673,8 +675,18 @@ int Assembler::target_at(int pos, bool is_internal) {
return pos - delta;
}
} else {
- UNREACHABLE();
- return 0;
+ DCHECK(IsJ(instr) || IsJal(instr));
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (imm28 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
+ instr_address &= kImm28Mask;
+ int delta = static_cast<int>(instr_address - imm28);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
}
}
@@ -694,7 +706,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
return;
}
- DCHECK(IsBranch(instr) || IsLui(instr));
+ DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
if (IsBranch(instr)) {
int32_t imm18 = target_pos - (pos + kBranchPCOffset);
DCHECK((imm18 & 3) == 0);
@@ -725,7 +737,16 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
} else {
- UNREACHABLE();
+ DCHECK(IsJ(instr) || IsJal(instr));
+ uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ imm28 &= kImm28Mask;
+ DCHECK((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
+ DCHECK(is_uint26(imm26));
+
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
}
}
@@ -787,7 +808,8 @@ void Assembler::bind_to(Label* L, int pos) {
}
target_at_put(fixup_pos, pos, false);
} else {
- DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
+ DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
+ IsEmittedConstant(instr));
target_at_put(fixup_pos, pos, false);
}
}
@@ -945,6 +967,20 @@ void Assembler::GenInstrImmediate(Opcode opcode,
}
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t j) {
+ DCHECK(rs.is_valid() && (is_uint21(j)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (j & kImm21Mask);
+ emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26) {
+ DCHECK(is_int26(offset26));
+ Instr instr = opcode | (offset26 & kImm26Mask);
+ emit(instr);
+}
+
+
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -984,7 +1020,6 @@ uint64_t Assembler::jump_address(Label* L) {
return kEndOfJumpChain;
}
}
-
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
DCHECK((imm & 3) == 0);
@@ -1090,7 +1125,7 @@ int32_t Assembler::branch_offset21_compact(Label* L,
}
}
- int32_t offset = target_pos - pc_offset();
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
DCHECK((offset & 3) == 0);
DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
@@ -1137,6 +1172,19 @@ void Assembler::bal(int16_t offset) {
}
+void Assembler::bc(int32_t offset) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrImmediate(BC, offset);
+}
+
+
+void Assembler::balc(int32_t offset) {
+ DCHECK(kArchVariant == kMips64r6);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BALC, offset);
+}
+
+
void Assembler::beq(Register rs, Register rt, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
@@ -1336,7 +1384,7 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
- Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+ Instr instr = POP66 | (rs.code() << kRsShift) | (offset & kImm21Mask);
emit(instr);
}
@@ -1351,7 +1399,7 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
- Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+ Instr instr = POP76 | (rs.code() << kRsShift) | (offset & kImm21Mask);
emit(instr);
}
@@ -1359,12 +1407,14 @@ void Assembler::bnezc(Register rs, int32_t offset) {
void Assembler::j(int64_t target) {
#if DEBUG
// Get pc of delay slot.
- uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- DCHECK(in_range && ((target & 3) == 0));
+ if (target != kEndOfJumpChain) {
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ DCHECK(in_range && ((target & 3) == 0));
+ }
#endif
- GenInstrJump(J, target >> 2);
+ GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
}
@@ -1385,13 +1435,15 @@ void Assembler::jr(Register rs) {
void Assembler::jal(int64_t target) {
#ifdef DEBUG
// Get pc of delay slot.
- uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- DCHECK(in_range && ((target & 3) == 0));
+ if (target != kEndOfJumpChain) {
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ DCHECK(in_range && ((target & 3) == 0));
+ }
#endif
positions_recorder()->WriteRecordedPositions();
- GenInstrJump(JAL, target >> 2);
+ GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
}
@@ -1404,29 +1456,18 @@ void Assembler::jalr(Register rs, Register rd) {
}
-void Assembler::j_or_jr(int64_t target, Register rs) {
- // Get pc of delay slot.
- uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- if (in_range) {
- j(target);
- } else {
- jr(t9);
- }
+void Assembler::jic(Register rt, int16_t offset) {
+ DCHECK(kArchVariant == kMips64r6);
+ Instr instr = POP66 | (JIC << kRsShift) | (rt.code() << kRtShift) |
+ (offset & kImm16Mask);
+ emit(instr);
}
-void Assembler::jal_or_jalr(int64_t target, Register rs) {
- // Get pc of delay slot.
- uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
- (kImm26Bits+kImmFieldShift)) == 0;
- if (in_range) {
- jal(target);
- } else {
- jalr(t9);
- }
+void Assembler::jialc(Register rt, int16_t offset) {
+ DCHECK(kArchVariant == kMips64r6);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(POP76, zero_reg, rt, offset);
}
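For comparison with the simulator's POP66/POP76 handling earlier in this patch, here is a sketch of the runtime effect of the compact jumps emitted above, with illustrative helper names:

  #include <cstdint>

  int32_t JicTarget(int32_t rt, int16_t offset) { return rt + offset; }

  int32_t JialcTarget(int32_t rt, int16_t offset, int32_t pc, int32_t* ra) {
    *ra = pc + 4;  // JIALC links: ra receives the address after the jump.
    return rt + offset;
  }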
@@ -1687,7 +1728,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1697,7 +1738,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1897,6 +1938,7 @@ void Assembler::lui(Register rd, int32_t j) {
void Assembler::aui(Register rs, Register rt, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
+ DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
}
@@ -1960,6 +2002,56 @@ void Assembler::sd(Register rd, const MemOperand& rs) {
}
+// ---------PC-Relative instructions-----------
+
+void Assembler::addiupc(Register rs, int32_t imm19) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs.is_valid() && is_int19(imm19));
+ int32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::lwpc(Register rs, int32_t offset19) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs.is_valid() && is_int19(offset19));
+ int32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::lwupc(Register rs, int32_t offset19) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs.is_valid() && is_int19(offset19));
+ int32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::ldpc(Register rs, int32_t offset18) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs.is_valid() && is_int18(offset18));
+ int32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::auipc(Register rs, int16_t imm16) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs.is_valid() && is_int16(imm16));
+ int32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
+void Assembler::aluipc(Register rs, int16_t imm16) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs.is_valid() && is_int16(imm16));
+ int32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ GenInstrImmediate(PCREL, rs, imm21);
+}
+
+
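+
All six emitters above funnel through the 21-bit immediate: a minor-opcode tag in the upper bits selects the operation and the payload fills the rest. A sketch of the packing for the 19-bit forms, with the constants written out literally (kImm19Bits = 19, kImm19Mask = 0x7FFFF):

  #include <cstdint>

  uint32_t PackPcrel19(uint32_t minor_op, int32_t offset19) {
    return (minor_op << 19) | (static_cast<uint32_t>(offset19) & 0x7FFFF);
  }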
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -2141,17 +2233,6 @@ void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
}
-void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt == D) || (fmt == S));
-
- Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | SEL;
- emit(instr);
-}
-
-
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
@@ -2175,14 +2256,6 @@ void Assembler::seleqz(Register rd, Register rs, Register rt) {
}
-// FPR.
-void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
-}
-
-
// GPR.
void Assembler::selnez(Register rd, Register rs, Register rt) {
DCHECK(kArchVariant == kMips64r6);
@@ -2190,15 +2263,6 @@ void Assembler::selnez(Register rd, Register rs, Register rt) {
}
-// FPR.
-void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
- FPURegister ft) {
- DCHECK(kArchVariant == kMips64r6);
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
-}
-
-
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (kArchVariant != kMips64r6) {
@@ -2234,6 +2298,18 @@ void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
+void Assembler::bitswap(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
+}
+
+
+void Assembler::dbitswap(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
+}
+
+
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
@@ -2242,6 +2318,22 @@ void Assembler::pref(int32_t hint, const MemOperand& rs) {
}
+void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(is_uint3(bp));
+ uint16_t sa = (ALIGN << kBp2Bits) | bp;
+ GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
+}
+
+
+void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(is_uint3(bp));
+ uint16_t sa = (DALIGN << kBp3Bits) | bp;
+ GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
+}
+
+
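
The byte-position operand of the two aligners rides in the low bits of the sa field: ALIGN keeps 2 bits of bp below its code, DALIGN keeps 3. A quick check of that packing using the sa-field codes this patch adds to constants-mips64.h:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kBp2Bits = 2, kBp3Bits = 3;  // from constants-mips64.h
    constexpr uint32_t ALIGN = 2, DALIGN = 1;       // SPECIAL3 sa-field codes

    int main() {
      for (uint32_t bp = 0; bp < 4; ++bp)
        assert(((ALIGN << kBp2Bits) | bp) == 8 + bp);   // sa in [8, 11]
      for (uint32_t bp = 0; bp < 8; ++bp)
        assert(((DALIGN << kBp3Bits) | bp) == 8 + bp);  // sa in [8, 15]
      return 0;
    }
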
// --------Coprocessor-instructions----------------
// Load, store, move.
@@ -2334,6 +2426,118 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
}
+void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
+}
+
+
+void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ sel(S, fd, fs, ft);
+}
+
+
+void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ sel(D, fd, fs, ft);
+}
+
+
+// FPR.
+void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
+}
+
+
+void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ seleqz(D, fd, fs, ft);
+}
+
+
+void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ seleqz(S, fd, fs, ft);
+}
+
+
+void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ selnez(D, fd, fs, ft);
+}
+
+
+void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
+ selnez(S, fd, fs, ft);
+}
+
+
+void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
+}
+
+
+void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
+}
+
+
+void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
+ DCHECK(kArchVariant == kMips64r2);
+ FPURegister ft;
+ ft.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
+}
+
+
+void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
+}
+
+
+void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
+ DCHECK(kArchVariant == kMips64r2);
+ GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
+}
+
+
+// FPR.
+void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
+}
+
+
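
For context on the select family gathered here: the r6 floating-point selects test the low bit of a register's raw contents rather than a condition flag. A plain C++ model of the semantics as understood from the MIPS r6 manual (an assumption of this note, not something the patch states):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Low bit of the raw 64-bit FPR contents backing a double.
    static bool Bit0(double r) {
      uint64_t bits;
      std::memcpy(&bits, &r, sizeof bits);
      return bits & 1;
    }

    // sel.fmt    fd, fs, ft : fd = bit0(fd) ? ft : fs
    // seleqz.fmt fd, fs, ft : fd = bit0(ft) ? 0.0 : fs
    // selnez.fmt fd, fs, ft : fd = bit0(ft) ? fs : 0.0
    double SelModel(double fd, double fs, double ft) {
      return Bit0(fd) ? ft : fs;
    }
    double SeleqzModel(double fs, double ft) { return Bit0(ft) ? 0.0 : fs; }
    double SelnezModel(double fs, double ft) { return Bit0(ft) ? fs : 0.0; }

    int main() {
      assert(SeleqzModel(3.5, 0.0) == 3.5);  // bit0(ft) clear -> keep fs
      assert(SelnezModel(3.5, 0.0) == 0.0);  // bit0(ft) clear -> zero
      return 0;
    }
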
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
@@ -2397,6 +2601,11 @@ void Assembler::mov_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::mov_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, MOV_D);
+}
+
+
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
@@ -2417,8 +2626,27 @@ void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
}
-// Conversions.
+void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
+}
+
+
+void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
+}
+
+
+void Assembler::recip_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
+}
+
+
+void Assembler::recip_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
+}
+
+// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
@@ -2477,30 +2705,30 @@ void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r6);
- GenInstrRegister(COP1, D, f0, fs, fd, RINT);
+ GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -2535,16 +2763,28 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
}
-void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
+void Assembler::class_s(FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
+}
+
+
+void Assembler::class_d(FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r6);
+ GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
-void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
@@ -2557,7 +2797,7 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -2573,7 +2813,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips64r2);
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -2594,6 +2834,17 @@ void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
}
+void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, W, fd, fs, ft);
+}
+
+void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
+ FPURegister ft) {
+ cmp(cond, L, fd, fs, ft);
+}
+
+
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK(kArchVariant == kMips64r6);
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
@@ -2613,6 +2864,7 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(kArchVariant != kMips64r6);
DCHECK(is_uint3(cc));
+ DCHECK(fmt == S || fmt == D);
DCHECK((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
| cc << 8 | 3 << 4 | cond;
@@ -2620,6 +2872,18 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
}
+void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, S, fs, ft, cc);
+}
+
+
+void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
+ uint16_t cc) {
+ c(cond, D, fs, ft, cc);
+}
+
+
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
DCHECK(src2 == 0.0);
@@ -2656,6 +2920,7 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
}
Instr instr = instr_at(pc);
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
+ DCHECK(IsJ(instr) || IsLui(instr) || IsJal(instr));
if (IsLui(instr)) {
Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
@@ -2687,8 +2952,21 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
instr_ori2 | (imm & kImm16Mask));
return 4; // Number of instructions patched.
} else {
- UNREACHABLE();
- return 0; // Number of instructions patched.
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ DCHECK((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ DCHECK(is_uint26(imm26));
+
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
}
}
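
The new fallback patches a plain J instruction: its 26-bit field is a word index within the current 256 MB region, so relocation widens it to a 28-bit byte offset, slides it by pc_delta, and narrows it back. A standalone rehearsal of that arithmetic, mirroring the masks used in the patch (the sample instruction word and delta are invented for illustration):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kImm26Mask = (1u << 26) - 1;
    constexpr uint32_t kImm28Mask = (1u << 28) - 1;

    uint32_t RelocateJ(uint32_t instr, intptr_t pc_delta) {
      uint32_t imm28 = (instr & kImm26Mask) << 2;   // word index -> byte offset
      imm28 = (imm28 + static_cast<uint32_t>(pc_delta)) & kImm28Mask;
      assert((imm28 & 3) == 0);                     // still instruction-aligned
      return (instr & ~kImm26Mask) | (imm28 >> 2);  // back to a word index
    }

    int main() {
      // Hypothetical J targeting word index 0x100; buffer moved by 0x1000 bytes.
      uint32_t patched = RelocateJ(0x08000100u, 0x1000);
      assert((patched & kImm26Mask) == 0x500u);
      return 0;
    }
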
@@ -2709,7 +2987,8 @@ void Assembler::GrowBuffer() {
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc.reloc_size =
+ static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer_;
@@ -2754,6 +3033,13 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dq(uint64_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
+}
+
+
void Assembler::dd(Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -2851,14 +3137,8 @@ void Assembler::CheckTrampolinePool() {
// references until associated instructions are emitted and available
// to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- // TODO(plind): Verify this, presume I cannot use macro-assembler
- // here.
- lui(at, (imm64 >> 32) & kImm16Mask);
- ori(at, at, (imm64 >> 16) & kImm16Mask);
- dsll(at, at, 16);
- ori(at, at, imm64 & kImm16Mask);
+ j(imm64);
}
- jr(at);
nop();
}
bind(&after_pool);
@@ -2963,20 +3243,7 @@ void Assembler::set_target_address_at(Address pc,
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 7f026bf57c..314970238b 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -404,8 +404,8 @@ class MemOperand : public Operand {
offset_zero = 0
};
- explicit MemOperand(Register rn, int64_t offset = 0);
- explicit MemOperand(Register rn, int64_t unit, int64_t multiplier,
+ explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
@@ -493,19 +493,16 @@ class Assembler : public AssemblerBase {
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
// On MIPS there is no Constant Pool so we skip that parameter.
- INLINE(static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool)) {
+ INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
}
- INLINE(static void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_address_at(pc, target, icache_flush_mode);
}
INLINE(static Address target_address_at(Address pc, Code* code)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
@@ -513,7 +510,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -524,6 +521,8 @@ class Assembler : public AssemblerBase {
// Return the code target address of the patch debug break slot
inline static Address break_address_from_return_address(Address pc);
+ static void JumpLabelToJumpRegister(Address pc);
+
static void QuietNaN(HeapObject* nan);
// This sets the branch destination (which gets loaded at the call address).
@@ -598,6 +597,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -634,6 +636,10 @@ class Assembler : public AssemblerBase {
void b(Label* L) { b(branch_offset(L, false)>>2); }
void bal(int16_t offset);
void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+ void bc(int32_t offset);
+ void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+ void balc(int32_t offset);
+ void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
void beq(Register rs, Register rt, int16_t offset);
void beq(Register rs, Register rt, Label* L) {
@@ -743,8 +749,8 @@ class Assembler : public AssemblerBase {
void jal(int64_t target);
void jalr(Register rs, Register rd = ra);
void jr(Register target);
- void j_or_jr(int64_t target, Register rs);
- void jal_or_jalr(int64_t target, Register rs);
+ void jic(Register rt, int16_t offset);
+ void jialc(Register rt, int16_t offset);
// -------Data-processing-instructions---------
@@ -847,6 +853,16 @@ class Assembler : public AssemblerBase {
void sd(Register rd, const MemOperand& rs);
+ // ---------PC-Relative-instructions-----------
+
+ void addiupc(Register rs, int32_t imm19);
+ void lwpc(Register rs, int32_t offset19);
+ void lwupc(Register rs, int32_t offset19);
+ void ldpc(Register rs, int32_t offset18);
+ void auipc(Register rs, int16_t imm16);
+ void aluipc(Register rs, int16_t imm16);
+
+
// ----------------Prefetch--------------------
void pref(int32_t hint, const MemOperand& rs);
@@ -881,17 +897,36 @@ class Assembler : public AssemblerBase {
void movf(Register rd, Register rs, uint16_t cc = 0);
void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
void seleqz(Register rd, Register rs, Register rt);
void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
void selnez(Register rs, Register rt, Register rd);
void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft);
+ void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);
+
+ void movz_s(FPURegister fd, FPURegister fs, Register rt);
+ void movz_d(FPURegister fd, FPURegister fs, Register rt);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movn_s(FPURegister fd, FPURegister fs, Register rt);
+ void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void bitswap(Register rd, Register rt);
+ void dbitswap(Register rd, Register rt);
+ void align(Register rd, Register rs, Register rt, uint8_t bp);
+ void dalign(Register rd, Register rs, Register rt, uint8_t bp);
// --------Coprocessor-instructions----------------
@@ -926,10 +961,15 @@ class Assembler : public AssemblerBase {
void abs_s(FPURegister fd, FPURegister fs);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
+ void mov_s(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_s(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
+ void rsqrt_s(FPURegister fd, FPURegister fs);
+ void rsqrt_d(FPURegister fd, FPURegister fs);
+ void recip_d(FPURegister fd, FPURegister fs);
+ void recip_s(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
@@ -958,6 +998,9 @@ class Assembler : public AssemblerBase {
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
+ void class_s(FPURegister fd, FPURegister fs);
+ void class_d(FPURegister fd, FPURegister fs);
+
void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
@@ -982,6 +1025,8 @@ class Assembler : public AssemblerBase {
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
+ void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
+ void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
void bc1eqz(Label* L, FPURegister ft) {
@@ -995,6 +1040,8 @@ class Assembler : public AssemblerBase {
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) {
@@ -1088,6 +1135,8 @@ class Assembler : public AssemblerBase {
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data) { dq(data); }
void dd(Label* label);
// Emits the address of the code stub's first instruction.
@@ -1105,7 +1154,9 @@ class Assembler : public AssemblerBase {
inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
// Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+ inline intptr_t available_space() const {
+ return reloc_info_writer.pos() - pc_;
+ }
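
The widening matters on LP64 hosts, where the pointer difference is 64-bit: returning it through int is an implicit narrowing that silently truncates once the gap exceeds what 32 bits can hold. A minimal illustration of the hazard, with a hypothetical 3 GB gap (two's-complement narrowing assumed):

    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t diff = INT64_C(3) * 1024 * 1024 * 1024;  // hypothetical gap
      assert(diff > 0);
      assert(static_cast<int>(diff) < 0);  // truncated: 3 GB wraps negative
      return 0;
    }
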
// Read/patch instructions.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
@@ -1172,11 +1223,12 @@ class Assembler : public AssemblerBase {
void CheckTrampolinePool();
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@@ -1352,6 +1404,8 @@ class Assembler : public AssemblerBase {
Register r1,
FPURegister r2,
int32_t j);
+ void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
+ void GenInstrImmediate(Opcode opcode, int32_t offset26);
void GenInstrJump(Opcode opcode,
@@ -1418,13 +1472,13 @@ class Assembler : public AssemblerBase {
int32_t get_trampoline_entry(int32_t pos);
int unbound_labels_count_;
- // If trampoline is emitted, generated code is becoming large. As this is
- // already a slow case which can possibly break our code generation for the
- // extreme case, we use this information to trigger different mode of
+ // After the trampoline is emitted, generated code uses long branches for
+ // forward branches whose target offsets could be beyond the reach of a
+ // branch instruction. We use this information to trigger a different mode of
// branch instruction generation, where we use jump instructions rather
// than regular branch instructions.
bool trampoline_emitted_;
- static const int kTrampolineSlotsSize = 6 * kInstrSize;
+ static const int kTrampolineSlotsSize = 2 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
static const int kInvalidSlotPos = -1;
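
These two constants fit together: a MIPS branch carries a signed 16-bit word offset, so the reachable range is 2^17 - 1 bytes, and a trampoline slot shrinks from the old six-instruction lui/ori/dsll/ori/jr/nop sequence to j plus its delay-slot nop. A spot-check of the range arithmetic:

    #include <cassert>

    constexpr int kInstrSize = 4;
    constexpr int kTrampolineSlotsSize = 2 * kInstrSize;  // j + nop
    constexpr int kMaxBranchOffset = (1 << (18 - 1)) - 1;

    int main() {
      // 16-bit signed word immediate, scaled by 4 bytes per instruction.
      assert(kMaxBranchOffset == ((1 << 15) - 1) * kInstrSize + 3);
      return 0;
    }
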
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index 5c8879704f..ca916374a8 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -347,6 +347,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -378,10 +379,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(a2);
}
- // Preserve the two incoming parameters on the stack.
- // Tag arguments count.
- __ dsll32(a0, a0, 0);
- __ MultiPushReversed(a0.bit() | a1.bit());
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ if (use_new_target) {
+ __ Push(a0, a1, a3);
+ } else {
+ __ Push(a0, a1);
+ }
Label rt_call, allocated, normal_new, count_incremented;
__ Branch(&normal_new, eq, a1, Operand(a3));
@@ -453,7 +457,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size (not including memento if create_memento)
+ // a3: object size (including memento if create_memento)
// t0: JSObject (not tagged)
__ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
__ mov(t1, t0);
@@ -535,7 +539,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Daddu(t0, t0, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
+ // allocated object if not; allocate and initialize a FixedArray if so.
// a1: constructor function
// t0: JSObject
// t1: start of next object (not tagged)
@@ -574,7 +578,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor
// a3: number of elements in properties array (untagged)
// t0: JSObject
- // t1: start of next object
+ // t1: start of FixedArray (untagged)
__ LoadRoot(t2, Heap::kFixedArrayMapRootIndex);
__ mov(a2, t1);
__ sd(t2, MemOperand(a2, JSObject::kMapOffset));
@@ -595,20 +599,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ dsll(a7, a3, kPointerSizeLog2);
__ daddu(t2, a2, a7); // End of object.
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (!is_api_function || create_memento) {
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6));
- }
- __ jmp(&entry);
- __ bind(&loop);
- __ sd(t3, MemOperand(a2));
- __ daddiu(a2, a2, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, less, a2, Operand(t2));
+ if (!is_api_function || create_memento) {
+ __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6));
}
+ __ InitializeFieldsWithFiller(a2, t2, t3);
// Store the initialized FixedArray into the properties field of
// the JSObject.
@@ -643,7 +640,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ ld(a2, MemOperand(sp, kPointerSize * 2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ ld(a2, MemOperand(sp, offset));
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ Branch(&count_incremented, eq, a2, Operand(t1));
// a2 is an AllocationSite. We are creating a memento from it, so we
@@ -656,22 +654,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- __ Push(t0, t0);
+ // Restore the parameters.
+ if (use_new_target) {
+ __ Pop(a3); // new.target
+ }
+ __ Pop(a1);
- // Reload the number of arguments from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ ld(a1, MemOperand(sp, 2 * kPointerSize));
- __ ld(a3, MemOperand(sp, 3 * kPointerSize));
+ __ ld(a0, MemOperand(sp));
+ __ SmiUntag(a0);
+
+ if (use_new_target) {
+ __ Push(a3, t0, t0);
+ } else {
+ __ Push(t0, t0);
+ }
// Set up pointer to last argument.
__ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Set up number of arguments for function call below.
- __ SmiUntag(a0, a3);
-
// Copy arguments and receiver to the expression stack.
// a0: number of arguments
// a1: constructor function
@@ -679,10 +679,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: new.target (if used)
+ // sp[2/3]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiUntag(a3);
+ __ mov(a3, a0);
__ jmp(&entry);
__ bind(&loop);
__ dsll(a4, a3, kPointerSizeLog2);
@@ -707,7 +707,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -722,8 +724,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(v0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -741,9 +743,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ ld(a1, MemOperand(sp, offset));
// Leave construct frame.
}
@@ -757,12 +760,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@@ -816,8 +824,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ Daddu(a4, a4, Operand(-1));
__ Branch(&loop, ge, a4, Operand(zero_reg));
- __ Daddu(a0, a0, Operand(1));
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -841,9 +847,10 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// Restore context from the frame.
// v0: result
- // sp[0]: number of arguments (smi-tagged)
+ // sp[0]: new.target
+ // sp[1]: number of arguments (smi-tagged)
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ ld(a1, MemOperand(sp, 0));
+ __ ld(a1, MemOperand(sp, kPointerSize));
// Leave construct frame.
}
@@ -1413,6 +1420,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
__ ld(key, MemOperand(fp, indexOffset));
__ Branch(&entry);
@@ -1422,7 +1431,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ld(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ li(slot, Operand(Smi::FromInt(index)));
+ __ li(vector, feedback_vector);
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
__ push(v0);
@@ -1751,6 +1767,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
+
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(a5, FieldMemOperand(a4, SharedFunctionInfo::kStrongModeByteOffset));
+ __ And(a5, a5, Operand(1 << SharedFunctionInfo::kStrongModeBitWithinByte));
+ __ Branch(&no_strong_error, eq, a5, Operand(zero_reg));
+
+ // What we really care about is the required number of arguments.
+ DCHECK_EQ(kPointerSize, kInt64Size);
+ __ lw(a5, FieldMemOperand(a4, SharedFunctionInfo::kLengthOffset));
+ __ srl(a5, a5, 1);
+ __ Branch(&no_strong_error, ge, a0, Operand(a5));
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Calculate copy start address into a0 and copy end address is fp.
@@ -1828,6 +1865,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index e53064f05a..90a72ada64 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -92,9 +92,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc);
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cc, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -112,17 +111,17 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
- a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ a0.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments, adjust sp.
__ Dsubu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sd(descriptor.GetEnvironmentParameterRegister(i),
+ __ sd(descriptor.GetRegisterParameter(i),
MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
__ CallExternalReference(miss, param_count);
@@ -272,9 +271,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc) {
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cc, Strength strength) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t1;
@@ -289,14 +287,31 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Smis. If it's not a heap number, then return equal.
__ GetObjectType(a0, t0, t0);
if (cc == less || cc == greater) {
+ // Call runtime on identical JSObjects.
__ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics, since
+ // we need to throw a TypeError. Smis have already been ruled out.
+ __ Branch(&return_equal, eq, t0, Operand(HEAP_NUMBER_TYPE));
+ __ And(t0, t0, Operand(kIsNotStringMask));
+ __ Branch(slow, ne, t0, Operand(zero_reg));
+ }
} else {
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
__ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ And(t0, t0, Operand(kIsNotStringMask));
+ __ Branch(slow, ne, t0, Operand(zero_reg));
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -580,7 +595,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc, strength());
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -708,7 +723,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- native = Builtins::COMPARE;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
ncr = GREATER;
@@ -1371,8 +1387,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = a5;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- !scratch.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
@@ -1596,9 +1611,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(a4, a5, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
a5, &miss);
@@ -1609,7 +1623,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1669,8 +1682,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[4] : receiver displacement
// sp[8] : function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1702,8 +1713,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// a6 : allocated object (tagged)
// t1 : mapped parameter count (tagged)
- CHECK(!has_new_target());
-
__ ld(a1, MemOperand(sp, 0 * kPointerSize));
// a1 = parameter count (tagged)
@@ -1769,7 +1778,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
@@ -1960,15 +1969,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- Label skip_decrement;
- __ Branch(&skip_decrement, eq, a1, Operand(Smi::FromInt(0)));
- // Subtract 1 from smi-tagged arguments count.
- __ SmiUntag(a1);
- __ Daddu(a1, a1, Operand(-1));
- __ SmiTag(a1);
- __ bind(&skip_decrement);
- }
__ sd(a1, MemOperand(sp, 0));
__ SmiScale(at, a1, kPointerSizeLog2);
@@ -2053,9 +2053,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // sp[0] : index of rest parameter
- // sp[4] : number of parameters
- // sp[8] : receiver displacement
+ // sp[0] : language mode
+ // sp[4] : index of rest parameter
+ // sp[8] : number of parameters
+ // sp[12] : receiver displacement
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -2066,17 +2067,17 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ sd(a1, MemOperand(sp, 2 * kPointerSize));
__ SmiScale(at, a1, kPointerSizeLog2);
__ Daddu(a3, a2, Operand(at));
__ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 2 * kPointerSize));
+ __ sd(a3, MemOperand(sp, 3 * kPointerSize));
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@@ -2917,6 +2918,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, a5, Operand(at));
+ // Increment the call count for monomorphic function calls.
+ __ dsrl(t0, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a3, a2, Operand(t0));
+ __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+ __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+
__ mov(a2, a4);
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
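
The slot-offset computation above leans on a MIPS64 Smi trick: a Smi keeps its payload in the upper 32 bits, so a logical right shift by 32 - kPointerSizeLog2 turns a Smi index directly into a byte offset (index * 8) without untagging first. A sketch of the arithmetic under that tagging assumption:

    #include <cassert>
    #include <cstdint>

    constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers

    int main() {
      int64_t index = 5;
      uint64_t smi = static_cast<uint64_t>(index) << 32;  // MIPS64 Smi tagging
      uint64_t offset = smi >> (32 - kPointerSizeLog2);   // what dsrl computes
      assert(offset == static_cast<uint64_t>(index) * 8);  // 40-byte offset
      return 0;
    }
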
@@ -2976,6 +2984,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(a1, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ dsrl(t0, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a3, a2, Operand(t0));
+ __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+ __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -3051,6 +3066,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ sd(a4, FieldMemOperand(a2, with_types_offset));
+ // Initialize the call counter.
+ __ dsrl(at, a3, 32 - kPointerSizeLog2);
+ __ Daddu(at, a2, Operand(at));
+ __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
// Store the function. Use a stub since we need a frame for allocation.
// a2 - vector
// a3 - slot
@@ -3116,9 +3137,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
// Consumed by runtime conversion function:
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Push(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
__ Push(object_, index_);
}
@@ -3134,9 +3155,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// have a chance to overwrite it.
__ Move(index_, v0);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
@@ -3786,7 +3807,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4613,15 +4634,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4640,12 +4661,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4738,11 +4757,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
- Register name = VectorLoadICDescriptor::NameRegister(); // a2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
+ Register name = LoadWithVectorDescriptor::NameRegister(); // a2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
Register feedback = a4;
Register receiver_map = a5;
Register scratch1 = a6;
@@ -4785,21 +4804,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
- Register key = VectorLoadICDescriptor::NameRegister(); // a2
- Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
- Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
+ Register key = LoadWithVectorDescriptor::NameRegister(); // a2
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
Register feedback = a4;
Register receiver_map = a5;
Register scratch1 = a6;
@@ -4834,7 +4853,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&try_poly_name, ne, feedback, Operand(at));
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4858,6 +4877,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -5547,6 +5618,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 05a193c517..4f45b08018 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -1153,6 +1153,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/mips64/constants-mips64.cc
index 6a26c2bd5b..fd183a7b01 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/mips64/constants-mips64.cc
@@ -141,6 +141,8 @@ bool Instruction::IsForbiddenInBranchDelay() const {
case BNEL:
case BLEZL:
case BGTZL:
+ case BC:
+ case BALC:
return true;
case REGIMM:
switch (RtFieldRaw()) {
@@ -173,6 +175,11 @@ bool Instruction::IsLinkingInstruction() const {
switch (op) {
case JAL:
return true;
+ case POP76:
+ if (RsFieldRawNoAssert() == JIALC)
+ return true; // JIALC
+ else
+ return false; // BNEZC
case REGIMM:
switch (RtFieldRaw()) {
case BGEZAL:
@@ -291,6 +298,42 @@ Instruction::Type Instruction::InstructionType() const {
case EXT:
case DEXT:
return kRegisterType;
+ case BSHFL: {
+ int sa = SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP:
+ return kRegisterType;
+ case WSBH:
+ case SEB:
+ case SEH:
+ return kUnsupported;
+ }
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ }
+ case DBSHFL: {
+ int sa = SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case DBITSWAP:
+ return kRegisterType;
+ case DSBH:
+ case DSHD:
+ return kUnsupported;
+ }
+ sa = SaFieldRaw() >> kSaShift;
+ sa >>= kBp3Bits;
+ switch (sa) {
+ case DALIGN:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ }
default:
return kUnsupported;
}
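
Decoding BSHFL/DBSHFL is two-phase because ALIGN/DALIGN overlay a bp field on sa: the full 5-bit sa value is first matched against the fixed codes, then the low bp bits are shifted away and the remainder matched against the align code. A compact model of the BSHFL dispatch using the sa-field codes this patch defines:

    #include <cassert>

    // sa-field codes from constants-mips64.h in this patch.
    constexpr int BITSWAP = 0, WSBH = 2, SEB = 16, SEH = 24;
    constexpr int ALIGN = 2, kBp2Bits = 2;

    enum Kind { kRegister, kUnsupported };

    Kind DecodeBshfl(int sa) {
      switch (sa) {
        case BITSWAP:
          return kRegister;
        case WSBH:
        case SEB:
        case SEH:
          return kUnsupported;
      }
      return (sa >> kBp2Bits) == ALIGN ? kRegister : kUnsupported;
    }

    int main() {
      for (int bp = 0; bp < 4; ++bp) assert(DecodeBshfl(8 + bp) == kRegister);
      assert(DecodeBshfl(WSBH) == kUnsupported);  // matched before the shift
      return 0;
    }
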
@@ -327,8 +370,8 @@ Instruction::Type Instruction::InstructionType() const {
case BNEL:
case BLEZL:
case BGTZL:
- case BEQZC:
- case BNEZC:
+ case POP66:
+ case POP76:
case LB:
case LH:
case LWL:
@@ -348,6 +391,9 @@ Instruction::Type Instruction::InstructionType() const {
case LDC1:
case SWC1:
case SDC1:
+ case PCREL:
+ case BC:
+ case BALC:
return kImmediateType;
// 26 bits immediate type instructions. e.g.: j imm26.
case J:
@@ -360,6 +406,7 @@ Instruction::Type Instruction::InstructionType() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index a1e6c0b05d..0284478c08 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -221,9 +221,17 @@ const int kSaBits = 5;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
+const int kBp2Shift = 6;
+const int kBp2Bits = 2;
+const int kBp3Shift = 6;
+const int kBp3Bits = 3;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
+const int kImm18Shift = 0;
+const int kImm18Bits = 18;
+const int kImm19Shift = 0;
+const int kImm19Bits = 19;
const int kImm21Shift = 0;
const int kImm21Bits = 21;
const int kImm26Shift = 0;
@@ -256,6 +264,9 @@ const int kFBtrueBits = 1;
// Instruction bit masks.
const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift;
+const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift;
+const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift;
const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
@@ -276,72 +287,75 @@ const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
- DAUI = ((3 << 3) + 5) << kOpcodeShift,
-
- BEQC = ((2 << 3) + 0) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
- DADDIU = ((3 << 3) + 1) << kOpcodeShift,
- LDL = ((3 << 3) + 2) << kOpcodeShift,
- LDR = ((3 << 3) + 3) << kOpcodeShift,
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- LWU = ((4 << 3) + 7) << kOpcodeShift,
-
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SDL = ((5 << 3) + 4) << kOpcodeShift,
- SDR = ((5 << 3) + 5) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- LLD = ((6 << 3) + 4) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
- BEQZC = ((6 << 3) + 6) << kOpcodeShift,
- LD = ((6 << 3) + 7) << kOpcodeShift,
-
- PREF = ((6 << 3) + 3) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SCD = ((7 << 3) + 4) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
- BNEZC = ((7 << 3) + 6) << kOpcodeShift,
- SD = ((7 << 3) + 7) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
+ SPECIAL = 0 << kOpcodeShift,
+ REGIMM = 1 << kOpcodeShift,
+
+ J = ((0 << 3) + 2) << kOpcodeShift,
+ JAL = ((0 << 3) + 3) << kOpcodeShift,
+ BEQ = ((0 << 3) + 4) << kOpcodeShift,
+ BNE = ((0 << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0 << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0 << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1 << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1 << 3) + 1) << kOpcodeShift,
+ SLTI = ((1 << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1 << 3) + 3) << kOpcodeShift,
+ ANDI = ((1 << 3) + 4) << kOpcodeShift,
+ ORI = ((1 << 3) + 5) << kOpcodeShift,
+ XORI = ((1 << 3) + 6) << kOpcodeShift,
+ LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+ DAUI = ((3 << 3) + 5) << kOpcodeShift,
+
+ BEQC = ((2 << 3) + 0) << kOpcodeShift,
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2 << 3) + 4) << kOpcodeShift,
+ BNEL = ((2 << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2 << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2 << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
+ DADDIU = ((3 << 3) + 1) << kOpcodeShift,
+ LDL = ((3 << 3) + 2) << kOpcodeShift,
+ LDR = ((3 << 3) + 3) << kOpcodeShift,
+ SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
+
+ LB = ((4 << 3) + 0) << kOpcodeShift,
+ LH = ((4 << 3) + 1) << kOpcodeShift,
+ LWL = ((4 << 3) + 2) << kOpcodeShift,
+ LW = ((4 << 3) + 3) << kOpcodeShift,
+ LBU = ((4 << 3) + 4) << kOpcodeShift,
+ LHU = ((4 << 3) + 5) << kOpcodeShift,
+ LWR = ((4 << 3) + 6) << kOpcodeShift,
+ LWU = ((4 << 3) + 7) << kOpcodeShift,
+
+ SB = ((5 << 3) + 0) << kOpcodeShift,
+ SH = ((5 << 3) + 1) << kOpcodeShift,
+ SWL = ((5 << 3) + 2) << kOpcodeShift,
+ SW = ((5 << 3) + 3) << kOpcodeShift,
+ SDL = ((5 << 3) + 4) << kOpcodeShift,
+ SDR = ((5 << 3) + 5) << kOpcodeShift,
+ SWR = ((5 << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6 << 3) + 1) << kOpcodeShift,
+ BC = ((6 << 3) + 2) << kOpcodeShift,
+ LLD = ((6 << 3) + 4) << kOpcodeShift,
+ LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ POP66 = ((6 << 3) + 6) << kOpcodeShift,
+ LD = ((6 << 3) + 7) << kOpcodeShift,
+
+ PREF = ((6 << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7 << 3) + 1) << kOpcodeShift,
+ BALC = ((7 << 3) + 2) << kOpcodeShift,
+ PCREL = ((7 << 3) + 3) << kOpcodeShift,
+ SCD = ((7 << 3) + 4) << kOpcodeShift,
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+ POP76 = ((7 << 3) + 6) << kOpcodeShift,
+ SD = ((7 << 3) + 7) << kOpcodeShift,
+
+ COP1X = ((1 << 4) + 3) << kOpcodeShift
};
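
The ((row << 3) + col) << kOpcodeShift pattern mirrors the row/column layout of the opcode map in the architecture manual: the 6-bit primary opcode splits into a 3-bit row and a 3-bit column. A small sketch checking the new r6 entry BC and recovering the field (kOpcodeShift is 26, as defined earlier in this header; constants are redeclared locally as unsigned to keep the shifts well-defined):

    #include <cstdint>

    const uint32_t kOpcodeShift = 26;
    const uint32_t kOpcodeMask = ((1u << 6) - 1) << kOpcodeShift;

    // BC sits at row 6, column 2 of the opcode map: (6 << 3) + 2 == 0x32.
    static_assert((((6u << 3) + 2) << kOpcodeShift) == (0x32u << kOpcodeShift),
                  "BC encoding");

    // Recover the primary opcode of a raw instruction word.
    constexpr uint32_t PrimaryOpcode(uint32_t instr) {
      return instr & kOpcodeMask;
    }
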
enum SecondaryField {
@@ -443,7 +457,21 @@ enum SecondaryField {
DINSU = ((0 << 3) + 6),
DINS = ((0 << 3) + 7),
- DSBH = ((4 << 3) + 4),
+ BSHFL = ((4 << 3) + 0),
+ DBSHFL = ((4 << 3) + 4),
+
+ // SPECIAL3 Encoding of sa Field.
+ BITSWAP = ((0 << 3) + 0),
+ ALIGN = ((0 << 3) + 2),
+ WSBH = ((0 << 3) + 2),
+ SEB = ((2 << 3) + 0),
+ SEH = ((3 << 3) + 0),
+
+ DBITSWAP = ((0 << 3) + 0),
+ DALIGN = ((0 << 3) + 1),
+ DBITSWAP_SA = ((0 << 3) + 0) << kSaShift,
+ DSBH = ((0 << 3) + 2),
+ DSHD = ((0 << 3) + 5),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
@@ -470,6 +498,15 @@ enum SecondaryField {
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+
+ ADD_S = ((0 << 3) + 0),
+ SUB_S = ((0 << 3) + 1),
+ MUL_S = ((0 << 3) + 2),
+ DIV_S = ((0 << 3) + 3),
+ ABS_S = ((0 << 3) + 5),
+ SQRT_S = ((0 << 3) + 4),
+ MOV_S = ((0 << 3) + 6),
+ NEG_S = ((0 << 3) + 7),
ROUND_L_S = ((1 << 3) + 0),
TRUNC_L_S = ((1 << 3) + 1),
CEIL_L_S = ((1 << 3) + 2),
@@ -478,6 +515,9 @@ enum SecondaryField {
TRUNC_W_S = ((1 << 3) + 5),
CEIL_W_S = ((1 << 3) + 6),
FLOOR_W_S = ((1 << 3) + 7),
+ RECIP_S = ((2 << 3) + 5),
+ RSQRT_S = ((2 << 3) + 6),
+ CLASS_S = ((3 << 3) + 3),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
@@ -499,6 +539,9 @@ enum SecondaryField {
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
+ RECIP_D = ((2 << 3) + 5),
+ RSQRT_D = ((2 << 3) + 6),
+ CLASS_D = ((3 << 3) + 3),
MIN = ((3 << 3) + 4),
MINA = ((3 << 3) + 5),
MAX = ((3 << 3) + 6),
@@ -514,6 +557,7 @@ enum SecondaryField {
C_ULT_D = ((6 << 3) + 5),
C_OLE_D = ((6 << 3) + 6),
C_ULE_D = ((6 << 3) + 7),
+
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
@@ -557,6 +601,9 @@ enum SecondaryField {
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
SEL = ((2 << 3) + 0),
+ MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
+ MOVZ_C = ((2 << 3) + 2), // COP1 on FPR registers.
+ MOVN_C = ((2 << 3) + 3), // COP1 on FPR registers.
SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
@@ -564,6 +611,21 @@ enum SecondaryField {
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
+ // PCREL Encoding of rt Field.
+ ADDIUPC = ((0 << 2) + 0),
+ LWPC = ((0 << 2) + 1),
+ LWUPC = ((0 << 2) + 2),
+ LDPC = ((0 << 3) + 6),
+ // reserved ((1 << 3) + 6),
+ AUIPC = ((3 << 3) + 6),
+ ALUIPC = ((3 << 3) + 7),
+
+ // POP66 Encoding of rs Field.
+ JIC = ((0 << 5) + 0),
+
+ // POP76 Encoding of rs Field.
+ JIALC = ((0 << 5) + 0),
+
NULLSF = 0
};
@@ -697,14 +759,21 @@ inline Condition CommuteCondition(Condition cc) {
enum FPUCondition {
kNoFPUCondition = -1,
- F = 0, // False.
- UN = 1, // Unordered.
- EQ = 2, // Equal.
- UEQ = 3, // Unordered or Equal.
- OLT = 4, // Ordered or Less Than.
- ULT = 5, // Unordered or Less Than.
- OLE = 6, // Ordered or Less Than or Equal.
- ULE = 7 // Unordered or Less Than or Equal.
+ F = 0x00, // False.
+ UN = 0x01, // Unordered.
+ EQ = 0x02, // Equal.
+ UEQ = 0x03, // Unordered or Equal.
+ OLT = 0x04, // Ordered or Less Than, on Mips release < 6.
+ LT = 0x04, // Ordered or Less Than, on Mips release >= 6.
+ ULT = 0x05, // Unordered or Less Than.
+ OLE = 0x06, // Ordered or Less Than or Equal, on Mips release < 6.
+ LE = 0x06, // Ordered or Less Than or Equal, on Mips release >= 6.
+ ULE = 0x07, // Unordered or Less Than or Equal.
+
+ // Following constants are available on Mips release >= 6 only.
+ ORD = 0x11, // Ordered, on Mips release >= 6.
+ UNE = 0x12, // Not equal, on Mips release >= 6.
+  NE = 0x13,    // Ordered Greater Than or Less Than, on Mips release >= 6 only.
};
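
Note that LT/LE deliberately alias OLT/OLE: the r6 cmp.cond.fmt instructions reuse the low condition encodings of the pre-r6 c.cond.fmt ones, while ORD/UNE/NE (0x11-0x13) exist only in the wider r6 condition space. A hedged one-liner capturing that split (not part of the patch):

    // Pre-r6 c.cond.fmt encodes only conditions 0x00..0x07; anything above
    // that (ORD, UNE, NE above) requires the r6 cmp.cond.fmt encoding space.
    inline bool IsR6OnlyFPUCondition(int cond) { return cond > 0x07; }
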
@@ -867,6 +936,16 @@ class Instruction {
return Bits(kFrShift + kFrBits -1, kFrShift);
}
+ inline int Bp2Value() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift);
+ }
+
+ inline int Bp3Value() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kBp3Shift + kBp3Bits - 1, kBp3Shift);
+ }
+
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
@@ -910,7 +989,6 @@ class Instruction {
}
inline int SaFieldRaw() const {
- DCHECK(InstructionType() == kRegisterType);
return InstructionBits() & kSaFieldMask;
}
@@ -939,13 +1017,24 @@ class Instruction {
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
+ inline int32_t Imm18Value() const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift);
+ }
+
+ inline int32_t Imm19Value() const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift);
+ }
+
inline int32_t Imm21Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
inline int32_t Imm26Value() const {
- DCHECK(InstructionType() == kJumpType);
+ DCHECK((InstructionType() == kJumpType) ||
+ (InstructionType() == kImmediateType));
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/mips64/cpu-mips64.cc
index 027d5a103e..9c600bfa67 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/mips64/cpu-mips64.cc
@@ -54,6 +54,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif // USE_SIMULATOR.
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/debug-mips64.cc b/deps/v8/src/mips64/debug-mips64.cc
index 8ef247d8c9..75e37c5e77 100644
--- a/deps/v8/src/mips64/debug-mips64.cc
+++ b/deps/v8/src/mips64/debug-mips64.cc
@@ -153,51 +153,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for keyed IC load (from ic-mips64.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC keyed store call (from ic-mips64.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- a0 : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that v0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -295,6 +250,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index d9743bfb2b..cf5700f334 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -403,7 +403,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
+ // No embedded constant pool support.
UNREACHABLE();
}
@@ -411,4 +411,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index dc43769fea..ee624db2f8 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -86,10 +86,22 @@ class Decoder {
void PrintUImm16(Instruction* instr);
void PrintSImm16(Instruction* instr);
void PrintXImm16(Instruction* instr);
+ void PrintPCImm16(Instruction* instr, int delta_pc, int n_bits);
+ void PrintXImm18(Instruction* instr);
+ void PrintSImm18(Instruction* instr);
+ void PrintXImm19(Instruction* instr);
+ void PrintSImm19(Instruction* instr);
void PrintXImm21(Instruction* instr);
+
+ void PrintPCImm21(Instruction* instr, int delta_pc, int n_bits);
void PrintXImm26(Instruction* instr);
+ void PrintSImm26(Instruction* instr);
+ void PrintPCImm26(Instruction* instr, int delta_pc, int n_bits);
+ void PrintPCImm26(Instruction* instr);
void PrintCode(Instruction* instr); // For break and trap instructions.
void PrintFormat(Instruction* instr); // For floating format postfix.
+ void PrintBp2(Instruction* instr);
+ void PrintBp3(Instruction* instr);
// Printing of instruction name.
void PrintInstructionName(Instruction* instr);
@@ -251,7 +263,8 @@ void Decoder::PrintUImm16(Instruction* instr) {
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
+ int32_t imm =
+ ((instr->Imm16Value()) << (32 - kImm16Bits)) >> (32 - kImm16Bits);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
@@ -263,6 +276,50 @@ void Decoder::PrintXImm16(Instruction* instr) {
}
+// Print absolute address for 16-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC + delta_pc + (offset << n_bits)
+void Decoder::PrintPCImm16(Instruction* instr, int delta_pc, int n_bits) {
+ int16_t offset = instr->Imm16Value();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) +
+ delta_pc + (offset << n_bits)));
+}
+
+
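
The formula in the comment above is the architectural branch-target computation: the base is the branch address plus delta_pc (4 for every format in this file, i.e. the address of the following slot), plus the sign-extended offset scaled left by n_bits. A standalone sketch:

    #include <cstdint>

    // Branch target for a PC-relative offset field, per the formula above.
    // 'pc' is the address of the branch instruction itself. Multiplication
    // avoids left-shifting a negative value.
    uint64_t BranchTarget(uint64_t pc, int32_t signed_offset, int delta_pc,
                          int n_bits) {
      const int64_t scaled =
          static_cast<int64_t>(signed_offset) * (INT64_C(1) << n_bits);
      return pc + delta_pc + scaled;
    }
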
+// Print 18-bit signed immediate value.
+void Decoder::PrintSImm18(Instruction* instr) {
+ int32_t imm =
+ ((instr->Imm18Value()) << (32 - kImm18Bits)) >> (32 - kImm18Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+
+// Print 18-bit hex immediate value.
+void Decoder::PrintXImm18(Instruction* instr) {
+ int32_t imm = instr->Imm18Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 19-bit hex immediate value.
+void Decoder::PrintXImm19(Instruction* instr) {
+ int32_t imm = instr->Imm19Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 19-bit signed immediate value.
+void Decoder::PrintSImm19(Instruction* instr) {
+ int32_t imm19 = instr->Imm19Value();
+  // Sign-extend the 19-bit value.
+ imm19 <<= (32 - kImm19Bits);
+ imm19 >>= (32 - kImm19Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm19);
+}
+
+
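
The shift-left/shift-right pairs used by these PrintSImm* helpers rely on arithmetic shifts of signed values, which is implementation-defined behavior in C++. A portable equivalent for sign-extending an n-bit field, shown for comparison only (not taken from the patch):

    #include <cstdint>

    // Sign-extend the low 'bits' bits of 'v' (0 < bits < 32) without
    // shifting into or out of the sign bit.
    int32_t SignExtend(uint32_t v, int bits) {
      const uint32_t sign = 1u << (bits - 1);
      v &= (1u << bits) - 1;  // Keep only the field.
      return static_cast<int32_t>((v ^ sign) - sign);
    }
    // SignExtend(0x3ffff, 18) == -1, matching PrintSImm18 above.
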
// Print 21-bit immediate value.
void Decoder::PrintXImm21(Instruction* instr) {
uint32_t imm = instr->Imm21Value();
@@ -270,13 +327,78 @@ void Decoder::PrintXImm21(Instruction* instr) {
}
-// Print 26-bit immediate value.
+// Print absolute address for 21-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC + delta_pc + (offset << n_bits)
+void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
+ int32_t imm21 = instr->Imm21Value();
+  // Sign-extend the 21-bit value.
+ imm21 <<= (32 - kImm21Bits);
+ imm21 >>= (32 - kImm21Bits);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) +
+ delta_pc + (imm21 << n_bits)));
+}
+
+
+// Print 26-bit hex immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
- uint32_t imm = instr->Imm26Value() << kImmFieldShift;
+ uint32_t imm = static_cast<uint32_t>(instr->Imm26Value()) << kImmFieldShift;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
+// Print 26-bit signed immediate value.
+void Decoder::PrintSImm26(Instruction* instr) {
+ int32_t imm26 = instr->Imm26Value();
+  // Sign-extend the 26-bit value.
+ imm26 <<= (32 - kImm26Bits);
+ imm26 >>= (32 - kImm26Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm26);
+}
+
+
+// Print absolute address for 26-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC + delta_pc + (offset << n_bits)
+void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
+ int32_t imm26 = instr->Imm26Value();
+  // Sign-extend the 26-bit value.
+ imm26 <<= (32 - kImm26Bits);
+ imm26 >>= (32 - kImm26Bits);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) +
+ delta_pc + (imm26 << n_bits)));
+}
+
+
+// Print absolute address for 26-bit offset or immediate value.
+// The absolute address is calculated according to the following expression:
+// PC[GPRLEN-1 .. 28] || instr_index26 || 00
+void Decoder::PrintPCImm26(Instruction* instr) {
+ int32_t imm26 = instr->Imm26Value();
+ uint64_t pc_mask = ~0xfffffff;
+ uint64_t pc = ((uint64_t)(instr + 1) & pc_mask) | (imm26 << 2);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress((reinterpret_cast<byte*>(pc))));
+}
+
+
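
Unlike the PC-relative branches, J/JAL do not add an offset: they keep the upper bits of the delay-slot address and splice in the shifted 26-bit instruction index, exactly as the PC[GPRLEN-1 .. 28] || instr_index26 || 00 comment describes. A sketch of the same computation:

    #include <cstdint>

    // Jump target for J/JAL: stay within the 256 MB region of the
    // delay-slot address; the low 28 bits come from the instruction index.
    uint64_t JumpTarget(uint64_t branch_pc, uint32_t instr_index26) {
      const uint64_t region = (branch_pc + 4) & ~UINT64_C(0xfffffff);
      return region | (static_cast<uint64_t>(instr_index26) << 2);
    }
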
+void Decoder::PrintBp2(Instruction* instr) {
+ int bp2 = instr->Bp2Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp2);
+}
+
+
+void Decoder::PrintBp3(Instruction* instr) {
+ int bp3 = instr->Bp3Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp3);
+}
+
+
// Print the code field (used by break and trap instructions).
void Decoder::PrintCode(Instruction* instr) {
if (instr->OpcodeFieldRaw() != SPECIAL)
@@ -395,25 +517,130 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "imm16"));
- if (format[5] == 's') {
- DCHECK(STRING_STARTS_WITH(format, "imm16s"));
- PrintSImm16(instr);
- } else if (format[5] == 'u') {
- DCHECK(STRING_STARTS_WITH(format, "imm16u"));
- PrintSImm16(instr);
- } else {
- DCHECK(STRING_STARTS_WITH(format, "imm16x"));
- PrintXImm16(instr);
+ if (format[4] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "imm16"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm16s"));
+ PrintSImm16(instr);
+ break;
+ case 'u':
+ DCHECK(STRING_STARTS_WITH(format, "imm16u"));
+ PrintSImm16(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm16x"));
+ PrintXImm16(instr);
+ break;
+          case 'p': {  // The PC-relative address.
+ DCHECK(STRING_STARTS_WITH(format, "imm16p"));
+ int delta_pc = 0;
+ int n_bits = 0;
+ switch (format[6]) {
+ case '4': {
+ DCHECK(STRING_STARTS_WITH(format, "imm16p4"));
+ delta_pc = 4;
+ switch (format[8]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "imm16p4s2"));
+ n_bits = 2;
+ PrintPCImm16(instr, delta_pc, n_bits);
+ return 9;
+ }
+ }
+ }
+ }
+ }
+ return 6;
+ } else if (format[4] == '8') {
+ DCHECK(STRING_STARTS_WITH(format, "imm18"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm18s"));
+ PrintSImm18(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm18x"));
+ PrintXImm18(instr);
+ break;
+ }
+ return 6;
+ } else if (format[4] == '9') {
+ DCHECK(STRING_STARTS_WITH(format, "imm19"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm19s"));
+ PrintSImm19(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm19x"));
+ PrintXImm19(instr);
+ break;
+ }
+ return 6;
}
- return 6;
} else if (format[3] == '2' && format[4] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "imm21x"));
- PrintXImm21(instr);
+ DCHECK(STRING_STARTS_WITH(format, "imm21"));
+ switch (format[5]) {
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm21x"));
+ PrintXImm21(instr);
+ break;
+        case 'p': {  // The PC-relative address.
+ DCHECK(STRING_STARTS_WITH(format, "imm21p"));
+ int delta_pc = 0;
+ int n_bits = 0;
+ switch (format[6]) {
+ case '4': {
+ DCHECK(STRING_STARTS_WITH(format, "imm21p4"));
+ delta_pc = 4;
+ switch (format[8]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "imm21p4s2"));
+ n_bits = 2;
+ PrintPCImm21(instr, delta_pc, n_bits);
+ return 9;
+ }
+ }
+ }
+ }
+ }
return 6;
} else if (format[3] == '2' && format[4] == '6') {
- DCHECK(STRING_STARTS_WITH(format, "imm26x"));
- PrintXImm26(instr);
+ DCHECK(STRING_STARTS_WITH(format, "imm26"));
+ switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm26s"));
+ PrintSImm26(instr);
+ break;
+ case 'x':
+ DCHECK(STRING_STARTS_WITH(format, "imm26x"));
+ PrintXImm26(instr);
+ break;
+        case 'p': {  // The PC-relative address.
+ DCHECK(STRING_STARTS_WITH(format, "imm26p"));
+ int delta_pc = 0;
+ int n_bits = 0;
+ switch (format[6]) {
+ case '4': {
+ DCHECK(STRING_STARTS_WITH(format, "imm26p4"));
+ delta_pc = 4;
+ switch (format[8]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "imm26p4s2"));
+ n_bits = 2;
+ PrintPCImm26(instr, delta_pc, n_bits);
+ return 9;
+ }
+ }
+ }
+ }
+ case 'j': { // Absolute address for jump instructions.
+ DCHECK(STRING_STARTS_WITH(format, "imm26j"));
+ PrintPCImm26(instr);
+ break;
+ }
+ }
return 6;
}
}
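
Taken together, the new specifiers follow a small grammar: immNN plus 's' (signed), 'u' (unsigned, though the imm16u case above still routes to PrintSImm16), 'x' (hex), 'j' (absolute jump target), or 'p<delta>s<shift>' for PC-relative operands, so "imm16p4s2" means a 16-bit offset based at PC+4 and scaled by 4. A hedged sketch of a parser for just the PC-relative form (the helper name is illustrative, not part of the patch; it assumes a well-formed specifier string):

    #include <cstring>

    // Parse an "imm16p4s2"-style specifier into (bits, delta_pc, shift).
    // Only the PC-relative forms handled by FormatOption above match.
    bool ParsePCRelSpecifier(const char* fmt, int* bits, int* delta_pc,
                             int* shift) {
      if (std::strncmp(fmt, "imm", 3) != 0) return false;
      *bits = (fmt[3] - '0') * 10 + (fmt[4] - '0');  // 16, 21 or 26 here.
      if (fmt[5] != 'p') return false;
      *delta_pc = fmt[6] - '0';  // Always 4 in this file.
      if (fmt[7] != 's') return false;
      *shift = fmt[8] - '0';     // Always 2 in this file.
      return true;
    }
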
@@ -448,10 +675,28 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
}
}
- case 'b': { // 'bc - Special for bc1 cc field.
- DCHECK(STRING_STARTS_WITH(format, "bc"));
- PrintBc(instr);
- return 2;
+ case 'b': {
+ switch (format[1]) {
+ case 'c': { // 'bc - Special for bc1 cc field.
+ DCHECK(STRING_STARTS_WITH(format, "bc"));
+ PrintBc(instr);
+ return 2;
+ }
+ case 'p': {
+ switch (format[2]) {
+ case '2': { // 'bp2
+ DCHECK(STRING_STARTS_WITH(format, "bp2"));
+ PrintBp2(instr);
+ return 3;
+ }
+ case '3': { // 'bp3
+ DCHECK(STRING_STARTS_WITH(format, "bp3"));
+ PrintBp3(instr);
+ return 3;
+ }
+ }
+ }
+ }
}
case 'C': { // 'Cc - Special for c.xx.d cc field.
DCHECK(STRING_STARTS_WITH(format, "Cc"));
@@ -520,12 +765,28 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
case RINT:
Format(instr, "rint.'t 'fd, 'fs");
break;
+ case SEL:
+ Format(instr, "sel.'t 'fd, 'fs, 'ft");
+ break;
case SELEQZ_C:
Format(instr, "seleqz.'t 'fd, 'fs, 'ft");
break;
case SELNEZ_C:
Format(instr, "selnez.'t 'fd, 'fs, 'ft");
break;
+ case MOVZ_C:
+ Format(instr, "movz.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVN_C:
+ Format(instr, "movn.'t 'fd, 'fs, 'rt");
+ break;
+ case MOVF:
+ if (instr->Bit(16)) {
+ Format(instr, "movt.'t 'fd, 'fs, 'Cc");
+ } else {
+ Format(instr, "movf.'t 'fd, 'fs, 'Cc");
+ }
+ break;
case MIN:
Format(instr, "min.'t 'fd, 'fs, 'ft");
break;
@@ -562,6 +823,12 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
case SQRT_D:
Format(instr, "sqrt.'t 'fd, 'fs");
break;
+ case RECIP_D:
+ Format(instr, "recip.'t 'fd, 'fs");
+ break;
+ case RSQRT_D:
+ Format(instr, "rsqrt.'t 'fd, 'fs");
+ break;
case CVT_W_D:
Format(instr, "cvt.w.'t 'fd, 'fs");
break;
@@ -592,6 +859,9 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
case CEIL_L_D:
Format(instr, "ceil.l.'t 'fd, 'fs");
break;
+ case CLASS_D:
+ Format(instr, "class.'t 'fd, 'fs");
+ break;
case CVT_S_D:
Format(instr, "cvt.s.'t 'fd, 'fs");
break;
@@ -655,6 +925,9 @@ void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) {
case CVT_S_L:
Format(instr, "cvt.s.l 'fd, 'fs");
break;
+ case CMP_AF:
+ Format(instr, "cmp.af.d 'fd, 'fs, 'ft");
+ break;
case CMP_UN:
Format(instr, "cmp.un.d 'fd, 'fs, 'ft");
break;
@@ -1107,6 +1380,68 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
Format(instr, "dext 'rt, 'rs, 'sa, 'ss1");
break;
}
+ case BSHFL: {
+ int sa = instr->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP: {
+ Format(instr, "bitswap 'rd, 'rt");
+ break;
+ }
+ case SEB:
+ case SEH:
+ case WSBH:
+ UNREACHABLE();
+ break;
+ default: {
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN: {
+ Format(instr, "align 'rd, 'rs, 'rt, 'bp2");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
+ case DBSHFL: {
+ int sa = instr->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case DBITSWAP: {
+ switch (instr->SaFieldRaw() >> kSaShift) {
+ case DBITSWAP_SA:
+ Format(instr, "dbitswap 'rd, 'rt");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ case DSBH:
+ case DSHD:
+ UNREACHABLE();
+ break;
+ default: {
+ sa >>= kBp3Bits;
+ switch (sa) {
+ case DALIGN: {
+ Format(instr, "dalign 'rd, 'rs, 'rt, 'bp3");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
default:
UNREACHABLE();
}
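
In the BSHFL/DBSHFL cases the sa field is overloaded: an exact value selects BITSWAP/DBITSWAP, while ALIGN/DALIGN occupy a whole range whose low bits carry the byte position (bp2/bp3). A local sketch of the BSHFL disambiguation (constants inlined from the header above):

    // sa-field decoding for BSHFL, mirroring the switch above: BITSWAP is
    // sa == 0; ALIGN is sa == 0b010bb with bp2 in the low two bits.
    const char* DecodeBshflSa(int sa, int* bp2) {
      if (sa == 0) return "bitswap";
      if ((sa >> 2) == 2) {  // ALIGN == 2 once the kBp2Bits are dropped.
        *bp2 = sa & 3;
        return "align";
      }
      return nullptr;  // UNREACHABLE in the decoder.
    }
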
@@ -1147,16 +1482,16 @@ void Decoder::DecodeTypeImmediateCOP1(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1:
if (instr->FBtrueValue()) {
- Format(instr, "bc1t 'bc, 'imm16u");
+ Format(instr, "bc1t 'bc, 'imm16u -> 'imm16p4s2");
} else {
- Format(instr, "bc1f 'bc, 'imm16u");
+ Format(instr, "bc1f 'bc, 'imm16u -> 'imm16p4s2");
}
break;
case BC1EQZ:
- Format(instr, "bc1eqz 'ft, 'imm16u");
+ Format(instr, "bc1eqz 'ft, 'imm16u -> 'imm16p4s2");
break;
case BC1NEZ:
- Format(instr, "bc1nez 'ft, 'imm16u");
+ Format(instr, "bc1nez 'ft, 'imm16u -> 'imm16p4s2");
break;
default:
UNREACHABLE();
@@ -1167,19 +1502,19 @@ void Decoder::DecodeTypeImmediateCOP1(Instruction* instr) {
void Decoder::DecodeTypeImmediateREGIMM(Instruction* instr) {
switch (instr->RtFieldRaw()) {
case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
+ Format(instr, "bltz 'rs, 'imm16u -> 'imm16p4s2");
break;
case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
+ Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2");
break;
case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
+ Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2");
break;
case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
+ Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2");
break;
case BGEZALL:
- Format(instr, "bgezall 'rs, 'imm16u");
+ Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2");
break;
case DAHI:
Format(instr, "dahi 'rs, 'imm16u");
@@ -1200,85 +1535,87 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break; // Case COP1.
// ------------- REGIMM class.
case REGIMM:
-
- break; // Case REGIMM.
+ DecodeTypeImmediateREGIMM(instr);
+ break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
+ Format(instr, "beq 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ break;
+ case BC:
+ Format(instr, "bc 'imm26s -> 'imm26p4s2");
+ break;
+ case BALC:
+ Format(instr, "balc 'imm26s -> 'imm26p4s2");
break;
case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
+ Format(instr, "bne 'rs, 'rt, 'imm16u -> 'imm16p4s2");
break;
case BLEZ:
- if ((instr->RtFieldRaw() == 0)
- && (instr->RsFieldRaw() != 0)) {
- Format(instr, "blez 'rs, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgeuc 'rs, 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgezalc 'rs, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "blezalc 'rs, 'imm16u");
+ if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) {
+ Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() == instr->RsValue()) &&
+ (instr->RtValue() != 0)) {
+ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
case BGTZ:
- if ((instr->RtFieldRaw() == 0)
- && (instr->RsFieldRaw() != 0)) {
- Format(instr, "bgtz 'rs, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltuc 'rs, 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltzalc 'rt, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgtzalc 'rt, 'imm16u");
+ if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) {
+ Format(instr, "bgtz 'rs, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bltuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() == instr->RsValue()) &&
+ (instr->RtValue() != 0)) {
+ Format(instr, "bltzalc 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgtzalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
case BLEZL:
- if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgezc 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgec 'rs, 'rt, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "blezc 'rt, 'imm16u");
+ if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) {
+ Format(instr, "bgezc 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgec 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "blezc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
case BGTZL:
- if ((instr->RtFieldRaw() == instr->RsFieldRaw())
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltzc 'rt, 'imm16u");
- } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
- && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bltc 'rs, 'rt, 'imm16u");
- } else if ((instr->RsFieldRaw() == 0)
- && (instr->RtFieldRaw() != 0)) {
- Format(instr, "bgtzc 'rt, 'imm16u");
+ if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) {
+ Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RtValue() != instr->RsValue()) &&
+ (instr->RsValue() != 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
+ Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
break;
- case BEQZC:
- if (instr->RsFieldRaw() != 0) {
- Format(instr, "beqzc 'rs, 'imm21x");
+ case POP66:
+ if (instr->RsValue() == JIC) {
+ Format(instr, "jic 'rt, 'imm16s");
+ } else {
+ Format(instr, "beqzc 'rs, 'imm21x -> 'imm21p4s2");
}
break;
- case BNEZC:
- if (instr->RsFieldRaw() != 0) {
- Format(instr, "bnezc 'rs, 'imm21x");
+ case POP76:
+ if (instr->RsValue() == JIALC) {
+ Format(instr, "jialc 'rt, 'imm16x");
+ } else {
+ Format(instr, "bnezc 'rs, 'imm21x -> 'imm21p4s2");
}
break;
// ------------- Arithmetic instructions.
@@ -1287,10 +1624,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
// Check if BOVC or BEQC instruction.
- if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
- Format(instr, "bovc 'rs, 'rt, 'imm16s");
- } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
- Format(instr, "beqc 'rs, 'rt, 'imm16s");
+ if (instr->RsValue() >= instr->RtValue()) {
+ Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ } else if (instr->RsValue() < instr->RtValue()) {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1301,10 +1638,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "daddi 'rt, 'rs, 'imm16s");
} else {
// Check if BNVC or BNEC instruction.
- if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
- Format(instr, "bnvc 'rs, 'rt, 'imm16s");
- } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
- Format(instr, "bnec 'rs, 'rt, 'imm16s");
+ if (instr->RsValue() >= instr->RtValue()) {
+ Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ } else if (instr->RsValue() < instr->RtValue()) {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1412,9 +1749,52 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
+ case PCREL: {
+ int32_t imm21 = instr->Imm21Value();
+ // rt field: 5-bits checking
+ uint8_t rt = (imm21 >> kImm16Bits);
+ switch (rt) {
+ case ALUIPC:
+ Format(instr, "aluipc 'rs, 'imm16s");
+ break;
+ case AUIPC:
+ Format(instr, "auipc 'rs, 'imm16s");
+ break;
+ default: {
+          // rt field: check the most significant 3 bits.
+ rt = (imm21 >> kImm18Bits);
+ switch (rt) {
+ case LDPC:
+ Format(instr, "ldpc 'rs, 'imm18s");
+ break;
+ default: {
+              // rt field: check the most significant 2 bits.
+ rt = (imm21 >> kImm19Bits);
+ switch (rt) {
+ case LWUPC:
+ Format(instr, "lwupc 'rs, 'imm19s");
+ break;
+ case LWPC:
+ Format(instr, "lwpc 'rs, 'imm19s");
+ break;
+ case ADDIUPC:
+ Format(instr, "addiupc 'rs, 'imm19s");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
default:
printf("a 0x%x \n", instr->OpcodeFieldRaw());
- UNREACHABLE();
+ UNREACHABLE();
break;
}
}
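
The PCREL case funnels on progressively fewer high bits of the 21-bit field: all five rt bits select AUIPC/ALUIPC, the top three select LDPC, and the top two select ADDIUPC/LWPC/LWUPC, leaving the rest of the field as the immediate. A compact sketch of the same funnel (encodings per the SecondaryField additions in constants-mips64.h; names local to the example):

    #include <cstdint>

    // Classify a PCREL instruction by the high bits of its 21-bit field:
    // ADDIUPC=0, LWPC=1, LWUPC=2 (2 bits); LDPC=6 (3 bits);
    // AUIPC=0x1e, ALUIPC=0x1f (all 5 bits).
    const char* ClassifyPcrel(uint32_t imm21) {
      switch (imm21 >> 16) {  // All 5 rt bits.
        case 0x1e: return "auipc";
        case 0x1f: return "aluipc";
      }
      switch (imm21 >> 18) {  // Most significant 3 bits.
        case 6: return "ldpc";
      }
      switch (imm21 >> 19) {  // Most significant 2 bits.
        case 0: return "addiupc";
        case 1: return "lwpc";
        case 2: return "lwupc";
      }
      return nullptr;  // UNREACHABLE in the decoder.
    }
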
@@ -1423,10 +1803,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
- Format(instr, "j 'imm26x");
+ Format(instr, "j 'imm26x -> 'imm26j");
break;
case JAL:
- Format(instr, "jal 'imm26x");
+ Format(instr, "jal 'imm26x -> 'imm26j");
break;
default:
UNREACHABLE();
@@ -1465,8 +1845,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
-} } // namespace v8::internal
-
+} // namespace internal
+} // namespace v8
//------------------------------------------------------------------------------
diff --git a/deps/v8/src/mips64/frames-mips64.cc b/deps/v8/src/mips64/frames-mips64.cc
index 2991248ccf..2f0436184e 100644
--- a/deps/v8/src/mips64/frames-mips64.cc
+++ b/deps/v8/src/mips64/frames-mips64.cc
@@ -32,12 +32,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/frames-mips64.h b/deps/v8/src/mips64/frames-mips64.h
index be732ef5f7..4434a98b7f 100644
--- a/deps/v8/src/mips64/frames-mips64.h
+++ b/deps/v8/src/mips64/frames-mips64.h
@@ -169,36 +169,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -6 * kPointerSize;
- static const int kConstructorOffset = -5 * kPointerSize;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/mips64/full-codegen-mips64.cc b/deps/v8/src/mips64/full-codegen-mips64.cc
index d2eb9dcec2..569dc51afc 100644
--- a/deps/v8/src/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/mips64/full-codegen-mips64.cc
@@ -113,10 +113,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-mips.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -135,7 +131,7 @@ void FullCodeGenerator::Generate() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ info->MayUseThis() && info->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ld(at, MemOperand(sp, receiver_offset));
@@ -197,17 +193,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -222,8 +218,9 @@ void FullCodeGenerator::Generate() {
__ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
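
The new first_parameter convention treats the receiver as parameter -1, so the same caller-SP-relative offset formula covers it: the receiver sits one slot above the first declared parameter. A tiny sketch of the offset arithmetic (parameter names are placeholders, not V8's):

    // Caller-SP-relative slot for parameter i, where i == -1 denotes the
    // receiver, matching the copy loop above.
    int ParameterOffset(int i, int num_parameters, int caller_sp_offset,
                        int pointer_size) {
      return caller_sp_offset + (num_parameters - 1 - i) * pointer_size;
    }
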
@@ -247,10 +244,49 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+      // The write barrier clobbers the register again; keep it marked as such.
+ }
+ SetVar(this_function_var, a1, a2, a3);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+ // Get the frame pointer for the calling frame.
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne, a1,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+
+ Label non_construct_frame, done;
+ __ Branch(&non_construct_frame, ne, a1,
+ Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+
+ __ ld(v0,
+ MemOperand(a2, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ Branch(&done);
+
+ __ bind(&non_construct_frame);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+
+ SetVar(new_target_var, v0, a2, a3);
+ }
// Possibly allocate RestParameters
int rest_index;
@@ -260,16 +296,13 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ Daddu(a3, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ li(a2, Operand(Smi::FromInt(num_parameters)));
__ li(a1, Operand(Smi::FromInt(rest_index)));
- __ Push(a3, a2, a1);
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
+ __ Push(a3, a2, a1, a0);
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -307,7 +340,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, v0, a1, a2);
@@ -331,7 +364,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -462,17 +495,15 @@ void FullCodeGenerator::EmitReturnSequence() {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
+
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int32_t sp_delta = arg_count * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
masm_->mov(sp, fp);
int no_frame_start = masm_->pc_offset();
@@ -841,7 +872,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
@@ -849,8 +881,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -858,7 +890,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -869,7 +901,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
@@ -901,25 +933,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ sd(result_register(), StackOperand(variable));
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -938,7 +971,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ li(a2, Operand(variable->name()));
__ li(a1, Operand(Smi::FromInt(NONE)));
@@ -956,20 +989,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -1048,9 +1082,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1094,8 +1128,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1103,7 +1138,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1203,7 +1238,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
// Load the current count to a0, load the length to a1.
__ ld(a0, MemOperand(sp, 0 * kPointerSize));
@@ -1237,10 +1272,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(a1, a3); // Enumerable and current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(a3, result_register());
- __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(loop_statement.continue_label(), eq, a3, Operand(at));
// Update the 'each' property or variable from the possibly filtered
// entry in register a3.
@@ -1248,7 +1284,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), a3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1309,38 +1345,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ ld(LoadDescriptor::ReceiverRegister(),
- MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ li(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- Label done;
- __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1391,17 +1405,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
- CallLoadIC(mode);
+ // All extension objects were empty and it is safe to use a normal global
+ // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1470,32 +1476,43 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
// Record position before possible IC call.
- SetSourcePosition(proxy->position());
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- // Use inline caching. Variable name is passed in a2 and the global
- // object (receiver) in a0.
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(v0);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1566,16 +1583,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(v0);
}
@@ -1646,7 +1667,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1666,13 +1686,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in v0.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1700,7 +1719,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1708,6 +1732,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(), MemOperand(sp));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1721,7 +1748,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1759,9 +1787,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1796,7 +1828,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1842,6 +1875,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(v0);
}
+
+ // Verify that compilation exactly consumed the number of store ic slots that
+ // the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
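
For orientation, here is a sketch of which object-literal stores consume the
vector store-IC slots tracked by store_slot_index above (TypeScript; compute()
and key() are hypothetical helpers, and the slot accounting is illustrative
rather than taken from this diff):

```ts
declare function compute(): number;
declare function key(): string;

// Each literal-keyed store is emitted as a named store IC and consumes
// one feedback-vector slot under --vector-stores; accessor pairs and
// computed names go through runtime calls instead (method values may
// still take a slot for the home-object store).
const o = {
  a: 1,                   // named store IC -> one slot
  b: compute(),           // named store IC -> one slot
  get c() { return 3; },  // accessor table -> runtime call
  [key()]: 4,             // computed name  -> runtime call
};
```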
@@ -1882,8 +1919,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1897,7 +1937,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset));
__ sd(result_register(), FieldMemOperand(a1, offset));
@@ -1906,14 +1946,40 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kRAHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
- __ li(a3, Operand(Smi::FromInt(i)));
+ __ li(a3, Operand(Smi::FromInt(array_index)));
__ mov(a0, result_register());
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
+
+  // In case the array literal contains spread expressions it has two parts.
+  // The first part is the "static" array which has a literal index and is
+  // handled above. The second part is the part after the first spread
+  // expression (inclusive), and these elements get appended to the array.
+  // Note that the number of elements an iterable produces is unknown ahead
+  // of time.
+ if (array_index < length && result_saved) {
+ __ Pop(); // literal index
+ __ Pop(v0);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(v0);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
if (result_saved) {
__ Pop(); // literal index
context()->PlugTOS();
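
A concrete illustration of the two phases described above (the values are
arbitrary; TypeScript):

```ts
const xs: number[] = [10, 20];
// Phase 1: elements before the first spread are stored at known indices
// into the cloned boilerplate (1 and 2 below).
// Phase 2: from the first spread onward, each element or iterable is
// appended (Runtime::kAppendElement / CONCAT_ITERABLE_TO_ARRAY), since
// an iterable can produce any number of elements.
const a = [1, 2, ...xs, 3];  // -> [1, 2, 10, 20, 3]
```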
@@ -1927,9 +1993,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1946,8 +2013,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = a1;
@@ -1957,8 +2026,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_SUPER_PROPERTY: {
const Register scratch = a1;
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
__ Push(scratch, result_register());
@@ -2016,7 +2087,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(v0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
@@ -2033,14 +2103,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
break;
@@ -2064,6 +2133,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2145,7 +2216,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(a0); // result
- EnterTryBlock(expr->index(), &l_catch);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(a0); // result
__ jmp(&l_suspend);
@@ -2156,7 +2228,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ ld(a0, MemOperand(sp, generator_object_depth));
__ push(a0); // g
- __ Push(Smi::FromInt(expr->index())); // handler-index
+ __ Push(Smi::FromInt(handler_index)); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
__ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
@@ -2170,7 +2242,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EmitReturnSequence();
__ mov(a0, v0);
__ bind(&l_resume); // received in a0
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2182,11 +2254,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ ld(load_receiver, MemOperand(sp, kPointerSize));
__ ld(load_name, MemOperand(sp, 2 * kPointerSize));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
@@ -2202,10 +2272,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
@@ -2215,10 +2283,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
context()->DropAndPlug(2, v0); // drop iter and g
break;
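
The l_next / l_call / l_loop labels above drive the iterator protocol that
delegating yield requires; a minimal TypeScript equivalent of the behavior
being compiled:

```ts
function* inner(): Generator<number, string> {
  yield 1;
  yield 2;
  return "done";
}

function* outer() {
  // yield* repeatedly loads and calls inner's "next" (the keyed load IC
  // above), tests result.done, and unwraps result.value -- exactly the
  // sequence emitted between l_next and l_loop.
  const r = yield* inner();
  return r;  // "done"
}
```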
@@ -2356,52 +2422,47 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ li(LoadDescriptor::NameRegister(), Operand(key->value()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
+
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
// Call keyed load IC. It has register arguments receiver and key.
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
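
Both super loads now pass the language mode so the runtime can respect
strict-mode semantics; in source terms (illustrative TypeScript):

```ts
class Base {
  m(): number { return 1; }
}

class Derived extends Base {
  m(): number {
    // super.m is loaded via Runtime::kLoadFromSuper(receiver,
    // home_object, "m", language_mode); a keyed super[k] access takes
    // Runtime::kLoadKeyedFromSuper with the same extra argument.
    return super.m() + 1;
  }
}
```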
@@ -2427,8 +2488,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2459,11 +2520,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ DadduAndCheckForOverflow(v0, left, right, scratch1);
__ BranchOnOverflow(&stub_call, scratch1);
break;
case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ DsubuAndCheckForOverflow(v0, left, right, scratch1);
__ BranchOnOverflow(&stub_call, scratch1);
break;
case Token::MUL: {
@@ -2497,7 +2558,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in v0.
DCHECK(lit != NULL);
__ push(v0);
@@ -2531,7 +2593,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2570,8 +2633,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
__ pop(a1);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2579,17 +2642,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2599,13 +2663,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(v0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; v0: home_object
Register scratch = a2;
Register scratch2 = a3;
@@ -2620,9 +2686,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ Push(v0);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = a2;
Register scratch2 = a3;
@@ -2645,6 +2711,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Move(StoreDescriptor::NameRegister(), result_register());
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2668,12 +2735,14 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
__ ld(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2766,13 +2835,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2811,9 +2883,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
// Call keyed store IC.
// The arguments are:
// - a0 is the value,
@@ -2825,7 +2894,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2834,6 +2908,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2842,9 +2918,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), v0);
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2855,9 +2931,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2909,22 +2985,23 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ SetExpressionPosition(expr);
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = a1;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
__ mov(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(scratch, v0, v0, scratch);
__ Push(key->value());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2932,7 +3009,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2974,15 +3052,16 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
const Register scratch = a1;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
__ Move(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(scratch, v0, v0, scratch);
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2990,7 +3069,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -3006,14 +3086,12 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
// Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -3028,19 +3106,15 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // a7: copy of the first argument or undefined if it doesn't exist.
+ // a6: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ld(a7, MemOperand(sp, arg_count * kPointerSize));
+ __ ld(a6, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(a7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
}
- // a6: the receiver of the enclosing function.
- __ ld(a6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
// a5: the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ld(a5, MemOperand(fp, receiver_offset * kPointerSize));
+ __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// a4: the language mode.
__ li(a4, Operand(Smi::FromInt(language_mode())));
@@ -3049,21 +3123,13 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ Push(a7);
__ Push(a6, a5, a4, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(a0);
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
+ SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
Variable* this_var = super_ref->this_var()->var();
GetVar(a1, this_var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -3074,7 +3140,52 @@ void FullCodeGenerator::EmitInitializeThisAfterSuper(
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in v0)
+ // and the object holding it (returned in v1).
+ DCHECK(!context_register().is(a2));
+ __ li(a2, Operand(callee->name()));
+ __ Push(context_register(), a2);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(v0, v1); // Function, receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ Branch(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(v0);
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing undefined to the call function stub.
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ push(a1);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+    // Push the spec's refEnv.WithBaseObject(), which is undefined for an
+    // ordinary (non-lookup-slot) callee.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2); // Reserved receiver slot.
+ }
}
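
PushCalleeAndWithBaseObject handles both lookup-slot callees and ordinary
ones; the possibly-direct-eval caller below sketches the case the surrounding
code feeds into (TypeScript; sloppy-mode details elided):

```ts
function maybeDirectEval(src: string): unknown {
  // The callee and a reserved receiver slot are pushed first; only then
  // does Runtime::kResolvePossiblyDirectEval decide at runtime whether
  // this call really reaches the global eval.
  return eval(src);
}
```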
@@ -3091,16 +3202,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
-
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ push(a2); // Reserved receiver slot.
+ PushCalleeAndWithBaseObject(expr);
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
@@ -3113,15 +3219,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(a1);
EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in v0 (function) and
- // v1 (receiver). Touch up the stack with the right values.
+ // Touch up the stack with the resolved function.
__ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ sd(v1, MemOperand(sp, arg_count * kPointerSize));
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3133,43 +3236,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in v0)
- // and the object holding it (returned in v1).
- DCHECK(!context_register().is(a2));
- __ li(a2, Operand(proxy->name()));
- __ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(v0, v1); // Function, receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ Branch(&call);
- __ bind(&done);
- // Push function.
- __ push(v0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
@@ -3181,10 +3248,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
- }
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3196,9 +3260,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
- }
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
// Emit function call.
@@ -3221,7 +3283,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3233,7 +3295,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3257,11 +3319,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor();
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
+
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3273,7 +3338,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3299,7 +3364,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(v0);
}
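
In source terms, the rewritten super-call path corresponds to (illustrative
TypeScript):

```ts
class B {
  constructor() {}
}

class D extends B {
  constructor() {
    // super() pushes new.target, loads the super constructor (the
    // [[Prototype]] of the active function) and constructs it; `this`
    // is initialized only afterwards, so touching it before super()
    // throws a ReferenceError (the uninitialized_this check).
    super();
  }
}
```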
@@ -3589,6 +3654,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_TYPED_ARRAY_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
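
The semantics of the new intrinsic, expressed as a plain TypeScript sketch
(the real check compares the instance type against JS_TYPED_ARRAY_TYPE):

```ts
// Typed-array views are ArrayBuffer views that are not DataViews.
const isTypedArray = (x: unknown): boolean =>
  ArrayBuffer.isView(x) && !(x instanceof DataView);
```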
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3835,6 +3922,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_DATE_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3843,20 +3952,15 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = v0;
Register result = v0;
Register scratch0 = t1;
Register scratch1 = a1;
- __ JumpIfSmi(object, &not_date_object);
- __ GetObjectType(object, scratch1, scratch1);
- __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
-
if (index->value() == 0) {
__ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ li(scratch1, Operand(stamp));
@@ -3872,13 +3976,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ li(a1, Operand(index));
__ Move(a0, object);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(v0);
+ context()->Plug(result);
}
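
EmitDateField no longer guards against non-date receivers; presumably the
callers now test %_IsDate (added above) before reaching %_DateField. A sketch
of that pattern (TypeScript; not the actual builtin code):

```ts
function dateValue(d: unknown): number {
  // Guard first (what %_IsDate backs), then read the field; field
  // index 0 corresponds to the cached time value the fast path loads
  // from JSDate::kValueOffset.
  if (!(d instanceof Date)) throw new TypeError("not a date");
  return d.getTime();
}
```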
@@ -4187,11 +4288,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
- EmitLoadSuperConstructor();
+ // new.target
+ VisitForStackValue(args->at(0));
+
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -4210,8 +4315,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(a1, a1);
- // Subtract 1 from arguments count, for new.target.
- __ Daddu(a1, a1, Operand(-1));
__ mov(a0, a1);
// Get arguments pointer in a2.
@@ -4425,7 +4528,7 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+ __ DadduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -4593,11 +4696,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4607,8 +4713,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ push(at);
__ sd(v0, MemOperand(sp, kPointerSize));
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4624,7 +4730,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, v0);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
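
This path compiles the desugared form of a spread super call; in source terms
(illustrative TypeScript):

```ts
class B {
  constructor(...xs: number[]) {}
}

class D extends B {
  constructor(xs: number[]) {
    // Desugared into a Reflect.construct-style runtime call: the super
    // constructor comes from the SuperCallReference and the spread
    // arguments are collected into an array first.
    super(...xs);
  }
}
```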
@@ -4637,13 +4744,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4651,8 +4754,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4677,6 +4779,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4701,6 +4804,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(v0);
}
@@ -4725,10 +4829,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ ld(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
__ li(a0, Operand(Smi::FromInt(SLOPPY)));
@@ -4738,7 +4843,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4826,10 +4931,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4852,8 +4956,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
const Register scratch = a1;
__ ld(scratch, MemOperand(sp, kPointerSize));
@@ -4863,8 +4968,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
const Register scratch = a1;
const Register scratch1 = a4;
__ Move(scratch, result_register());
@@ -4938,16 +5044,18 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Register scratch1 = a1;
Register scratch2 = a4;
__ li(scratch1, Operand(Smi::FromInt(count_value)));
- __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
+ __ DadduAndCheckForOverflow(v0, v0, scratch1, scratch2);
__ BranchOnNoOverflow(&done, scratch2);
// Call stub. Undo operation first.
__ Move(v0, a0);
__ jmp(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
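
Relating the strong-mode change above to source: strong mode (an experimental
flag at the time) forbade implicit coercion, which is why the ToNumber stub
can be skipped there (TypeScript sketch):

```ts
let n: number = 41;
n++;  // smi fast path: DadduAndCheckForOverflow, no conversion needed

let s: any = "41";
s++;  // sloppy/strict: the ToNumberStub coerces first, so s becomes 42;
      // strong mode skips the stub and leaves rejection to the IC
```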
@@ -4979,22 +5087,25 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(count_value)));
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
+
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), Token::ADD, language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in v0.
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(v0);
}
@@ -5005,7 +5116,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
}
@@ -5015,7 +5126,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5054,7 +5170,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5069,45 +5190,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(proxy->name()));
- if (FLAG_vector_ics) {
- __ li(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(v0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ li(a0, Operand(proxy->name()));
- __ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(v0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5185,7 +5267,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5236,9 +5318,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Split(cc, a1, Operand(a0), if_true, if_false, NULL);
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -5351,6 +5433,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ li(at, Operand(pending_message_obj));
__ ld(a1, MemOperand(at));
__ push(a1);
+
+ ClearPendingMessage();
}
@@ -5375,6 +5459,23 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(a1));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ li(at, Operand(pending_message_obj));
+ __ sd(a1, MemOperand(at));
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(slot)));
+}
+
+
#undef __
@@ -5458,6 +5559,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 2d6cabeb0b..689bbbb9ee 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
+const Register LoadDescriptor::SlotRegister() { return a0; }
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return a0; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return a3; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
const Register StoreDescriptor::ReceiverRegister() { return a1; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return a4; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
@@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return a2; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3, a2, a1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a3, a2, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3, a2, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a3, a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a2, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a3, a1};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a2, a3, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a3, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a3, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a3, a2};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a3, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
@@ -166,210 +162,183 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, a0, a1, a2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a0, a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a0, a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
- Register registers[] = {cp};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// a0 -- number of arguments
// a1 -- function
// a2 -- allocation site with elements kind
- Register registers[] = {cp, a1, a2};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {cp, a1, a2, a0};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a2, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// a0 -- number of arguments
// a1 -- constructor function
- Register registers[] = {cp, a1};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {cp, a1, a0};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a2, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, a1, a0};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a2, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a2, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a0, // receiver
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a1, // JSFunction
a0, // actual number of arguments
a2, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a0, // callee
a4, // call_data
a2, // holder
a1, // api_function_address
a3, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
a0, // callee
a4, // call_data
a2, // holder
a1, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // math rounding function
+ a3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
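The descriptor rewrite above follows one pattern throughout this file: the per-platform functions no longer list the context register (cp) or a parallel Representation array. InitializePlatformSpecific registers only the explicit parameter registers, and type information lives in the platform-independent half of the descriptor. A minimal standalone sketch of that split, using mock types rather than V8's actual classes:

```cpp
// Mock of the platform-specific half of a call descriptor: only the
// explicit parameter registers are recorded; the context is implicit.
#include <cassert>
#include <cstddef>
#include <vector>

struct Register { int code; };

class CallInterfaceDescriptorData {
 public:
  void InitializePlatformSpecific(std::size_t count, const Register* registers) {
    registers_.assign(registers, registers + count);
  }
  std::size_t register_param_count() const { return registers_.size(); }

 private:
  std::vector<Register> registers_;
};

int main() {
  const Register a1{5}, a3{7};        // codes are illustrative
  Register registers[] = {a1, a3};    // was {cp, a1, a3} before the change
  CallInterfaceDescriptorData data;
  data.InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                  registers);
  assert(data.register_param_count() == 2);  // cp is no longer counted
  return 0;
}
```

The AllocateHeapNumber descriptor above shows the degenerate form: with the context implicit, it registers a zero-length list.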
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/mips64/lithium-codegen-mips64.cc
index 223370f711..1273d856cc 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.cc
@@ -118,8 +118,8 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
+ !info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -192,8 +192,9 @@ bool LCodeGen::GeneratePrologue() {
__ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
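The prologue hunk above widens the parameter-copy loop so that index -1 denotes the receiver when the scope declares `this`. A standalone sketch of the indexing, with mock data and kPointerSize fixed at 8:

```cpp
// Model of the widened copy loop: i == -1 stands for the receiver.
#include <cstdio>
#include <string>
#include <vector>

int main() {
  bool has_this_declaration = true;
  std::vector<std::string> parameters = {"x", "y"};
  int num_parameters = static_cast<int>(parameters.size());
  int first_parameter = has_this_declaration ? -1 : 0;
  for (int i = first_parameter; i < num_parameters; i++) {
    std::string var = (i == -1) ? "this" : parameters[i];
    // Mirrors kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize:
    int slot_offset = (num_parameters - 1 - i) * 8;
    std::printf("%s -> caller-SP offset %d\n", var.c_str(), slot_offset);
  }
  return 0;
}
```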
@@ -488,8 +489,8 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
}
-int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
- const Representation& r) const {
+int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
int32_t value = constant->Integer32Value();
if (r.IsInteger32()) return value;
@@ -581,52 +582,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@@ -916,28 +882,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
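DefineDeoptimizationLiteral is deleted here only because it moved to the shared LCodeGenBase; the dedup-or-append behavior is unchanged. A standalone sketch of that helper, with ints standing in for Handle&lt;Object&gt;:

```cpp
// Return the existing index of a literal, or append it and return the
// new index; mirrors the removed MIPS64-local helper.
#include <cassert>
#include <vector>

int DefineDeoptimizationLiteral(std::vector<int>& literals, int literal) {
  int length = static_cast<int>(literals.size());
  for (int i = 0; i < length; ++i) {
    if (literals[i] == literal) return i;  // already present: reuse index
  }
  literals.push_back(literal);             // first occurrence: append
  return length;
}

int main() {
  std::vector<int> literals;
  assert(DefineDeoptimizationLiteral(literals, 42) == 0);
  assert(DefineDeoptimizationLiteral(literals, 7) == 1);
  assert(DefineDeoptimizationLiteral(literals, 42) == 0);  // deduplicated
  return 0;
}
```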
@@ -1470,7 +1419,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
-void LCodeGen::DoMulI(LMulI* instr) {
+void LCodeGen::DoMulS(LMulS* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
@@ -1493,9 +1442,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, scratch,
- Operand(kMaxInt));
+ __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+ Operand(zero_reg));
} else {
__ Dsubu(result, zero_reg, left);
}
@@ -1524,25 +1473,127 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t shift = WhichPowerOf2(constant_abs);
__ dsll(result, left, shift);
// Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Dsubu(result, zero_reg, result);
+ if (constant < 0) __ Dsubu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ dsll(scratch, left, shift);
__ Daddu(result, scratch, left);
// Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Dsubu(result, zero_reg, result);
+ if (constant < 0) __ Dsubu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ dsll(scratch, left, shift);
__ Dsubu(result, scratch, left);
// Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Dsubu(result, zero_reg, result);
+ if (constant < 0) __ Dsubu(result, zero_reg, result);
} else {
// Generate standard code.
__ li(at, constant);
__ Dmul(result, left, at);
}
}
+ } else {
+ DCHECK(right_op->IsRegister());
+ Register right = ToRegister(right_op);
+
+ if (overflow) {
+ // hi:lo = left * right.
+ __ Dmulh(result, left, right);
+ __ dsra32(scratch, result, 0);
+ __ sra(at, result, 31);
+ __ SmiTag(result);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+ } else {
+ __ SmiUntag(result, left);
+ __ dmul(result, result, right);
+ }
+
+ if (bailout_on_minus_zero) {
+ Label done;
+ __ Xor(at, left, right);
+ __ Branch(&done, ge, at, Operand(zero_reg));
+ // Bail out if the result is minus zero.
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+ Operand(zero_reg));
+ __ bind(&done);
+ }
+ }
+}
+
+
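The constant path shared by DoMulS and DoMulI strength-reduces multiplications by 2^n and 2^n +/- 1 into shifts, after computing a branchless absolute value with the mask trick. A runnable sketch of the same arithmetic in plain C++, ignoring the Smi tag and assuming shifts of negative values wrap as on MIPS:

```cpp
// Strength-reduced multiply by a compile-time constant.
#include <cassert>
#include <cstdint>

bool IsPowerOfTwo32(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

int WhichPowerOf2(uint32_t x) {
  int shift = 0;
  while ((x >>= 1) != 0) shift++;
  return shift;
}

int32_t MulByConstant(int32_t left, int32_t constant) {
  int32_t mask = constant >> 31;                     // all ones iff negative
  uint32_t constant_abs = (constant + mask) ^ mask;  // branchless |constant|
  int32_t result;
  if (IsPowerOfTwo32(constant_abs)) {
    result = left << WhichPowerOf2(constant_abs);               // x * 2^n
  } else if (IsPowerOfTwo32(constant_abs - 1)) {
    result = (left << WhichPowerOf2(constant_abs - 1)) + left;  // x * (2^n + 1)
  } else if (IsPowerOfTwo32(constant_abs + 1)) {
    result = (left << WhichPowerOf2(constant_abs + 1)) - left;  // x * (2^n - 1)
  } else {
    return left * constant;  // generic path: emit a real multiply
  }
  if (constant < 0) result = -result;  // correct the sign afterwards
  return result;
}

int main() {
  assert(MulByConstant(7, 8) == 56);    // 2^3
  assert(MulByConstant(7, 9) == 63);    // 2^3 + 1
  assert(MulByConstant(7, -7) == -49);  // -(2^3 - 1)
  assert(MulByConstant(7, 10) == 70);   // generic path
  return 0;
}
```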
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
+
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately.
+      // If the constant is negative and left is zero, the result should be -0.
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+ }
+
+ switch (constant) {
+ case -1:
+ if (overflow) {
+ __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+ Operand(zero_reg));
+ } else {
+ __ Subu(result, zero_reg, left);
+ }
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+        // If left is strictly negative and the constant is zero, the
+        // result is -0. Deoptimize if required, otherwise return 0.
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+ Operand(zero_reg));
+ }
+ __ mov(result, zero_reg);
+ break;
+ case 1:
+ // Nothing to do.
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ sll(result, left, shift);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ sll(scratch, left, shift);
+ __ addu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ sll(scratch, left, shift);
+ __ Subu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else {
+ // Generate standard code.
+ __ li(at, constant);
+ __ Mul(result, left, at);
+ }
+ }
} else {
DCHECK(right_op->IsRegister());
@@ -1550,24 +1601,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (overflow) {
// hi:lo = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ Dmulh(result, left, right);
- } else {
- __ Dmul(result, left, right);
- }
+ __ Dmul(result, left, right);
__ dsra32(scratch, result, 0);
__ sra(at, result, 31);
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiTag(result);
- }
+
DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
} else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ Dmul(result, result, right);
- } else {
- __ Dmul(result, left, right);
- }
+ __ mul(result, left, right);
}
if (bailout_on_minus_zero) {
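Both overflow paths above rely on the same trick: compute the full 64-bit product, then compare the high word (dsra32) against the sign extension of bit 31 of the low word (sra); any mismatch means the product does not fit in 32 bits and the code deoptimizes. The same check in portable C++:

```cpp
// Detect 32-bit multiply overflow via the 64-bit product's high word.
#include <cassert>
#include <cstdint>

bool MulOverflows32(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * static_cast<int64_t>(right);
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);  // like dsra32(scratch, result, 0)
  int32_t sign = lo >> 31;                           // like sra(at, result, 31)
  *result = lo;
  return hi != sign;  // mismatch => DeoptimizeIf(ne, ..., kOverflow, ...)
}

int main() {
  int32_t r;
  assert(!MulOverflows32(46340, 46340, &r));        // 2147395600 fits
  assert(MulOverflows32(46341, 46341, &r));         // 2147488281 overflows
  assert(!MulOverflows32(-1, INT32_MIN + 1, &r));   // 2147483647 fits
  assert(MulOverflows32(-1, INT32_MIN, &r));        // -INT32_MIN overflows
  return 0;
}
```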
@@ -1703,6 +1743,27 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
}
+void LCodeGen::DoSubS(LSubS* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
+ ToOperand(right), overflow, scratch);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+ Operand(zero_reg));
+ }
+}
+
+
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -1710,39 +1771,16 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, at);
- __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg));
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
- }
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() || right->IsConstantOperand()) {
- Register right_reg = EmitLoadRegister(right, scratch);
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
- } else {
- DCHECK(right->IsRegister());
- // Due to overflow check macros not supporting constant operands,
- // handling the IsConstantOperand case was moved to prev if clause.
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- ToRegister(right),
- overflow); // Reg at also used as scratch.
- }
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
+ ToOperand(right), overflow, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
Operand(zero_reg));
- if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMaxInt));
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMinInt));
- }
}
}
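DoSubS and DoSubI now funnel every operand shape through DsubuAndCheckForOverflow/SubuAndCheckForOverflow, which leave a value whose sign bit is set exactly when the subtraction wrapped, so the caller can deoptimize on `lt ... zero_reg`. A sketch of that contract; the xor formula below is one standard way to compute the indicator and is an assumption here, not copied from the macro assembler:

```cpp
// Subtraction with an "overflow" output whose sign bit flags wrap-around.
#include <cassert>
#include <cstdint>

int32_t SubuAndCheckForOverflow(int32_t left, int32_t right,
                                int32_t* overflow) {
  uint32_t l = static_cast<uint32_t>(left);
  uint32_t r = static_cast<uint32_t>(right);
  uint32_t result = l - r;
  // Overflow iff the operands had different signs and the result's sign
  // differs from the left operand's sign.
  *overflow = static_cast<int32_t>((l ^ r) & (l ^ result));
  return static_cast<int32_t>(result);
}

int main() {
  int32_t overflow;
  SubuAndCheckForOverflow(100, 42, &overflow);
  assert(overflow >= 0);                          // no deopt
  SubuAndCheckForOverflow(INT32_MIN, 1, &overflow);
  assert(overflow < 0);                           // 'lt' branch: deoptimize
  return 0;
}
```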
@@ -1789,21 +1827,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(a0));
DCHECK(result.is(v0));
DCHECK(!scratch.is(scratch0()));
DCHECK(!scratch.is(object));
- __ SmiTst(object, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- __ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch,
- Operand(JS_DATE_TYPE));
-
if (index->value() == 0) {
__ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ li(scratch, Operand(stamp));
@@ -1901,6 +1933,38 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
+void LCodeGen::DoAddE(LAddE* instr) {
+ LOperand* result = instr->result();
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+
+ DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (!can_overflow) {
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else { // can_overflow.
+ Register overflow = scratch0();
+ Register scratch = scratch1();
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left),
+ ToOperand(right), overflow, scratch);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+ Operand(zero_reg));
+ }
+}
+
+
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -1908,41 +1972,16 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, at);
- __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg));
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
- }
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsConstantOperand()) {
- Register right_reg = EmitLoadRegister(right, scratch);
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
- } else {
- DCHECK(right->IsRegister());
- // Due to overflow check macros not supporting constant operands,
- // handling the IsConstantOperand case was moved to prev if clause.
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- ToRegister(right),
- overflow); // Reg at also used as scratch.
- }
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
+ __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
+ ToOperand(right), overflow, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
Operand(zero_reg));
- // if not smi, it must int32.
- if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMaxInt));
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMinInt));
- }
}
}
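The new S-suffixed lithium instructions exist because on 64-bit V8 a Smi keeps its 32-bit payload in the upper word of the register, so Smi arithmetic needs the full 64-bit ALU ops (Daddu/Dsubu/Dmul) while untagged int32 arithmetic can use the 32-bit forms with a cheaper overflow check. A sketch of the tagging scheme this assumes:

```cpp
// 64-bit Smi layout: the payload lives in the upper 32 bits.
#include <cassert>
#include <cstdint>

int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  int64_t a = SmiTag(5), b = SmiTag(7);
  // Tagged Smi addition is a single 64-bit add; no untagging needed.
  assert(SmiUntag(a + b) == 12);
  // Multiplication must untag one operand first (cf. SmiUntag in DoMulS):
  assert(SmiUntag((a >> 32) * b) == 35);
  return 0;
}
```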
@@ -2055,8 +2094,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
@@ -2541,7 +2580,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2843,7 +2883,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2905,10 +2946,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(a0));
AllowDeferredHandleDereference vector_structure_check;
@@ -2921,6 +2961,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ li(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ li(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
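The new EmitVectorStoreICRegisters mirrors its load counterpart: it materializes the feedback vector object in one fixed register and the vector index of the IC slot, Smi-tagged, in another. A mock of the two values it produces; GetIndex and the Smi layout are simplified assumptions here:

```cpp
// Mock of the vector/slot register pair set up before a store IC call.
#include <cassert>
#include <cstdint>

struct TypeFeedbackVector {
  int first_ic_slot_index;
  int GetIndex(int ic_slot) const { return first_ic_slot_index + ic_slot; }
};

int64_t SmiFromInt(int32_t value) { return static_cast<int64_t>(value) << 32; }

int main() {
  TypeFeedbackVector vector{3};
  const TypeFeedbackVector* vector_register = &vector;      // __ li(vector_register, vector)
  int64_t slot_register = SmiFromInt(vector.GetIndex(2));   // __ li(slot_register, Smi::FromInt(index))
  assert(slot_register == SmiFromInt(5));
  (void)vector_register;
  return 0;
}
```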
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
@@ -2928,11 +2982,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3046,12 +3098,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3227,7 +3278,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3418,9 +3470,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3737,13 +3789,27 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label done;
__ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
__ mov(result, input);
- __ dsubu(result, zero_reg, input);
+ __ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
__ bind(&done);
}
+void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label done;
+ __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+ __ mov(result, input);
+ __ dsubu(result, zero_reg, input);
+ // Overflow if result is still negative, i.e. 0x80000000 00000000.
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+ __ bind(&done);
+}
+
+
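The split into EmitIntegerMathAbs (subu) and the new EmitSmiMathAbs (dsubu) matters because negation overflows at different widths: only INT32_MIN breaks the 32-bit version, and only the 64-bit minimum breaks the Smi version. Both are caught the same way, by the negated result still being negative. A portable equivalent:

```cpp
// abs() with the same overflow detection at 32 and 64 bits.
#include <cassert>
#include <cstdint>

bool IntegerMathAbs32(int32_t input, int32_t* result) {
  if (input >= 0) { *result = input; return true; }
  *result = static_cast<int32_t>(0u - static_cast<uint32_t>(input));  // subu
  return *result >= 0;  // still negative => 0x80000000, deoptimize
}

bool SmiMathAbs64(int64_t input, int64_t* result) {
  if (input >= 0) { *result = input; return true; }
  *result = static_cast<int64_t>(0ull - static_cast<uint64_t>(input));  // dsubu
  return *result >= 0;  // still negative => 0x80000000'00000000, deoptimize
}

int main() {
  int32_t r32; int64_t r64;
  assert(IntegerMathAbs32(-5, &r32) && r32 == 5);
  assert(!IntegerMathAbs32(INT32_MIN, &r32));  // would deoptimize
  assert(SmiMathAbs64(INT64_MIN + 1, &r64));   // fine in 64 bits
  assert(!SmiMathAbs64(INT64_MIN, &r64));      // would deoptimize
  return 0;
}
```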
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
@@ -3764,8 +3830,10 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
FPURegister input = ToDoubleRegister(instr->value());
FPURegister result = ToDoubleRegister(instr->result());
__ abs_d(result, input);
- } else if (r.IsSmiOrInteger32()) {
+ } else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
+ } else if (r.IsSmi()) {
+ EmitSmiMathAbs(instr);
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
@@ -3774,7 +3842,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
- EmitIntegerMathAbs(instr);
+ EmitSmiMathAbs(instr);
__ bind(deferred->exit());
}
}
@@ -4014,30 +4082,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
-
- Register scratch = a4;
- Register extra = a5;
- Register extra2 = a6;
- Register extra3 = t1;
-
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(
- masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Tail call to miss if we ended up here.
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
@@ -4316,10 +4360,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4438,7 +4486,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4572,6 +4621,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4579,6 +4632,101 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = v0;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ jmp(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ Branch(deferred->entry(), le, ToRegister(current_capacity),
+ Operand(constant_key));
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ Branch(deferred->entry(), ge, ToRegister(key),
+ Operand(constant_capacity));
+ } else {
+ __ Branch(deferred->entry(), ge, ToRegister(key),
+ Operand(ToRegister(current_capacity)));
+ }
+
+ if (instr->elements()->IsRegister()) {
+ __ mov(result, ToRegister(instr->elements()));
+ } else {
+ __ ld(result, ToMemOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = v0;
+ __ mov(result, zero_reg);
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ mov(result, ToRegister(instr->object()));
+ } else {
+ __ ld(result, ToMemOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+ } else {
+ __ mov(a3, ToRegister(key));
+ __ SmiTag(a3);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ mov(a0, result);
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ SmiTst(result, at);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+}
+
+
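DoMaybeGrowElements emits the fast-path bounds check inline and leaves growth to the deferred code, which calls GrowArrayElementsStub and deoptimizes if the stub returns a Smi, the signal that the backing store fell back to dictionary mode. A much-simplified model of the control flow, with std::vector standing in for the elements store and the dictionary signal modeled as a flag:

```cpp
// Fast path: key < capacity. Deferred path: grow, deopt on dictionary mode.
#include <cassert>
#include <vector>

struct GrowResult { bool deoptimize; };

GrowResult MaybeGrowElements(std::vector<int>& elements, int key) {
  if (key >= static_cast<int>(elements.size())) {  // Branch(deferred, ge, ...)
    elements.resize(key + 1);                      // GrowArrayElementsStub
    bool became_dictionary = false;                // stub signals this via a Smi
    if (became_dictionary) return {true};          // DeoptimizeIf(eq, kSmi, ...)
  }
  return {false};
}

int main() {
  std::vector<int> elements(4);
  assert(!MaybeGrowElements(elements, 2).deoptimize);  // fast path
  assert(!MaybeGrowElements(elements, 9).deoptimize);  // grew to 10
  assert(elements.size() == 10);
  return 0;
}
```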
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register scratch = scratch0();
@@ -6067,4 +6215,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.h b/deps/v8/src/mips64/lithium-codegen-mips64.h
index 0db3677d51..6fb7bc3c85 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.h
@@ -26,7 +26,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -76,7 +75,7 @@ class LCodeGen: public LCodeGenBase {
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
- int32_t ToRepresentation_donotuse(LConstantOperand* op,
+ int64_t ToRepresentation_donotuse(LConstantOperand* op,
const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
@@ -111,6 +110,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -240,7 +240,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -252,6 +251,7 @@ class LCodeGen: public LCodeGenBase {
String::Encoding encoding);
void EmitIntegerMathAbs(LMathAbs* instr);
+ void EmitSmiMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -356,10 +356,11 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
diff --git a/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc b/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
index d965f651a3..1006d72a4e 100644
--- a/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
+++ b/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
@@ -297,4 +297,5 @@ void LGapResolver::EmitMove(int index) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/mips64/lithium-mips64.cc
index 1f518d347e..3df4defc7a 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/mips64/lithium-mips64.cc
@@ -1097,11 +1097,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
- ops.Add(op, zone());
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ ops.Add(op, zone());
}
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
@@ -1110,20 +1118,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
@@ -1541,14 +1535,17 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
}
right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ LInstruction* result =
+ instr->representation().IsSmi()
+ ? DefineAsRegister(new (zone()) LMulS(left_op, right_op))
+ : DefineAsRegister(new (zone()) LMulI(left_op, right_op));
if (right_op->IsConstantOperand()
? ((can_overflow && constant_value == -1) ||
(bailout_on_minus_zero && constant_value <= 0))
: (can_overflow || bailout_on_minus_zero)) {
- AssignEnvironment(mul);
+ AssignEnvironment(result);
}
- return DefineAsRegister(mul);
+ return result;
} else if (instr->representation().IsDouble()) {
if (kArchVariant == kMips64r2) {
@@ -1578,9 +1575,11 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ LInstruction* result =
+ instr->representation().IsSmi()
+ ? DefineAsRegister(new (zone()) LSubS(left, right))
+ : DefineAsRegister(new (zone()) LSubI(left, right));
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1607,9 +1606,11 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
+ LOperand* right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ LInstruction* result =
+ instr->representation().IsSmi()
+ ? DefineAsRegister(new (zone()) LAddS(left, right))
+ : DefineAsRegister(new (zone()) LAddI(left, right));
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1619,10 +1620,8 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
DCHECK(instr->right()->representation().IsInteger32());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- return result;
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return DefineAsRegister(new (zone()) LAddE(left, right));
} else if (instr->representation().IsDouble()) {
if (kArchVariant == kMips64r2) {
if (instr->left()->IsMul())
@@ -1817,7 +1816,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2093,7 +2092,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2142,7 +2141,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2217,7 +2216,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2283,8 +2282,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- return MarkAsCall(
- new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result =
+ new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+ return MarkAsCall(result, instr);
}
@@ -2316,6 +2323,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, v0);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2354,8 +2376,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
- LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2432,7 +2461,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2549,7 +2578,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2619,6 +2648,7 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/mips64/lithium-mips64.h
index adc2a4faa8..cb1f56ecc6 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/mips64/lithium-mips64.h
@@ -19,7 +19,9 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
+ V(AddE) \
V(AddI) \
+ V(AddS) \
V(Allocate) \
V(AllocateBlockContext) \
V(ApplyArguments) \
@@ -117,10 +119,12 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
+ V(MulS) \
V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagU) \
@@ -149,8 +153,8 @@ class LCodeGen;
V(StringCharFromCode) \
V(StringCompareAndBranch) \
V(SubI) \
+ V(SubS) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -470,26 +474,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -773,6 +757,21 @@ class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
};
+class LMulS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
class LMulI final : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
@@ -1173,6 +1172,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1291,6 +1292,21 @@ class LSubI final : public LTemplateInstruction<1, 2, 0> {
};
+class LSubS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
class LConstantI final : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@@ -1439,6 +1455,21 @@ class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
};
+class LAddE final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddE(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
class LAddI final : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1454,6 +1485,21 @@ class LAddI final : public LTemplateInstruction<1, 2, 0> {
};
+class LAddS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
@@ -1528,7 +1574,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1826,8 +1872,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1837,6 +1887,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
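kImplicitRegisterParameterCount makes the operand layout explicit: the call target and the context travel ahead of the descriptor's declared register parameters, so indices into the descriptor are shifted by two, as in the DoCallWithDescriptor builder loop above. A counting sketch of that layout with a mock descriptor and no codegen:

```cpp
// Operand list = [target, context, descriptor register parameters...].
#include <cassert>
#include <vector>

struct Descriptor { int register_parameter_count; };

int main() {
  const int kImplicitRegisterParameterCount = 2;  // target + context
  Descriptor descriptor{3};                       // e.g. a1, a3, a2
  std::vector<int> ops;
  ops.push_back(/*target*/ 0);
  ops.push_back(/*context (cp)*/ 1);
  for (int i = kImplicitRegisterParameterCount;
       i < descriptor.register_parameter_count + kImplicitRegisterParameterCount;
       i++) {
    // UseFixed(..., descriptor.GetRegisterParameter(i - 2)) in the builder.
    ops.push_back(i);
  }
  assert(static_cast<int>(ops.size()) ==
         descriptor.register_parameter_count + kImplicitRegisterParameterCount);
  return 0;
}
```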
@@ -2133,17 +2187,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2186,22 +2245,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* obj,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = obj;
+ inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2257,6 +2318,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 1542766722..f7a77dd1b1 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -528,6 +528,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = hash ^ (hash >> 16);
srl(at, reg0, 16);
xor_(reg0, reg0, at);
+ And(reg0, reg0, Operand(0x3fffffff));
}
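
The new And(...) masks the finished hash down to 30 bits so the value stays a positive Smi. As a plain C++ sketch of the full mixing sequence this assembler routine computes (assuming V8's usual low-level integer hash; the function name is illustrative):

#include <cstdint>

uint32_t ComputeNumberHash(uint32_t hash) {
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // the 30-bit mask added in this hunk
}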
@@ -621,7 +622,7 @@ void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
addu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, rt.imm64_);
+ addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -637,7 +638,7 @@ void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
daddu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- daddiu(rd, rs, rt.imm64_);
+ daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -653,7 +654,8 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
subu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
+ addiu(rd, rs, static_cast<int32_t>(
+ -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -669,7 +671,9 @@ void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
dsubu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- daddiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
+ daddiu(rd, rs,
+ static_cast<int32_t>(
+ -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1066,7 +1070,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
and_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- andi(rd, rs, rt.imm64_);
+ andi(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1082,7 +1086,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
or_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- ori(rd, rs, rt.imm64_);
+ ori(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1098,7 +1102,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
xor_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- xori(rd, rs, rt.imm64_);
+ xori(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1135,7 +1139,7 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
slt(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- slti(rd, rs, rt.imm64_);
+ slti(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1151,7 +1155,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
sltu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- sltiu(rd, rs, rt.imm64_);
+ sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1651,7 +1655,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
c(UN, D, cmp1, cmp2);
bc1f(&skip);
nop();
- Jr(nan, bd);
+ J(nan, bd);
bind(&skip);
} else {
c(UN, D, cmp1, cmp2);
@@ -1670,7 +1674,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
- Jr(nan, bd);
+ J(nan, bd);
bind(&skip);
} else {
cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
@@ -1689,7 +1693,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- Jr(target, bd);
+ J(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
@@ -2125,11 +2129,11 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (is_near(L)) {
BranchShort(L, bdslot);
} else {
- Jr(L, bdslot);
+ J(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jr(L, bdslot);
+ J(L, bdslot);
} else {
BranchShort(L, bdslot);
}
@@ -2148,10 +2152,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
+ J(L, bdslot);
bind(&skip);
} else {
- Jr(L, bdslot);
+ J(L, bdslot);
}
}
} else {
@@ -2160,10 +2164,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
+ J(L, bdslot);
bind(&skip);
} else {
- Jr(L, bdslot);
+ J(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
@@ -2330,7 +2334,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
if (rt.imm64_ == 0) {
bgez(rs, offset);
} else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, rt.imm64_);
+ slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
beq(scratch, zero_reg, offset);
} else {
r2 = scratch;
@@ -2343,7 +2347,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
if (rt.imm64_ == 0) {
bltz(rs, offset);
} else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, rt.imm64_);
+ slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
bne(scratch, zero_reg, offset);
} else {
r2 = scratch;
@@ -2377,7 +2381,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
if (rt.imm64_ == 0) {
b(offset);
} else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, rt.imm64_);
+ sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
beq(scratch, zero_reg, offset);
} else {
r2 = scratch;
@@ -2391,7 +2395,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// No code needs to be emitted.
return;
} else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, rt.imm64_);
+ sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
bne(scratch, zero_reg, offset);
} else {
r2 = scratch;
@@ -2597,7 +2601,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
offset = shifted_branch_offset(L, false);
bgez(rs, offset);
} else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, rt.imm64_);
+ slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
} else {
@@ -2614,7 +2618,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
offset = shifted_branch_offset(L, false);
bltz(rs, offset);
} else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, rt.imm64_);
+ slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
} else {
@@ -2658,7 +2662,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
offset = shifted_branch_offset(L, false);
b(offset);
} else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, rt.imm64_);
+ sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
} else {
@@ -2675,7 +2679,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
// No code needs to be emitted.
return;
} else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, rt.imm64_);
+ sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
} else {
@@ -2729,11 +2733,11 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (is_near(L)) {
BranchAndLinkShort(L, bdslot);
} else {
- Jalr(L, bdslot);
+ Jal(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jalr(L, bdslot);
+ Jal(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
@@ -2751,7 +2755,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
+ J(L, bdslot);
bind(&skip);
}
} else {
@@ -2759,7 +2763,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
+ Jal(L, bdslot);
bind(&skip);
} else {
BranchAndLinkShort(L, cond, rs, rt, bdslot);
@@ -3187,6 +3191,40 @@ void MacroAssembler::Ret(Condition cond,
}
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint64_t imm28;
+ imm28 = jump_address(L);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ j(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
+}
+
+
+void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint64_t imm28;
+ imm28 = jump_address(L);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ jal(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
+}
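
Both helpers emit a region-relative j/jal whose 28-bit byte offset (imm28) is patched during relocation. A rough sketch of the encoding relationship (the helper name is illustrative):

#include <cstdint>

// j/jal carry a 26-bit instr_index: bits 27..2 of the in-region byte
// offset. Bits 1..0 are implicitly zero (4-byte instruction alignment).
uint32_t JumpInstrIndex(uint64_t imm28) {
  return static_cast<uint32_t>((imm28 >> 2) & ((1u << 26) - 1));
}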
+
+
void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3421,7 +3459,7 @@ void MacroAssembler::Allocate(int object_size,
Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, limit - top));
+ ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
}
DCHECK(kPointerSize == kDoubleSize);
@@ -3497,7 +3535,7 @@ void MacroAssembler::Allocate(Register object_size,
Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, limit - top));
+ ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
}
DCHECK(kPointerSize == kDoubleSize);
@@ -3819,7 +3857,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
sd(filler, MemOperand(start_offset));
Daddu(start_offset, start_offset, kPointerSize);
bind(&entry);
- Branch(&loop, lt, start_offset, Operand(end_offset));
+ Branch(&loop, ult, start_offset, Operand(end_offset));
}
@@ -4409,7 +4447,6 @@ void MacroAssembler::ObjectToDoubleFPURegister(Register object,
void MacroAssembler::SmiToDoubleFPURegister(Register smi,
FPURegister value,
Register scratch1) {
- // dsra(scratch1, smi, kSmiTagSize);
dsra32(scratch1, smi, 0);
mtc1(scratch1, value);
cvt_d_w(value, value);
@@ -4424,18 +4461,16 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
} else {
if (dst.is(left)) {
+ li(t9, right); // Load right.
mov(scratch, left); // Preserve left.
- daddiu(dst, left, right.immediate()); // Left is overwritten.
+ addu(dst, left, t9); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
- // Load right since xori takes uint16 as immediate.
- daddiu(t9, zero_reg, right.immediate());
xor_(overflow_dst, dst, t9);
and_(overflow_dst, overflow_dst, scratch);
} else {
- daddiu(dst, left, right.immediate());
+ li(t9, right);
+ addu(dst, left, t9);
xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- daddiu(t9, zero_reg, right.immediate());
xor_(scratch, dst, t9);
and_(overflow_dst, scratch, overflow_dst);
}
@@ -4443,8 +4478,7 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -4465,6 +4499,72 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
}
if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
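
The xor/and sequence above is the classic branch-free signed-overflow test. Expressed in plain C++ (a sketch, assuming 32-bit two's complement):

#include <cstdint>

bool AddOverflows32(int32_t left, int32_t right) {
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  // Overflow iff the operands share a sign and the result's sign
  // differs: the sign bit of (dst ^ left) & (dst ^ right) is then set.
  return ((dst ^ left) & (dst ^ right)) < 0;
}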
+
+
+void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ DadduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ li(t9, right); // Load right.
+ mov(scratch, left); // Preserve left.
+ daddu(dst, left, t9); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, t9);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ li(t9, right); // Load right.
+ Daddu(dst, left, t9);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
+void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+
+ if (left.is(right) && dst.is(left)) {
+ DCHECK(!dst.is(t9));
+ DCHECK(!scratch.is(t9));
+ DCHECK(!left.is(t9));
+ DCHECK(!right.is(t9));
+ DCHECK(!overflow_dst.is(t9));
+ mov(t9, right);
+ right = t9;
+ }
+
+ if (dst.is(left)) {
mov(scratch, left); // Preserve left.
daddu(dst, left, right); // Left is overwritten.
xor_(scratch, dst, scratch); // Original left.
@@ -4493,18 +4593,16 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
} else {
if (dst.is(left)) {
+ li(t9, right); // Load right.
mov(scratch, left); // Preserve left.
- daddiu(dst, left, -(right.immediate())); // Left is overwritten.
+ Subu(dst, left, t9); // Left is overwritten.
xor_(overflow_dst, dst, scratch); // scratch is original left.
- // Load right since xori takes uint16 as immediate.
- daddiu(t9, zero_reg, right.immediate());
xor_(scratch, scratch, t9); // scratch is original left.
and_(overflow_dst, scratch, overflow_dst);
} else {
- daddiu(dst, left, -(right.immediate()));
+ li(t9, right);
+ subu(dst, left, t9);
xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- daddiu(t9, zero_reg, right.immediate());
xor_(scratch, left, t9);
and_(overflow_dst, scratch, overflow_dst);
}
@@ -4512,8 +4610,7 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -4534,6 +4631,72 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
}
if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ subu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ subu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ subu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
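
The subtraction variant tests a slightly different mask; as a companion sketch:

#include <cstdint>

bool SubOverflows32(int32_t left, int32_t right) {
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
  // Overflow iff the operands have different signs and the result's
  // sign differs from left's: mirrors the xor_/xor_/and_ emitted above.
  return ((dst ^ left) & (left ^ right)) < 0;
}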
+
+
+void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ DsubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ li(t9, right); // Load right.
+ mov(scratch, left); // Preserve left.
+ dsubu(dst, left, t9); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, t9); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ li(t9, right);
+ dsubu(dst, left, t9);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
+void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+ // This happens with some Crankshaft code. Since Dsubu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ mov(overflow_dst, zero_reg);
+ return;
+ }
+
+ if (dst.is(left)) {
mov(scratch, left); // Preserve left.
dsubu(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch); // scratch is original left.
@@ -4553,7 +4716,6 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
}
}
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles) {
@@ -4805,8 +4967,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ld(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
+ int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
ld(at, FieldMemOperand(scratch, offset));
Branch(no_map_match, ne, map_in_out, Operand(at));
@@ -6198,6 +6359,7 @@ void MacroAssembler::TruncatingDiv(Register result,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 7704929e72..7de3300908 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -1203,7 +1203,7 @@ class MacroAssembler: public Assembler {
void AdduAndCheckForOverflow(Register dst, Register left,
const Operand& right, Register overflow_dst,
- Register scratch = at);
+ Register scratch);
void SubuAndCheckForOverflow(Register dst,
Register left,
@@ -1213,7 +1213,21 @@ class MacroAssembler: public Assembler {
void SubuAndCheckForOverflow(Register dst, Register left,
const Operand& right, Register overflow_dst,
- Register scratch = at);
+ Register scratch);
+
+ void DadduAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = at);
+
+ void DadduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch);
+
+ void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = at);
+
+ void DsubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch);
void BranchOnOverflow(Label* label,
Register overflow_check,
@@ -1702,6 +1716,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void BranchAndLinkShort(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot = PROTECT);
+ void J(Label* L, BranchDelaySlot bdslot);
+ void Jal(Label* L, BranchDelaySlot bdslot);
void Jr(Label* L, BranchDelaySlot bdslot);
void Jalr(Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
index 5c28ab6dda..ca62f5b508 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
@@ -1286,6 +1286,7 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 29fccd0b59..2382f44fb8 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -782,7 +782,7 @@ static uint32_t ICacheHash(void* key) {
}
-static bool AllOnOnePage(uintptr_t start, int size) {
+static bool AllOnOnePage(uintptr_t start, size_t size) {
intptr_t start_page = (start & ~CachePage::kPageMask);
intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
return start_page == end_page;
@@ -830,9 +830,8 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ size_t size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -920,8 +919,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
-Simulator::~Simulator() {
-}
+Simulator::~Simulator() { free(stack_); }
// When the generated code calls an external reference we need to catch that in
@@ -967,7 +965,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
@@ -977,6 +975,14 @@ class Redirection {
return redirection->external_function();
}
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -985,6 +991,19 @@ class Redirection {
};
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+ Redirection::DeleteChain(first);
+ if (i_cache != nullptr) {
+ for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@@ -1135,7 +1154,7 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
const int fparg2 = (kMipsAbi == kN64) ? 13 : 14;
*x = get_fpu_register_double(12);
*y = get_fpu_register_double(fparg2);
- *z = get_register(a2);
+ *z = static_cast<int32_t>(get_register(a2));
} else {
// TODO(plind): bad ABI stuff, refactor or remove.
// We use a char buffer to get around the strict-aliasing rules which
@@ -1188,6 +1207,16 @@ bool Simulator::test_fcsr_bit(uint32_t cc) {
}
+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
+ FCSR_ |= mode & kFPURoundingModeMask;
+}
+
+
+unsigned int Simulator::get_fcsr_rounding_mode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
@@ -1252,7 +1281,71 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
}
-// for cvt instructions only
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not entirely clear, but this appears to be required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not entirely clear, but this appears to be required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+// For cvt instructions only
void Simulator::round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1335,6 +1428,89 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
}
+// For cvt instructions only
+void Simulator::round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (FCSR_ & 3) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int32_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int32_t>(rounded);
+ break;
+ }
+}
+
+
+void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one. Behave like round_w_d.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result. Behave like trunc_w_d.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up. Behave like ceil_w_d.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down. Behave like floor_w_d.
+ switch (FCSR_ & 3) {
+ case kRoundToNearest:
+ rounded = std::floor(fs + 0.5);
+ rounded_int = static_cast<int64_t>(rounded);
+ if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ rounded_int--;
+ }
+ break;
+ case kRoundToZero:
+ rounded = trunc(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToPlusInf:
+ rounded = std::ceil(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ case kRoundToMinusInf:
+ rounded = std::floor(fs);
+ rounded_int = static_cast<int64_t>(rounded);
+ break;
+ }
+}
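
The kRoundToNearest arm in both helpers implements ties-to-even by post-adjusting an odd result. Isolated as a sketch (assuming |fs| fits in int32_t):

#include <cmath>
#include <cstdint>

int32_t RoundHalfToEven(float fs) {
  float rounded = std::floor(fs + 0.5f);
  int32_t result = static_cast<int32_t>(rounded);
  if ((result & 1) != 0 && result - fs == 0.5f) {
    result--;  // exact halfway cases go to the even neighbor
  }
  return result;
}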
+
+
// Raw access to the PC register.
void Simulator::set_pc(int64_t value) {
pc_modified_ = true;
@@ -1454,7 +1630,7 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
}
-void Simulator::WriteW(int64_t addr, int value, Instruction* instr) {
+void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
@@ -2006,14 +2182,10 @@ void Simulator::SignalExceptions() {
// Handle execution based on instruction types.
-void Simulator::ConfigureTypeRegister(Instruction* instr,
- int64_t* alu_out,
- int64_t* i64hilo,
- uint64_t* u64hilo,
- int64_t* next_pc,
- int64_t* return_addr_reg,
- bool* do_interrupt,
- int64_t* i128resultH,
+void Simulator::ConfigureTypeRegister(Instruction* instr, int64_t* alu_out,
+ int64_t* i64hilo, uint64_t* u64hilo,
+ int64_t* next_pc, int* return_addr_reg,
+ bool* do_interrupt, int64_t* i128resultH,
int64_t* i128resultL) {
// Every local variable declared here needs to be const.
// This is to make sure that changed values are sent back to
@@ -2021,14 +2193,16 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Instruction fields.
const Opcode op = instr->OpcodeFieldRaw();
- const int64_t rs_reg = instr->RsValue();
+ const int32_t rs_reg = instr->RsValue();
const int64_t rs = get_register(rs_reg);
const uint64_t rs_u = static_cast<uint64_t>(rs);
- const int64_t rt_reg = instr->RtValue();
+ const int32_t rt_reg = instr->RtValue();
const int64_t rt = get_register(rt_reg);
const uint64_t rt_u = static_cast<uint64_t>(rt);
- const int64_t rd_reg = instr->RdValue();
+ const int32_t rd_reg = instr->RdValue();
const uint64_t sa = instr->SaValue();
+ const uint8_t bp2 = instr->Bp2Value();
+ const uint8_t bp3 = instr->Bp3Value();
const int32_t fs_reg = instr->FsValue();
@@ -2096,7 +2270,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// is special case of SRL instruction, added in MIPS32 Release 2.
// RS field is equal to 00001.
*alu_out = static_cast<int32_t>(
- base::bits::RotateRight32((uint32_t)rt_u, sa));
+ base::bits::RotateRight32(static_cast<const uint32_t>(rt_u),
+ static_cast<const uint32_t>(sa)));
}
break;
case DSRL:
@@ -2130,7 +2305,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
*alu_out = static_cast<int32_t>(
- base::bits::RotateRight32((uint32_t)rt_u, rs_u));
+ base::bits::RotateRight32(static_cast<const uint32_t>(rt_u),
+ static_cast<const uint32_t>(rs_u)));
}
break;
case DSRLV:
@@ -2142,7 +2318,9 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Logical right-rotate of a word by a variable number of bits.
// This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- *alu_out = base::bits::RotateRight32(rt_u, rs_u);
+ *alu_out =
+ base::bits::RotateRight32(static_cast<const uint32_t>(rt_u),
+ static_cast<const uint32_t>(rs_u));
}
break;
case SRAV:
@@ -2159,7 +2337,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// MIPS spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
DCHECK(instr->SaValue() == 1);
- *alu_out = base::bits::CountLeadingZeros32(rs_u);
+ *alu_out =
+ base::bits::CountLeadingZeros32(static_cast<int32_t>(rs_u));
}
break;
case MFLO:
@@ -2208,11 +2387,11 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
*alu_out = rs + rt;
break;
case ADDU: {
- int32_t alu32_out = rs + rt;
- // Sign-extend result of 32bit operation into 64bit register.
- *alu_out = static_cast<int64_t>(alu32_out);
- }
+ int32_t alu32_out = static_cast<int32_t>(rs + rt);
+ // Sign-extend result of 32bit operation into 64bit register.
+ *alu_out = static_cast<int64_t>(alu32_out);
break;
+ }
case DADDU:
*alu_out = rs + rt;
break;
@@ -2228,11 +2407,11 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
*alu_out = rs - rt;
break;
case SUBU: {
- int32_t alu32_out = rs - rt;
- // Sign-extend result of 32bit operation into 64bit register.
- *alu_out = static_cast<int64_t>(alu32_out);
- }
+ int32_t alu32_out = static_cast<int32_t>(rs - rt);
+ // Sign-extend result of 32bit operation into 64bit register.
+ *alu_out = static_cast<int64_t>(alu32_out);
break;
+ }
case DSUBU:
*alu_out = rs - rt;
break;
@@ -2304,7 +2483,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case CLZ:
// MIPS32 spec: If no bits were set in GPR rs, the result written to
// GPR rd is 32.
- *alu_out = base::bits::CountLeadingZeros32(rs_u);
+ *alu_out =
+ base::bits::CountLeadingZeros32(static_cast<uint32_t>(rs_u));
break;
default:
UNREACHABLE();
@@ -2342,6 +2522,120 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
*alu_out = static_cast<int64_t>((rs_u & (mask << lsb)) >> lsb);
break;
}
+ case BSHFL: {
+ int sa = instr->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP: {
+ uint32_t input = static_cast<uint32_t>(rt);
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits in each individual byte
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte =
+ static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ *alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ break;
+ }
+ case SEB:
+ case SEH:
+ case WSBH:
+ UNREACHABLE();
+ break;
+ default: {
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN: {
+ if (bp2 == 0) {
+ *alu_out = static_cast<int32_t>(rt);
+ } else {
+ uint64_t rt_hi = rt << (8 * bp2);
+ uint64_t rs_lo = rs >> (8 * (4 - bp2));
+ *alu_out = static_cast<int32_t>(rt_hi | rs_lo);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
+ case DBSHFL: {
+ int sa = instr->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case DBITSWAP: {
+ switch (instr->SaFieldRaw() >> kSaShift) {
+ case DBITSWAP_SA: { // Mips64r6
+ uint64_t input = static_cast<uint64_t>(rt);
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits in each individual byte
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte =
+ static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | ((static_cast<uint64_t>(o_byte) << 56));
+ input = input >> 8;
+ }
+
+ *alu_out = static_cast<int64_t>(output);
+ break;
+ }
+ }
+ break;
+ }
+ case DSBH:
+ case DSHD:
+ UNREACHABLE();
+ break;
+ default: {
+ sa >>= kBp3Bits;
+ switch (sa) {
+ case DALIGN: {
+ if (bp3 == 0) {
+ *alu_out = static_cast<int64_t>(rt);
+ } else {
+ uint64_t rt_hi = rt << (8 * bp3);
+ uint64_t rs_lo = rs >> (8 * (8 - bp3));
+ *alu_out = static_cast<int64_t>(rt_hi | rs_lo);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
default:
UNREACHABLE();
}
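
Both the BITSWAP and DBITSWAP loops lean on a well-known multiply-and-mask byte reversal (Sean Anderson's bit hack). Factored out as a standalone sketch:

#include <cstdint>

uint8_t ReverseByte(uint8_t b) {
  // Spreads the byte across two products, selects the reversed bits
  // with the masks, then gathers them into bits 16..23 before shifting.
  return static_cast<uint8_t>(
      ((b * 0x0802u & 0x22110u) | (b * 0x8020u & 0x88440u)) * 0x10101u >> 16);
}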
@@ -2353,40 +2647,94 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
- const int32_t& fs_reg,
- const int32_t& ft_reg,
- const int32_t& fd_reg) {
- float fs, ft;
+ const int32_t fs_reg,
+ const int32_t ft_reg,
+ const int32_t fd_reg) {
+ float fs, ft, fd;
fs = get_fpu_register_float(fs_reg);
ft = get_fpu_register_float(ft_reg);
+ fd = get_fpu_register_float(fd_reg);
+ int32_t ft_int = bit_cast<int32_t>(ft);
+ int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case ADD_D:
+ case RINT: {
+ DCHECK(kArchVariant == kMips64r6);
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fs);
+ float lower = std::floor(fs);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ if (upper - fs < fs - lower) {
+ result = upper;
+ } else if (upper - fs > fs - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+ float remainder = modf(temp_result, &temp);
+ if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fs > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ set_fpu_register_float(fd_reg, result);
+ if (result != fs) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case ADD_S:
set_fpu_register_float(fd_reg, fs + ft);
break;
- case SUB_D:
+ case SUB_S:
set_fpu_register_float(fd_reg, fs - ft);
break;
- case MUL_D:
+ case MUL_S:
set_fpu_register_float(fd_reg, fs * ft);
break;
- case DIV_D:
+ case DIV_S:
set_fpu_register_float(fd_reg, fs / ft);
break;
- case ABS_D:
+ case ABS_S:
set_fpu_register_float(fd_reg, fabs(fs));
break;
- case MOV_D:
+ case MOV_S:
set_fpu_register_float(fd_reg, fs);
break;
- case NEG_D:
+ case NEG_S:
set_fpu_register_float(fd_reg, -fs);
break;
- case SQRT_D:
+ case SQRT_S:
set_fpu_register_float(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_S: {
+ float result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_float(fd_reg, result);
+ break;
+ }
+ case RECIP_S: {
+ float result = 1.0 / fs;
+ set_fpu_register_float(fd_reg, result);
+ break;
+ }
+ case C_F_D:
+ set_fcsr_bit(fcsr_cc, false);
+ break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
@@ -2411,8 +2759,289 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
case CVT_D_S:
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
+ case CLASS_S: { // Mips64r6 instruction
+ // Convert float input to uint32_t for easier bit manipulation
+ uint32_t classed = bit_cast<uint32_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input float
+ uint32_t sign = (classed >> 31) & 1;
+ uint32_t exponent = (classed >> 23) & 0x000000ff;
+ uint32_t mantissa = classed & 0x007fffff;
+ uint32_t result;
+ float fResult;
+
+ // Setting flags if input float is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFF800000);
+ bool posInf = (classed == 0x7F800000);
+ bool negZero = (classed == 0x80000000);
+ bool posZero = (classed == 0x00000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if float is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && (exponent == 0xff)) {
+ quietNan = ((mantissa & 0x00400000) != 0) &&
+ ((mantissa & (0x00400000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if float is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if float is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.S instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ fResult = bit_cast<float>(result);
+ set_fpu_register_float(fd_reg, fResult);
+
+ break;
+ }
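
The CLASS.S result is a 10-bit one-hot mask. A compact cross-check built on <cmath> classification (a sketch; std::fpclassify cannot distinguish signaling from quiet NaNs, so NaNs are reported as quiet here):

#include <cmath>
#include <cstdint>

uint32_t ClassifyFloat(float f) {
  bool neg = std::signbit(f);
  switch (std::fpclassify(f)) {
    case FP_INFINITE:  return neg ? (1u << 2) : (1u << 6);
    case FP_NORMAL:    return neg ? (1u << 3) : (1u << 7);
    case FP_SUBNORMAL: return neg ? (1u << 4) : (1u << 8);
    case FP_ZERO:      return neg ? (1u << 5) : (1u << 9);
    default:           return 1u << 1;  // NaN (quiet bit of the mask)
  }
}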
+ case CVT_L_S: {
+ float rounded;
+ int64_t result;
+ round64_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case CVT_W_S: {
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fs, rounded, result, fs);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case TRUNC_W_S: { // Truncate single to word (round towards 0).
+ float rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case TRUNC_L_S: { // Mips64r2 instruction.
+ float rounded = trunc(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case ROUND_W_S: {
+ float rounded = std::floor(fs + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ROUND_L_S: { // Mips64r2 instruction.
+ float rounded = std::floor(fs + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = static_cast<int64_t>(result);
+ set_fpu_register(fd_reg, i64);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case FLOOR_L_S: { // Mips64r2 instruction.
+ float rounded = floor(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case FLOOR_W_S: // Round single to word towards negative infinity.
+ {
+ float rounded = std::floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case CEIL_W_S: // Round single to word towards positive infinity.
+ {
+ float rounded = std::ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register_word(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ }
+ } break;
+ case CEIL_L_S: { // Mips64r2 instruction.
+ float rounded = ceil(fs);
+ int64_t result = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round64_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
+ }
+ break;
+ }
+ case MINA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ float result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_float(fd_reg, result);
+ }
+ break;
+ case MIN:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs >= ft) ? ft : fs);
+ }
+ break;
+ case MAX:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_float(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_float(fd_reg, fs);
+ } else {
+ set_fpu_register_float(fd_reg, (fs <= ft) ? ft : fs);
+ }
+ break;
+ case SEL:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ break;
+ case SELEQZ_C:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ break;
+ case SELNEZ_C:
+ DCHECK(kArchVariant == kMips64r6);
+ set_fpu_register_float(
+ fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ break;
+ case MOVZ_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.S and MOVF.S.
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.S
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ } else {
+ // MOVF.S
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ }
+ break;
+ }
default:
- // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+ // CVT_PS_S is unimplemented.
UNREACHABLE();
}
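
The MINA/MAXA/MIN/MAX cases above share one NaN rule: a single NaN operand yields the other operand, and only two NaN operands propagate NaN. Condensed into a sketch for the MIN case:

#include <cmath>

float R6MinSketch(float fs, float ft) {
  if (std::isnan(fs)) return std::isnan(ft) ? fs : ft;
  if (std::isnan(ft)) return fs;
  return (fs >= ft) ? ft : fs;  // fs == ft returns ft, as in the case above
}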
@@ -2420,13 +3049,14 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
- const int32_t& fs_reg,
- const int32_t& ft_reg,
- const int32_t& fd_reg) {
+ const int32_t fs_reg,
+ const int32_t ft_reg,
+ const int32_t fd_reg) {
double ft, fs, fd;
uint32_t cc, fcsr_cc;
fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
+ ft = (instr->FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg)
+ : 0.0;
fd = get_fpu_register_double(fd_reg);
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
@@ -2438,7 +3068,7 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
double result, temp, temp_result;
double upper = std::ceil(fs);
double lower = std::floor(fs);
- switch (FCSR_ & 0x3) {
+ switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
if (upper - fs < fs - lower) {
result = upper;
@@ -2482,6 +3112,79 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
DCHECK(kArchVariant == kMips64r6);
set_fpu_register_double(fd_reg, (ft_int & 0x1) != 0 ? fs : 0.0);
break;
+ case MOVZ_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt == 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVN_C: {
+ DCHECK(kArchVariant == kMips64r2);
+ int32_t rt_reg = instr->RtValue();
+ int64_t rt = get_register(rt_reg);
+ if (rt != 0) {
+ set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MOVF: {
+ // Same function field for MOVT.D and MOVF.D
+ uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ ft_cc = get_fcsr_condition_bit(ft_cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ // MOVT.D
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ } else {
+ // MOVF.D
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ }
+ break;
+ }
+ case MINA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) > fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) < fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
+ case MAXA:
+ DCHECK(kArchVariant == kMips64r6);
+ fs = get_fpu_register_double(fs_reg);
+ if (std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else if (std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, ft);
+ } else if (!std::isnan(fs) && std::isnan(ft)) {
+ set_fpu_register_double(fd_reg, fs);
+ } else {
+ double result;
+ if (fabs(fs) < fabs(ft)) {
+ result = ft;
+ } else if (fabs(fs) > fabs(ft)) {
+ result = fs;
+ } else {
+ result = (fs > ft ? fs : ft);
+ }
+ set_fpu_register_double(fd_reg, result);
+ }
+ break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
fs = get_fpu_register_double(fs_reg);
@@ -2532,6 +3235,16 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
case SQRT_D:
set_fpu_register_double(fd_reg, fast_sqrt(fs));
break;
+ case RSQRT_D: {
+ double result = 1.0 / fast_sqrt(fs);
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
+ case RECIP_D: {
+ double result = 1.0 / fs;
+ set_fpu_register_double(fd_reg, result);
+ break;
+ }
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
@@ -2613,15 +3326,20 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg, result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg, kFPU64InvalidResult);
}
break;
}
case ROUND_L_D: { // Mips64r2 instruction.
- // check error cases
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded = std::floor(fs + 0.5);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ int64_t i64 = static_cast<int64_t>(result);
+ set_fpu_register(fd_reg, i64);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPU64InvalidResult);
}
@@ -2654,9 +3372,75 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
}
break;
}
- case C_F_D:
- UNIMPLEMENTED_MIPS();
+ case CLASS_D: { // Mips64r6 instruction
+ // Convert double input to uint64_t for easier bit manipulation
+ uint64_t classed = bit_cast<uint64_t>(fs);
+
+ // Extracting sign, exponent and mantissa from the input double
+ uint32_t sign = (classed >> 63) & 1;
+ uint32_t exponent = (classed >> 52) & 0x00000000000007ff;
+ uint64_t mantissa = classed & 0x000fffffffffffff;
+ uint64_t result;
+ double dResult;
+
+ // Setting flags if input double is negative infinity,
+ // positive infinity, negative zero or positive zero
+ bool negInf = (classed == 0xFFF0000000000000);
+ bool posInf = (classed == 0x7FF0000000000000);
+ bool negZero = (classed == 0x8000000000000000);
+ bool posZero = (classed == 0x0000000000000000);
+
+ bool signalingNan;
+ bool quietNan;
+ bool negSubnorm;
+ bool posSubnorm;
+ bool negNorm;
+ bool posNorm;
+
+ // Setting flags if double is NaN
+ signalingNan = false;
+ quietNan = false;
+ if (!negInf && !posInf && exponent == 0x7ff) {
+ quietNan = ((mantissa & 0x0008000000000000) != 0) &&
+ ((mantissa & (0x0008000000000000 - 1)) == 0);
+ signalingNan = !quietNan;
+ }
+
+ // Setting flags if double is subnormal number
+ posSubnorm = false;
+ negSubnorm = false;
+ if ((exponent == 0) && (mantissa != 0)) {
+ DCHECK(sign == 0 || sign == 1);
+ posSubnorm = (sign == 0);
+ negSubnorm = (sign == 1);
+ }
+
+ // Setting flags if double is normal number
+ posNorm = false;
+ negNorm = false;
+ if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
+ !quietNan && !negZero && !posZero) {
+ DCHECK(sign == 0 || sign == 1);
+ posNorm = (sign == 0);
+ negNorm = (sign == 1);
+ }
+
+ // Calculating result according to description of CLASS.D instruction
+ result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
+ (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
+ (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
+
+ DCHECK(result != 0);
+
+ dResult = bit_cast<double>(result);
+ set_fpu_register_double(fd_reg, dResult);
+
break;
+ }
+ case C_F_D: {
+ set_fcsr_bit(fcsr_cc, false);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -2664,9 +3448,12 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
void Simulator::DecodeTypeRegisterWRsType(Instruction* instr,
- const int32_t& fs_reg,
- const int32_t& fd_reg,
+ const int32_t fs_reg,
+ const int32_t fd_reg,
+ const int32_t ft_reg,
int64_t& alu_out) {
+ float fs = get_fpu_register_float(fs_reg);
+ float ft = get_fpu_register_float(ft_reg);
switch (instr->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
alu_out = get_fpu_register_signed_word(fs_reg);
@@ -2676,16 +3463,89 @@ void Simulator::DecodeTypeRegisterWRsType(Instruction* instr,
alu_out = get_fpu_register_signed_word(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
- default: // Mips64r6 CMP.S instructions unimplemented.
+ case CMP_AF:
+ set_fpu_register_word(fd_reg, 0);
+ break;
+ case CMP_UN:
+ if (std::isnan(fs) || std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_EQ:
+ if (fs == ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UEQ:
+ if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LT:
+ if (fs < ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULT:
+ if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_LE:
+ if (fs <= ft) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_ULE:
+ if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ // NE is an ordered comparison: it must report false when either
+ // operand is NaN (mirroring the CMP_NE handling for the L format).
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register_word(fd_reg, -1);
+ } else {
+ set_fpu_register_word(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
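
The r6 CMP.cond.fmt family implemented above replaces the r2 C.cond.fmt/FCC scheme: instead of setting a condition bit in the FCSR, each compare writes an all-ones or all-zeros mask straight into fd, and the unordered variants (UN, UEQ, ULT, ULE, UNE) additionally succeed when either operand is NaN. The eleven cases compress to one helper; a condensed sketch (helper and enum names are illustrative, not V8 API):

```cpp
#include <cmath>
#include <cstdint>

enum class FpCond { AF, UN, EQ, UEQ, LT, ULT, LE, ULE, OR, UNE, NE };

// Returns the r6 compare mask: all bits set if the condition holds, else 0.
// The U-variants also report true when the comparison is unordered (NaN).
int32_t CompareMask(FpCond cond, float fs, float ft) {
  const bool unordered = std::isnan(fs) || std::isnan(ft);
  bool res = false;
  switch (cond) {
    case FpCond::AF:  res = false; break;                  // always false
    case FpCond::UN:  res = unordered; break;
    case FpCond::EQ:  res = fs == ft; break;
    case FpCond::UEQ: res = fs == ft || unordered; break;
    case FpCond::LT:  res = fs < ft; break;
    case FpCond::ULT: res = fs < ft || unordered; break;
    case FpCond::LE:  res = fs <= ft; break;
    case FpCond::ULE: res = fs <= ft || unordered; break;
    case FpCond::OR:  res = !unordered; break;             // ordered
    case FpCond::UNE: res = fs != ft || unordered; break;
    case FpCond::NE:  res = fs != ft && !unordered; break;
  }
  return res ? -1 : 0;  // -1 == all bits set
}
```
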
void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
- const int32_t& fs_reg,
- const int32_t& fd_reg,
- const int32_t& ft_reg) {
+ const int32_t fs_reg,
+ const int32_t fd_reg,
+ const int32_t ft_reg) {
double fs = get_fpu_register_double(fs_reg);
double ft = get_fpu_register_double(ft_reg);
int64_t i64;
@@ -2695,10 +3555,11 @@ void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
- UNIMPLEMENTED_MIPS();
+ i64 = get_fpu_register(fs_reg);
+ set_fpu_register_float(fd_reg, static_cast<float>(i64));
break;
- case CMP_AF: // Mips64r6 CMP.D instructions.
- UNIMPLEMENTED_MIPS();
+ case CMP_AF:
+ set_fpu_register(fd_reg, 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
@@ -2749,17 +3610,37 @@ void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
set_fpu_register(fd_reg, 0);
}
break;
- default: // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED
+ case CMP_OR:
+ if (!std::isnan(fs) && !std::isnan(ft)) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_UNE:
+ if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ case CMP_NE:
+ if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
+ set_fpu_register(fd_reg, -1);
+ } else {
+ set_fpu_register(fd_reg, 0);
+ }
+ break;
+ default:
UNREACHABLE();
}
}
-
void Simulator::DecodeTypeRegisterCOP1(
- Instruction* instr, const int32_t& rs_reg, const int64_t& rs,
- const uint64_t& rs_u, const int32_t& rt_reg, const int64_t& rt,
- const uint64_t& rt_u, const int32_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int32_t& fd_reg,
+ Instruction* instr, const int32_t rs_reg, const int64_t rs,
+ const uint64_t rs_u, const int32_t rt_reg, const int64_t rt,
+ const uint64_t rt_u, const int32_t rd_reg, const int32_t fr_reg,
+ const int32_t fs_reg, const int32_t ft_reg, const int32_t fd_reg,
int64_t& alu_out) {
switch (instr->RsFieldRaw()) {
case BC1: // Branch on coprocessor condition.
@@ -2778,18 +3659,19 @@ void Simulator::DecodeTypeRegisterCOP1(
case CTC1:
// At the moment only FCSR is supported.
DCHECK(fs_reg == kFCSRRegister);
- FCSR_ = registers_[rt_reg];
+ FCSR_ = static_cast<uint32_t>(registers_[rt_reg]);
break;
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg, 0);
- set_fpu_register_word(fs_reg, registers_[rt_reg]);
+ set_fpu_register_word(fs_reg, static_cast<int32_t>(registers_[rt_reg]));
break;
case DMTC1:
set_fpu_register(fs_reg, registers_[rt_reg]);
break;
case MTHC1:
- set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
+ set_fpu_register_hi_word(fs_reg,
+ static_cast<int32_t>(registers_[rt_reg]));
break;
case S:
DecodeTypeRegisterSRsType(instr, fs_reg, ft_reg, fd_reg);
@@ -2798,7 +3680,7 @@ void Simulator::DecodeTypeRegisterCOP1(
DecodeTypeRegisterDRsType(instr, fs_reg, ft_reg, fd_reg);
break;
case W:
- DecodeTypeRegisterWRsType(instr, fs_reg, fd_reg, alu_out);
+ DecodeTypeRegisterWRsType(instr, fs_reg, fd_reg, ft_reg, alu_out);
break;
case L:
DecodeTypeRegisterLRsType(instr, fs_reg, fd_reg, ft_reg);
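
The casts added around CTC1/MTC1/MTHC1 above make the 64-to-32-bit narrowing explicit: on MIPS64 these moves transfer only one 32-bit half of a GPR into FPU state. A sketch of the MTC1 behavior the simulator models, assuming a flat uint64_t register file (names are illustrative):

```cpp
#include <cstdint>

// MTC1 writes the GPR's low 32 bits into the FPU register's low word and,
// as the comment in the patch notes, hardware zeroes the upper 32 bits.
void Mtc1(uint64_t* fpu_reg, int64_t gpr) {
  const uint32_t low = static_cast<uint32_t>(gpr);  // explicit truncation
  *fpu_reg = static_cast<uint64_t>(low);            // high word becomes 0
}
```
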
@@ -2810,10 +3692,10 @@ void Simulator::DecodeTypeRegisterCOP1(
void Simulator::DecodeTypeRegisterCOP1X(Instruction* instr,
- const int32_t& fr_reg,
- const int32_t& fs_reg,
- const int32_t& ft_reg,
- const int32_t& fd_reg) {
+ const int32_t fr_reg,
+ const int32_t fs_reg,
+ const int32_t ft_reg,
+ const int32_t fd_reg) {
switch (instr->FunctionFieldRaw()) {
case MADD_D:
double fr, ft, fs;
@@ -2829,13 +3711,14 @@ void Simulator::DecodeTypeRegisterCOP1X(Instruction* instr,
void Simulator::DecodeTypeRegisterSPECIAL(
- Instruction* instr, const int64_t& rs_reg, const int64_t& rs,
- const uint64_t& rs_u, const int64_t& rt_reg, const int64_t& rt,
- const uint64_t& rt_u, const int64_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int64_t& fd_reg,
- int64_t& i64hilo, uint64_t& u64hilo, int64_t& alu_out, bool& do_interrupt,
- int64_t& current_pc, int64_t& next_pc, int64_t& return_addr_reg,
- int64_t& i128resultH, int64_t& i128resultL) {
+ Instruction* instr, const int32_t rs_reg, const int64_t rs,
+ const uint64_t rs_u, const int32_t rt_reg, const int64_t rt,
+ const uint64_t rt_u, const int32_t rd_reg, const int32_t fr_reg,
+ const int32_t fs_reg, const int32_t ft_reg, const int32_t fd_reg,
+ const int64_t i64hilo, const uint64_t u64hilo, const int64_t alu_out,
+ const bool do_interrupt, const int64_t current_pc, const int64_t next_pc,
+ const int32_t return_addr_reg, const int64_t i128resultH,
+ const int64_t i128resultL) {
switch (instr->FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(kArchVariant == kMips64r6);
@@ -2999,8 +3882,8 @@ void Simulator::DecodeTypeRegisterSPECIAL(
void Simulator::DecodeTypeRegisterSPECIAL2(Instruction* instr,
- const int64_t& rd_reg,
- int64_t& alu_out) {
+ const int32_t rd_reg,
+ int64_t alu_out) {
switch (instr->FunctionFieldRaw()) {
case MUL:
set_register(rd_reg, alu_out);
@@ -3016,8 +3899,9 @@ void Simulator::DecodeTypeRegisterSPECIAL2(Instruction* instr,
void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
- const int64_t& rt_reg,
- int64_t& alu_out) {
+ const int32_t rt_reg,
+ const int32_t rd_reg,
+ const int64_t alu_out) {
switch (instr->FunctionFieldRaw()) {
case INS:
// Ins instr leaves result in Rt, rather than Rd.
@@ -3030,6 +3914,11 @@ void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
set_register(rt_reg, alu_out);
TraceRegWr(alu_out);
break;
+ case BSHFL:
+ case DBSHFL:
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ break;
default:
UNREACHABLE();
}
@@ -3039,13 +3928,13 @@ void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
void Simulator::DecodeTypeRegister(Instruction* instr) {
// Instruction fields.
const Opcode op = instr->OpcodeFieldRaw();
- const int64_t rs_reg = instr->RsValue();
+ const int32_t rs_reg = instr->RsValue();
const int64_t rs = get_register(rs_reg);
const uint64_t rs_u = static_cast<uint32_t>(rs);
- const int64_t rt_reg = instr->RtValue();
+ const int32_t rt_reg = instr->RtValue();
const int64_t rt = get_register(rt_reg);
const uint64_t rt_u = static_cast<uint32_t>(rt);
- const int64_t rd_reg = instr->RdValue();
+ const int32_t rd_reg = instr->RdValue();
const int32_t fr_reg = instr->FrValue();
const int32_t fs_reg = instr->FsValue();
@@ -3067,7 +3956,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
int64_t current_pc = get_pc();
// Next pc
int64_t next_pc = 0;
- int64_t return_addr_reg = 31;
+ int32_t return_addr_reg = 31;
int64_t i128resultH;
int64_t i128resultL;
@@ -3105,7 +3994,31 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
DecodeTypeRegisterSPECIAL2(instr, rd_reg, alu_out);
break;
case SPECIAL3:
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, alu_out);
+ switch (instr->FunctionFieldRaw()) {
+ case BSHFL: {
+ int sa = instr->SaValue();
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN:
+ default:
+ // ALIGN and the remaining BSHFL ops currently share the common
+ // SPECIAL3 write-back.
+ DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ break;
+ }
+ // Without this break, execution fell through into DBSHFL and then
+ // into default, invoking the helper several times per instruction.
+ break;
+ }
+ case DBSHFL: {
+ int sa = instr->SaValue();
+ sa >>= kBp3Bits;
+ switch (sa) {
+ case DALIGN:
+ default:
+ DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ break;
+ }
+ break;
+ }
+ default:
+ DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ break;
+ }
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
@@ -3121,11 +4034,16 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
+ int32_t rs_reg = instr->RsValue();
int64_t rs = get_register(instr->RsValue());
uint64_t rs_u = static_cast<uint64_t>(rs);
- int64_t rt_reg = instr->RtValue(); // Destination register.
+ int32_t rt_reg = instr->RtValue(); // Destination register.
int64_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
+ int32_t imm18 = instr->Imm18Value();
+ int32_t imm19 = instr->Imm19Value();
+ int32_t imm21 = instr->Imm21Value();
+ int32_t imm26 = instr->Imm26Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
int64_t ft = get_fpu_register(ft_reg);
@@ -3134,11 +4052,17 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint64_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
int64_t se_imm16 = imm16;
+ int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xfffffffffffc0000 : 0);
+ int64_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfffffffffff80000 : 0);
+ int64_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfffffffffc000000 : 0);
+
// Get current pc.
int64_t current_pc = get_pc();
// Next pc.
int64_t next_pc = bad_ra;
+ // pc increment.
+ int16_t pc_increment;
// Used for conditional branch instructions.
bool do_branch = false;
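
The se_imm18/se_imm19/se_imm26 lines above hand-roll sign extension for immediate widths that have no native integer type. The or-with-mask pattern generalizes; a hypothetical helper showing the equivalent xor/subtract idiom:

```cpp
#include <cstdint>

// Sign-extend the low `bits` bits of `value` into a full int64_t,
// matching the (value & sign_bit) ? value | high_mask : value pattern
// used for se_imm18, se_imm19 and se_imm26 above.
int64_t SignExtend(int64_t value, int bits) {
  const int64_t sign_bit = int64_t{1} << (bits - 1);
  return (value ^ sign_bit) - sign_bit;
}

// SignExtend(0x20000, 18) == -0x20000; SignExtend(0x1ffff, 18) == 0x1ffff.
```
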
@@ -3252,6 +4176,33 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGTZ:
do_branch = rs > 0;
break;
+ case POP66: {
+ if (rs_reg) { // BEQZC
+ int32_t se_imm21 =
+ static_cast<int32_t>(imm21 << (kOpcodeBits + kRsBits));
+ se_imm21 = se_imm21 >> (kOpcodeBits + kRsBits);
+ if (rs == 0)
+ next_pc = current_pc + 4 + (se_imm21 << 2);
+ else
+ next_pc = current_pc + 4;
+ } else { // JIC
+ next_pc = rt + imm16;
+ }
+ break;
+ }
+ case BC: {
+ next_pc = current_pc + 4 + (se_imm26 << 2);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case BALC: {
+ set_register(31, current_pc + 4);
+ next_pc = current_pc + 4 + (se_imm26 << 2);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
// ------------- Arithmetic instructions.
case ADDI:
case DADDI:
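
BC, BALC and the POP66 group handled above are r6 compact branches: there is no delay slot, and the target is formed from the address of the following instruction plus a sign-extended, word-scaled offset (26 bits for BC/BALC, 21 for BEQZC). A sketch of the BC/BALC target computation (name illustrative):

```cpp
#include <cstdint>

// Compact-branch target as computed for BC/BALC above: sign-extend the
// 26-bit offset, scale by the 4-byte instruction size, and add it to the
// address of the next instruction (current_pc + 4; there is no delay slot).
int64_t CompactBranchTarget26(int64_t current_pc, int32_t imm26) {
  const int64_t se_imm26 = (imm26 ^ (1 << 25)) - (1 << 25);
  return current_pc + 4 + se_imm26 * 4;
}
```
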
@@ -3266,11 +4217,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = rs + se_imm16;
break;
case ADDIU: {
- int32_t alu32_out = rs + se_imm16;
- // Sign-extend result of 32bit operation into 64bit register.
- alu_out = static_cast<int64_t>(alu32_out);
- }
+ int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
+ // Sign-extend result of 32bit operation into 64bit register.
+ alu_out = static_cast<int64_t>(alu32_out);
break;
+ }
case DADDIU:
alu_out = rs + se_imm16;
break;
@@ -3281,20 +4232,20 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = (rs_u < static_cast<uint64_t>(se_imm16)) ? 1 : 0;
break;
case ANDI:
- alu_out = rs & oe_imm16;
+ alu_out = rs & oe_imm16;
break;
case ORI:
- alu_out = rs | oe_imm16;
+ alu_out = rs | oe_imm16;
break;
case XORI:
- alu_out = rs ^ oe_imm16;
+ alu_out = rs ^ oe_imm16;
break;
case LUI: {
- int32_t alu32_out = (oe_imm16 << 16);
- // Sign-extend result of 32bit operation into 64bit register.
- alu_out = static_cast<int64_t>(alu32_out);
- }
+ int32_t alu32_out = static_cast<int32_t>(oe_imm16 << 16);
+ // Sign-extend result of 32bit operation into 64bit register.
+ alu_out = static_cast<int64_t>(alu32_out);
break;
+ }
// ------------- Memory instructions.
case LB:
addr = rs + se_imm16;
@@ -3385,10 +4336,83 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
addr = rs + se_imm16;
break;
+ // ------------- JIALC and BNEZC instructions.
+ case POP76:
+ // Next pc.
+ next_pc = rt + se_imm16;
+ // The instruction after the jump is NOT executed.
+ pc_increment = Instruction::kInstrSize;
+ if (instr->IsLinkingInstruction()) {
+ set_register(31, current_pc + pc_increment);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ // ------------- PC-Relative instructions.
+ case PCREL: {
+ // rt field: checking 5-bits.
+ uint8_t rt = (imm21 >> kImm16Bits);
+ switch (rt) {
+ case ALUIPC:
+ addr = current_pc + (se_imm16 << 16);
+ alu_out = static_cast<int64_t>(~0x0FFFF) & addr;
+ break;
+ case AUIPC:
+ alu_out = current_pc + (se_imm16 << 16);
+ break;
+ default: {
+ // rt field: checking the most significant 3-bits.
+ rt = (imm21 >> kImm18Bits);
+ switch (rt) {
+ case LDPC:
+ addr =
+ (current_pc & static_cast<int64_t>(~0x7)) + (se_imm18 << 3);
+ alu_out = Read2W(addr, instr);
+ break;
+ default: {
+ // rt field: checking the most significant 2-bits.
+ rt = (imm21 >> kImm19Bits);
+ switch (rt) {
+ case LWUPC: {
+ int32_t offset = imm19;
+ // Set sign.
+ offset <<= (kOpcodeBits + kRsBits + 2);
+ offset >>= (kOpcodeBits + kRsBits + 2);
+ addr = current_pc + (offset << 2);
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ alu_out = *ptr;
+ break;
+ }
+ case LWPC: {
+ int32_t offset = imm19;
+ // Set sign.
+ offset <<= (kOpcodeBits + kRsBits + 2);
+ offset >>= (kOpcodeBits + kRsBits + 2);
+ addr = current_pc + (offset << 2);
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ alu_out = *ptr;
+ break;
+ }
+ case ADDIUPC:
+ alu_out = current_pc + (se_imm19 << 2);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
default:
UNREACHABLE();
}
+
// ---------- Raise exceptions triggered.
SignalExceptions();
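
All of the PCREL forms decoded above compute an address relative to current_pc and differ only in offset width, scale and base alignment: AUIPC adds a 16-bit offset shifted left 16, ALUIPC does the same and then clears the low 16 bits, ADDIUPC/LWPC/LWUPC scale a 19-bit offset by 4, and LDPC aligns the PC down to 8 bytes before adding an 18-bit offset scaled by 8. Two representative shapes as standalone sketches (names illustrative):

```cpp
#include <cstdint>

// ALUIPC: pc + (imm16 << 16), with the low 16 bits of the result cleared.
int64_t AluipcResult(int64_t pc, int16_t imm16) {
  const int64_t addr = pc + (int64_t{imm16} << 16);
  return addr & ~int64_t{0xffff};
}

// LDPC: align the PC down to the 8-byte access size, then add the
// sign-extended 18-bit offset scaled by 8.
int64_t LdpcAddress(int64_t pc, int32_t imm18) {
  const int64_t se_imm18 = (imm18 ^ (1 << 17)) - (1 << 17);
  return (pc & ~int64_t{7}) + se_imm18 * 8;
}
```
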
@@ -3444,16 +4468,16 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteH(addr, static_cast<uint16_t>(rt), instr);
break;
case SWL:
- WriteW(addr, mem_value, instr);
+ WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
case SW:
- WriteW(addr, rt, instr);
+ WriteW(addr, static_cast<int32_t>(rt), instr);
break;
case SD:
Write2W(addr, rt, instr);
break;
case SWR:
- WriteW(addr, mem_value, instr);
+ WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
case LWC1:
set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
@@ -3464,12 +4488,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
case SWC1:
addr = rs + se_imm16;
- WriteW(addr, get_fpu_register(ft_reg), instr);
+ WriteW(addr, static_cast<int32_t>(get_fpu_register(ft_reg)), instr);
break;
case SDC1:
addr = rs + se_imm16;
WriteD(addr, get_fpu_register_double(ft_reg), instr);
break;
+ case PCREL:
+ set_register(rs_reg, alu_out);
+ break;
default:
break;
}
@@ -3494,11 +4520,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
// Get current pc.
- int32_t current_pc = get_pc();
+ int64_t current_pc = get_pc();
// Get unchanged bits of pc.
- int32_t pc_high_bits = current_pc & 0xf0000000;
+ int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
// Next pc.
- int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
+ int64_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
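
The DecodeTypeJump change above is a straight 64-bit correctness fix: J/JAL replace only the low 28 bits of the PC (the 26-bit index shifted left 2), so on a 64-bit simulated PC the preserved region mask must cover bits 63..28, not the old 32-bit 0xf0000000. The equivalent computation:

```cpp
#include <cstdint>

// J/JAL stay within the current 256 MB region: keep bits 63..28 of the
// PC and substitute the word-aligned 26-bit instruction index below them.
int64_t JumpTarget(int64_t current_pc, uint32_t imm26) {
  const int64_t region_mask = ~int64_t{0x0fffffff};  // bits 63..28
  return (current_pc & region_mask) | (int64_t{imm26} << 2);
}
```
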
@@ -3753,7 +4779,8 @@ uintptr_t Simulator::PopAddress() {
#undef UNSUPPORTED
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // USE_SIMULATOR
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index fba2b220e1..346d3584f4 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -207,11 +207,18 @@ class Simulator {
bool test_fcsr_bit(uint32_t cc);
bool set_fcsr_round_error(double original, double rounded);
bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
void round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs);
void round64_according_to_fcsr(double toRound, double& rounded,
int64_t& rounded_int, double fs);
-
+ void round_according_to_fcsr(float toRound, float& rounded,
+ int32_t& rounded_int, float fs);
+ void round64_according_to_fcsr(float toRound, float& rounded,
+ int64_t& rounded_int, float fs);
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+ unsigned int get_fcsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
int64_t get_pc() const;
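
The new float overloads and FCSR rounding-mode accessors declared above mirror the existing double paths: the low two bits of FCSR select round-to-nearest-even, toward zero, toward +infinity or toward -infinity. A sketch of dispatching on such a mode field with the C library primitives (helper is illustrative; the simulator's versions also update the inexact/invalid FCSR flags):

```cpp
#include <cmath>
#include <cstdint>

enum FpuRoundingMode : uint32_t {
  kRoundToNearest = 0,   // ties to even
  kRoundToZero = 1,
  kRoundToPlusInf = 2,
  kRoundToMinusInf = 3
};

// Round `value` the way the FCSR RM field requests. std::nearbyint honors
// round-to-nearest-even under the default FE_TONEAREST environment.
double RoundByMode(double value, FpuRoundingMode mode) {
  switch (mode) {
    case kRoundToNearest:  return std::nearbyint(value);
    case kRoundToZero:     return std::trunc(value);
    case kRoundToPlusInf:  return std::ceil(value);
    case kRoundToMinusInf: return std::floor(value);
  }
  return value;  // unreachable for a 2-bit field
}
```
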
@@ -229,6 +236,8 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
+ static void TearDown(HashMap* i_cache, Redirection* first);
+
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
@@ -317,56 +326,55 @@ class Simulator {
inline int32_t SetDoubleLOW(double* addr);
// functions called from DecodeTypeRegister
- void DecodeTypeRegisterCOP1(Instruction* instr, const int32_t& rs_reg,
- const int64_t& rs, const uint64_t& rs_u,
- const int32_t& rt_reg, const int64_t& rt,
- const uint64_t& rt_u, const int32_t& rd_reg,
- const int32_t& fr_reg, const int32_t& fs_reg,
- const int32_t& ft_reg, const int32_t& fd_reg,
+ void DecodeTypeRegisterCOP1(Instruction* instr, const int32_t rs_reg,
+ const int64_t rs, const uint64_t rs_u,
+ const int32_t rt_reg, const int64_t rt,
+ const uint64_t rt_u, const int32_t rd_reg,
+ const int32_t fr_reg, const int32_t fs_reg,
+ const int32_t ft_reg, const int32_t fd_reg,
int64_t& alu_out);
- void DecodeTypeRegisterCOP1X(Instruction* instr, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg,
- const int32_t& fd_reg);
+ void DecodeTypeRegisterCOP1X(Instruction* instr, const int32_t fr_reg,
+ const int32_t fs_reg, const int32_t ft_reg,
+ const int32_t fd_reg);
void DecodeTypeRegisterSPECIAL(
- Instruction* instr, const int64_t& rs_reg, const int64_t& rs,
- const uint64_t& rs_u, const int64_t& rt_reg, const int64_t& rt,
- const uint64_t& rt_u, const int64_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int64_t& fd_reg,
- int64_t& i64hilo, uint64_t& u64hilo, int64_t& alu_out, bool& do_interrupt,
- int64_t& current_pc, int64_t& next_pc, int64_t& return_addr_reg,
- int64_t& i128resultH, int64_t& i128resultL);
+ Instruction* instr, const int32_t rs_reg, const int64_t rs,
+ const uint64_t rs_u, const int32_t rt_reg, const int64_t rt,
+ const uint64_t rt_u, const int32_t rd_reg, const int32_t fr_reg,
+ const int32_t fs_reg, const int32_t ft_reg, const int32_t fd_reg,
+ const int64_t i64hilo, const uint64_t u64hilo, const int64_t alu_out,
+ const bool do_interrupt, const int64_t current_pc, const int64_t next_pc,
+ const int32_t return_addr_reg, const int64_t i128resultH,
+ const int64_t i128resultL);
+
- void DecodeTypeRegisterSPECIAL2(Instruction* instr, const int64_t& rd_reg,
- int64_t& alu_out);
+ void DecodeTypeRegisterSPECIAL2(Instruction* instr, const int32_t rd_reg,
+ const int64_t alu_out);
- void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int64_t& rt_reg,
- int64_t& alu_out);
+ void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int32_t rt_reg,
+ const int32_t rd_reg, const int64_t alu_out);
- void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t& fs_reg,
- const int32_t& ft_reg, const int32_t& fd_reg);
+ void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t fs_reg,
+ const int32_t ft_reg, const int32_t fd_reg);
- void DecodeTypeRegisterDRsType(Instruction* instr, const int32_t& fs_reg,
- const int32_t& ft_reg, const int32_t& fd_reg);
+ void DecodeTypeRegisterDRsType(Instruction* instr, const int32_t fs_reg,
+ const int32_t ft_reg, const int32_t fd_reg);
- void DecodeTypeRegisterWRsType(Instruction* instr, const int32_t& fs_reg,
- const int32_t& fd_reg, int64_t& alu_out);
+ void DecodeTypeRegisterWRsType(Instruction* instr, const int32_t fs_reg,
+ const int32_t ft_reg, const int32_t fd_reg,
+ int64_t& alu_out);
- void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t& fs_reg,
- const int32_t& fd_reg, const int32_t& ft_reg);
+ void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t fs_reg,
+ const int32_t fd_reg, const int32_t ft_reg);
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
// Helper function for DecodeTypeRegister.
- void ConfigureTypeRegister(Instruction* instr,
- int64_t* alu_out,
- int64_t* i64hilo,
- uint64_t* u64hilo,
- int64_t* next_pc,
- int64_t* return_addr_reg,
- bool* do_interrupt,
- int64_t* result128H,
+ void ConfigureTypeRegister(Instruction* instr, int64_t* alu_out,
+ int64_t* i64hilo, uint64_t* u64hilo,
+ int64_t* next_pc, int* return_addr_reg,
+ bool* do_interrupt, int64_t* result128H,
int64_t* result128L);
void DecodeTypeImmediate(Instruction* instr);
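
The signature churn through this header is one mechanical change: `const int32_t&` parameters become plain `const int32_t`. For scalar arguments a reference buys nothing and costs an indirection, and the callee must assume the referent may alias other memory; pass-by-value lets a register number travel in a machine register. An illustration of the two shapes:

```cpp
#include <cstdint>

// Before: every access in the callee is a load through a pointer,
// and the compiler must assume the referent can alias other memory.
void DecodeByRef(const int32_t& fs_reg);

// After: the value is copied once and typically stays in a register.
void DecodeByValue(int32_t fs_reg);
```

The remaining non-const references (such as `int64_t& alu_out`) are genuine out-parameters and are left as references on purpose.
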
@@ -408,7 +416,7 @@ class Simulator {
// ICache.
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
+ size_t size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
enum Exception {
@@ -488,12 +496,14 @@ class Simulator {
#ifdef MIPS_ABI_N64
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+ static_cast<int>( \
+ Simulator::current(Isolate::Current()) \
+ ->Call(entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#else // Must be O32 Abi.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+ static_cast<int>( \
+ Simulator::current(Isolate::Current()) \
+ ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#endif // MIPS_ABI_N64
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 9ea23d7122..dbdc68e68f 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -904,57 +904,12 @@ ObjectMirror.prototype.toText = function() {
* @return {Array} array (possibly empty) of InternalProperty instances
*/
ObjectMirror.GetInternalProperties = function(value) {
- if (IS_STRING_WRAPPER(value) || IS_NUMBER_WRAPPER(value) ||
- IS_BOOLEAN_WRAPPER(value)) {
- var primitiveValue = %_ValueOf(value);
- return [new InternalPropertyMirror("[[PrimitiveValue]]", primitiveValue)];
- } else if (IS_FUNCTION(value)) {
- var bindings = %BoundFunctionGetBindings(value);
- var result = [];
- if (bindings && IS_ARRAY(bindings)) {
- result.push(new InternalPropertyMirror("[[TargetFunction]]",
- bindings[0]));
- result.push(new InternalPropertyMirror("[[BoundThis]]", bindings[1]));
- var boundArgs = [];
- for (var i = 2; i < bindings.length; i++) {
- boundArgs.push(bindings[i]);
- }
- result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs));
- }
- return result;
- } else if (IS_MAP_ITERATOR(value) || IS_SET_ITERATOR(value)) {
- var details = IS_MAP_ITERATOR(value) ? %MapIteratorDetails(value)
- : %SetIteratorDetails(value);
- var kind;
- switch (details[2]) {
- case 1: kind = "keys"; break;
- case 2: kind = "values"; break;
- case 3: kind = "entries"; break;
- }
- var result = [
- new InternalPropertyMirror("[[IteratorHasMore]]", details[0]),
- new InternalPropertyMirror("[[IteratorIndex]]", details[1])
- ];
- if (kind) {
- result.push(new InternalPropertyMirror("[[IteratorKind]]", kind));
- }
- return result;
- } else if (IS_GENERATOR(value)) {
- return [
- new InternalPropertyMirror("[[GeneratorStatus]]",
- GeneratorGetStatus_(value)),
- new InternalPropertyMirror("[[GeneratorFunction]]",
- %GeneratorGetFunction(value)),
- new InternalPropertyMirror("[[GeneratorReceiver]]",
- %GeneratorGetReceiver(value))
- ];
- } else if (ObjectIsPromise(value)) {
- return [
- new InternalPropertyMirror("[[PromiseStatus]]", PromiseGetStatus_(value)),
- new InternalPropertyMirror("[[PromiseValue]]", PromiseGetValue_(value))
- ];
+ var properties = %DebugGetInternalProperties(value);
+ var result = [];
+ for (var i = 0; i < properties.length; i += 2) {
+ result.push(new InternalPropertyMirror(properties[i], properties[i + 1]));
}
- return [];
+ return result;
}
diff --git a/deps/v8/src/modules.cc b/deps/v8/src/modules.cc
index da643edcd1..2e6cfc0723 100644
--- a/deps/v8/src/modules.cc
+++ b/deps/v8/src/modules.cc
@@ -57,5 +57,5 @@ const AstRawString* ModuleDescriptor::LookupLocalExport(
DCHECK_NOT_NULL(entry->value);
return static_cast<const AstRawString*>(entry->value);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 00bf85d8b2..56859a1c97 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -10,14 +10,26 @@ var $observeNativeObjectObserve;
var $observeNativeObjectGetNotifier;
var $observeNativeObjectNotifierPerformChange;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalArray = global.Array;
var GlobalObject = global.Object;
+var InternalArray = utils.InternalArray;
+
+var ObjectFreeze;
+var ObjectIsFrozen;
+
+utils.Import(function(from) {
+ ObjectFreeze = from.ObjectFreeze;
+ ObjectIsFrozen = from.ObjectIsFrozen;
+});
// -------------------------------------------------------------------
@@ -196,28 +208,30 @@ function ObjectInfoGetOrCreate(object) {
performingCount: 0,
};
%WeakCollectionSet(GetObservationStateJS().objectInfoMap,
- object, objectInfo);
+ object, objectInfo, $getHash(object));
}
return objectInfo;
}
function ObjectInfoGet(object) {
- return %WeakCollectionGet(GetObservationStateJS().objectInfoMap, object);
+ return %WeakCollectionGet(GetObservationStateJS().objectInfoMap, object,
+ $getHash(object));
}
function ObjectInfoGetFromNotifier(notifier) {
return %WeakCollectionGet(GetObservationStateJS().notifierObjectInfoMap,
- notifier);
+ notifier, $getHash(notifier));
}
function ObjectInfoGetNotifier(objectInfo) {
if (IS_NULL(objectInfo.notifier)) {
- objectInfo.notifier = { __proto__: notifierPrototype };
+ var notifier = { __proto__: notifierPrototype };
+ objectInfo.notifier = notifier;
%WeakCollectionSet(GetObservationStateJS().notifierObjectInfoMap,
- objectInfo.notifier, objectInfo);
+ notifier, objectInfo, $getHash(notifier));
}
return objectInfo.notifier;
@@ -328,13 +342,14 @@ function ConvertAcceptListToTypeMap(arg) {
// priority. When a change record must be enqueued for the callback, it
// normalizes. When delivery clears any pending change records, it re-optimizes.
function CallbackInfoGet(callback) {
- return %WeakCollectionGet(GetObservationStateJS().callbackInfoMap, callback);
+ return %WeakCollectionGet(GetObservationStateJS().callbackInfoMap, callback,
+ $getHash(callback));
}
function CallbackInfoSet(callback, callbackInfo) {
%WeakCollectionSet(GetObservationStateJS().callbackInfoMap,
- callback, callbackInfo);
+ callback, callbackInfo, $getHash(callback));
}
@@ -376,7 +391,7 @@ function ObjectObserve(object, callback, acceptList) {
throw MakeTypeError(kObserveGlobalProxy, "observe");
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError(kObserveNonFunction, "observe");
- if ($objectIsFrozen(callback))
+ if (ObjectIsFrozen(callback))
throw MakeTypeError(kObserveCallbackFrozen);
var objectObserveFn = %GetObjectContextObjectObserve(object);
@@ -469,7 +484,7 @@ function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
%DefineDataPropertyUnchecked(
newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE);
}
- $objectFreeze(newRecord);
+ ObjectFreeze(newRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord);
}
@@ -521,8 +536,8 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
addedCount: addedCount
};
- $objectFreeze(changeRecord);
- $objectFreeze(changeRecord.removed);
+ ObjectFreeze(changeRecord);
+ ObjectFreeze(changeRecord.removed);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
@@ -546,7 +561,7 @@ function NotifyChange(type, object, name, oldValue) {
};
}
- $objectFreeze(changeRecord);
+ ObjectFreeze(changeRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
@@ -603,7 +618,7 @@ function ObjectGetNotifier(object) {
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "getNotifier");
- if ($objectIsFrozen(object)) return null;
+ if (ObjectIsFrozen(object)) return null;
if (!%ObjectWasCreatedInCurrentOrigin(object)) return null;
@@ -661,17 +676,17 @@ function ObserveMicrotaskRunner() {
// -------------------------------------------------------------------
-$installFunctions(GlobalObject, DONT_ENUM, [
+utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"deliverChangeRecords", ObjectDeliverChangeRecords,
"getNotifier", ObjectGetNotifier,
"observe", ObjectObserve,
"unobserve", ObjectUnobserve
]);
-$installFunctions(GlobalArray, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray, DONT_ENUM, [
"observe", ArrayObserve,
"unobserve", ArrayUnobserve
]);
-$installFunctions(notifierPrototype, DONT_ENUM, [
+utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
"notify", ObjectNotifierNotify,
"performChange", ObjectNotifierPerformChange
]);
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index de16dee26b..3474ebd8f7 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -47,10 +47,6 @@ void HeapObject::HeapObjectVerify() {
return;
}
- // TODO(yangguo): Use this check once crbug/436911 has been fixed.
- // DCHECK(!NeedsToEnsureDoubleAlignment() ||
- // IsAligned(OffsetFrom(address()), kDoubleAlignment));
-
switch (instance_type) {
case SYMBOL_TYPE:
Symbol::cast(this)->SymbolVerify();
@@ -62,15 +58,15 @@ void HeapObject::HeapObjectVerify() {
case MUTABLE_HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
+ case FLOAT32X4_TYPE:
+ Float32x4::cast(this)->Float32x4Verify();
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
break;
- case CONSTANT_POOL_ARRAY_TYPE:
- ConstantPoolArray::cast(this)->ConstantPoolArrayVerify();
- break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
@@ -218,6 +214,9 @@ void HeapNumber::HeapNumberVerify() {
}
+void Float32x4::Float32x4Verify() { CHECK(IsFloat32x4()); }
+
+
void ByteArray::ByteArrayVerify() {
CHECK(IsByteArray());
}
@@ -242,6 +241,7 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
CHECK(IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
Traits::kInstanceType);
+ CHECK(base_pointer() == this);
}
@@ -257,7 +257,7 @@ void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
- if (GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS) {
+ if (HasSloppyArgumentsElements()) {
CHECK(this->elements()->IsFixedArray());
CHECK_GE(this->elements()->length(), 2);
}
@@ -402,20 +402,6 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
-void ConstantPoolArray::ConstantPoolArrayVerify() {
- CHECK(IsConstantPoolArray());
- ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR);
- while (!code_iter.is_finished()) {
- Address code_entry = get_code_ptr_entry(code_iter.next_index());
- VerifyPointer(Code::GetCodeFromTargetAddress(code_entry));
- }
- ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR);
- while (!heap_iter.is_finished()) {
- VerifyObjectField(OffsetOfElementAt(heap_iter.next_index()));
- }
-}
-
-
void JSGeneratorObject::JSGeneratorObjectVerify() {
// In an expression like "new g()", there can be a point where a generator
// object is allocated but its fields are all undefined, as it hasn't yet been
@@ -493,8 +479,6 @@ void JSDate::JSDateVerify() {
void JSMessageObject::JSMessageObjectVerify() {
CHECK(IsJSMessageObject());
- CHECK(type()->IsString());
- CHECK(arguments()->IsJSArray());
VerifyObjectField(kStartPositionOffset);
VerifyObjectField(kEndPositionOffset);
VerifyObjectField(kArgumentsOffset);
@@ -1070,6 +1054,11 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_objects_with_fast_properties_++;
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
+ } else if (IsGlobalObject()) {
+ GlobalDictionary* dict = global_dictionary();
+ info->number_of_slow_used_properties_ += dict->NumberOfElements();
+ info->number_of_slow_unused_properties_ +=
+ dict->Capacity() - dict->NumberOfElements();
} else {
NameDictionary* dict = property_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
@@ -1115,7 +1104,8 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
break;
}
}
@@ -1288,4 +1278,5 @@ void Code::VerifyEmbeddedObjects(VerifyMode mode) {
#endif // DEBUG
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index cc264f6aef..fbc2c4ee76 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -16,7 +16,6 @@
#include "src/base/bits.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
-#include "src/elements.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
#include "src/heap/heap-inl.h"
@@ -142,8 +141,8 @@ int PropertyDetails::field_width_in_words() const {
bool Object::IsFixedArrayBase() const {
- return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
- IsFixedTypedArrayBase() || IsExternalArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase() ||
+ IsExternalArray();
}
@@ -170,6 +169,7 @@ bool Object::IsHeapObject() const {
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
+TYPE_CHECKER(Float32x4, FLOAT32X4_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
@@ -275,6 +275,24 @@ bool Object::HasValidElements() {
}
+bool Object::KeyEquals(Object* second) {
+ Object* first = this;
+ if (second->IsNumber()) {
+ if (first->IsNumber()) return first->Number() == second->Number();
+ Object* temp = first;
+ first = second;
+ second = temp;
+ }
+ if (first->IsNumber()) {
+ DCHECK_LE(0, first->Number());
+ uint32_t expected = static_cast<uint32_t>(first->Number());
+ uint32_t index;
+ return Name::cast(second)->AsArrayIndex(&index) && index == expected;
+ }
+ return Name::cast(first)->Equals(Name::cast(second));
+}
+
+
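
KeyEquals above canonicalizes the mixed Number/Name comparison by swapping so a numeric key, if present, comes first; a Name then equals it only when the name parses as the same array index. A standalone sketch over toy key types (not V8's Object model; AsArrayIndex here follows the usual canonical-decimal rule with a maximum index of 2^32 - 2):

```cpp
#include <cstdint>
#include <optional>
#include <string>
#include <utility>

// Toy stand-in for V8's Object* keys: either an array index or a name.
struct Key {
  std::optional<uint32_t> index;  // set for Number keys
  std::string name;               // set for Name keys
};

std::optional<uint32_t> AsArrayIndex(const std::string& s) {
  if (s.empty() || (s.size() > 1 && s[0] == '0')) return std::nullopt;
  uint64_t v = 0;
  for (char c : s) {
    if (c < '0' || c > '9') return std::nullopt;
    v = v * 10 + static_cast<uint64_t>(c - '0');
    if (v > 0xfffffffe) return std::nullopt;  // max array index is 2^32 - 2
  }
  return static_cast<uint32_t>(v);
}

// Mirrors KeyEquals: swap so a numeric key comes first, then compare the
// other side as a parsed array index; otherwise compare names directly.
bool KeyEquals(Key a, Key b) {
  if (b.index && !a.index) std::swap(a, b);
  if (a.index) {
    if (b.index) return *a.index == *b.index;
    const auto idx = AsArrayIndex(b.name);
    return idx && *idx == *a.index;
  }
  return a.name == b.name;
}
```
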
Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> object,
Representation representation) {
@@ -700,7 +718,6 @@ TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
bool Object::IsJSWeakCollection() const {
@@ -873,6 +890,9 @@ bool Object::IsWeakHashTable() const {
}
+bool Object::IsWeakValueHashTable() const { return IsHashTable(); }
+
+
bool Object::IsDictionary() const {
return IsHashTable() &&
this != HeapObject::cast(this)->GetHeap()->string_table();
@@ -884,6 +904,9 @@ bool Object::IsNameDictionary() const {
}
+bool Object::IsGlobalDictionary() const { return IsDictionary(); }
+
+
bool Object::IsSeededNumberDictionary() const {
return IsDictionary();
}
@@ -1005,10 +1028,7 @@ bool Object::IsJSGlobalProxy() const {
bool Object::IsGlobalObject() const {
if (!IsHeapObject()) return false;
-
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return type == JS_GLOBAL_OBJECT_TYPE ||
- type == JS_BUILTINS_OBJECT_TYPE;
+ return HeapObject::cast(this)->map()->IsGlobalObjectMap();
}
@@ -1138,20 +1158,18 @@ bool Object::HasSpecificClassOf(String* name) {
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
- Handle<Name> name) {
+ Handle<Name> name,
+ LanguageMode language_mode) {
LookupIterator it(object, name);
- return GetProperty(&it);
+ return GetProperty(&it, language_mode);
}
-MaybeHandle<Object> Object::GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- // GetElement can trigger a getter which can cause allocation.
- // This was not always the case. This DCHECK is here to catch
- // leftover incorrect uses.
- DCHECK(AllowHeapAllocation::IsAllowed());
- return Object::GetElementWithReceiver(isolate, object, object, index);
+MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
+ uint32_t index,
+ LanguageMode language_mode) {
+ LookupIterator it(isolate, object, index);
+ return GetProperty(&it, language_mode);
}
@@ -1168,52 +1186,11 @@ Handle<Object> Object::GetPrototypeSkipHiddenPrototypes(
}
-MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
- Handle<Name> name) {
- uint32_t index;
- Isolate* isolate = name->GetIsolate();
- if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index);
- return GetProperty(object, name);
-}
-
-
-MaybeHandle<Object> Object::GetProperty(Isolate* isolate,
- Handle<Object> object,
- const char* name) {
+MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
+ const char* name,
+ LanguageMode language_mode) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- DCHECK(!str.is_null());
-#ifdef DEBUG
- uint32_t index; // Assert that the name is not an array index.
- DCHECK(!str->AsArrayIndex(&index));
-#endif // DEBUG
- return GetProperty(object, str);
-}
-
-
-MaybeHandle<Object> JSProxy::GetElementWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- uint32_t index) {
- return GetPropertyWithHandler(
- proxy, receiver, proxy->GetIsolate()->factory()->Uint32ToString(index));
-}
-
-
-MaybeHandle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return SetPropertyWithHandler(proxy, receiver, name, value, language_mode);
-}
-
-
-Maybe<bool> JSProxy::HasElementWithHandler(Handle<JSProxy> proxy,
- uint32_t index) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return HasPropertyWithHandler(proxy, name);
+ return GetProperty(object, str, language_mode);
}
@@ -1254,55 +1231,22 @@ Maybe<bool> JSProxy::HasElementWithHandler(Handle<JSProxy> proxy,
heap->RecordWrite(object->address(), offset); \
}
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- } \
- }
-
-#ifndef V8_TARGET_ARCH_MIPS
- #define READ_DOUBLE_FIELD(p, offset) \
- (*reinterpret_cast<const double*>(FIELD_ADDR_CONST(p, offset)))
-#else // V8_TARGET_ARCH_MIPS
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned HeapNumber::value.
- static inline double read_double_field(const void* p, int offset) {
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = (*reinterpret_cast<const uint32_t*>(
- FIELD_ADDR_CONST(p, offset)));
- c.u[1] = (*reinterpret_cast<const uint32_t*>(
- FIELD_ADDR_CONST(p, offset + 4)));
- return c.d;
- }
- #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
-#endif // V8_TARGET_ARCH_MIPS
-
-#ifndef V8_TARGET_ARCH_MIPS
- #define WRITE_DOUBLE_FIELD(p, offset, value) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
-#else // V8_TARGET_ARCH_MIPS
- // Prevent gcc from using store-double (mips sdc1) on (possibly)
- // non-64-bit aligned HeapNumber::value.
- static inline void write_double_field(void* p, int offset,
- double value) {
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.d = value;
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
- }
- #define WRITE_DOUBLE_FIELD(p, offset, value) \
- write_double_field(p, offset, value)
-#endif // V8_TARGET_ARCH_MIPS
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ } \
+ if (heap->InNewSpace(value)) { \
+ heap->RecordWrite(object->address(), offset); \
+ } \
+ }
+
+#define READ_DOUBLE_FIELD(p, offset) \
+ ReadDoubleValue(FIELD_ADDR_CONST(p, offset))
+#define WRITE_DOUBLE_FIELD(p, offset, value) \
+ WriteDoubleValue(FIELD_ADDR(p, offset), value)
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
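
READ_DOUBLE_FIELD/WRITE_DOUBLE_FIELD previously needed a MIPS-only union workaround to avoid ldc1/sdc1 on possibly unaligned HeapNumber fields; they now defer to shared ReadDoubleValue/WriteDoubleValue helpers. The reworked CONDITIONAL_WRITE_BARRIER in the same hunk also tightens the modes: the incremental-marking hook fires only for UPDATE_WRITE_BARRIER, while the new-space record now happens for every mode except SKIP_WRITE_BARRIER. The portable way to express the double helpers is memcpy, which never assumes alignment yet compiles to a single load/store where the target allows it; a sketch of the shape (the real definitions live elsewhere in V8's utilities):

```cpp
#include <cstring>

// Portable unaligned double access in the spirit of ReadDoubleValue /
// WriteDoubleValue: memcpy never assumes alignment, and modern compilers
// emit a plain load/store when the target architecture permits it.
inline double ReadDoubleValueSketch(const void* p) {
  double value;
  std::memcpy(&value, p, sizeof value);
  return value;
}

inline void WriteDoubleValueSketch(void* p, double value) {
  std::memcpy(p, &value, sizeof value);
}
```
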
@@ -1328,6 +1272,12 @@ Maybe<bool> JSProxy::HasElementWithHandler(Handle<JSProxy> proxy,
#define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_FLOAT_FIELD(p, offset) \
+ (*reinterpret_cast<const float*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_FLOAT_FIELD(p, offset, value) \
+ (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_UINT64_FIELD(p, offset) \
(*reinterpret_cast<const uint64_t*>(FIELD_ADDR_CONST(p, offset)))
@@ -1530,22 +1480,39 @@ int HeapObject::Size() {
}
-bool HeapObject::MayContainRawValues() {
+HeapObjectContents HeapObject::ContentType() {
InstanceType type = map()->instance_type();
if (type <= LAST_NAME_TYPE) {
if (type == SYMBOL_TYPE) {
- return false;
+ return HeapObjectContents::kTaggedValues;
}
DCHECK(type < FIRST_NONSTRING_TYPE);
// There are four string representations: sequential strings, external
// strings, cons strings, and sliced strings.
// Only the former two contain raw values and no heap pointers (besides the
// map-word).
- return ((type & kIsIndirectStringMask) != kIsIndirectStringTag);
+ if (((type & kIsIndirectStringMask) != kIsIndirectStringTag))
+ return HeapObjectContents::kRawValues;
+ else
+ return HeapObjectContents::kTaggedValues;
+#if 0
+ // TODO(jochen): Enable eventually.
+ } else if (type == JS_FUNCTION_TYPE) {
+ return HeapObjectContents::kMixedValues;
+#endif
+ } else if (type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
+ type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
+ return HeapObjectContents::kMixedValues;
+ } else if (type <= LAST_DATA_TYPE) {
+ // TODO(jochen): Why do we claim that Code and Map contain only raw values?
+ return HeapObjectContents::kRawValues;
+ } else {
+ if (FLAG_unbox_double_fields) {
+ LayoutDescriptorHelper helper(map());
+ if (!helper.all_fields_tagged()) return HeapObjectContents::kMixedValues;
+ }
+ return HeapObjectContents::kTaggedValues;
}
- // The ConstantPoolArray contains heap pointers, but also raw values.
- if (type == CONSTANT_POOL_ARRAY_TYPE) return true;
- return (type <= LAST_DATA_TYPE);
}
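
ContentType above turns the old MayContainRawValues boolean into a three-way answer so the GC can distinguish objects it may scan wholesale (kTaggedValues), skip entirely (kRawValues), or must walk with layout information (kMixedValues, e.g. fixed typed arrays whose base_pointer is tagged but whose payload is raw, or maps with unboxed double fields). A minimal model of the consumer side (everything beyond the enum is illustrative):

```cpp
enum class HeapObjectContents { kTaggedValues, kRawValues, kMixedValues };

// Sketch of the consumer side: raw bodies are skipped, tagged bodies are
// scanned wholesale, mixed bodies need per-field layout information.
template <typename VisitTagged, typename VisitWithLayout>
void VisitBody(HeapObjectContents contents, VisitTagged visit_all,
               VisitWithLayout visit_by_layout) {
  switch (contents) {
    case HeapObjectContents::kRawValues:
      break;  // no heap pointers in the payload
    case HeapObjectContents::kTaggedValues:
      visit_all();
      break;
    case HeapObjectContents::kMixedValues:
      visit_by_layout();
      break;
  }
}
```
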
@@ -1586,6 +1553,30 @@ int HeapNumber::get_sign() {
}
+float Float32x4::get_lane(int lane) const {
+ DCHECK(lane < 4 && lane >= 0);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ return READ_FLOAT_FIELD(this, kValueOffset + lane * kFloatSize);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ return READ_FLOAT_FIELD(this, kValueOffset + (3 - lane) * kFloatSize);
+#else
+#error Unknown byte ordering
+#endif
+}
+
+
+void Float32x4::set_lane(int lane, float value) {
+ DCHECK(lane < 4 && lane >= 0);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ WRITE_FLOAT_FIELD(this, kValueOffset + lane * kFloatSize, value);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ WRITE_FLOAT_FIELD(this, kValueOffset + (3 - lane) * kFloatSize, value);
+#else
+#error Unknown byte ordering
+#endif
+}
+
+
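
get_lane/set_lane above keep lane numbering platform-independent: lane i lives at byte offset i * kFloatSize on little-endian targets and (3 - i) * kFloatSize on big-endian ones, so lane 0 always names the same logical element. The same idea over a plain array (sketch; the endianness flag is an assumption here, not a V8 symbol):

```cpp
// Lane addressing in the spirit of Float32x4::get_lane / set_lane above.
constexpr bool kLittleEndian = true;  // assumed target byte order

float GetLane(const float storage[4], int lane) {
  const int slot = kLittleEndian ? lane : 3 - lane;
  return storage[slot];
}

void SetLane(float storage[4], int lane, float value) {
  const int slot = kLittleEndian ? lane : 3 - lane;
  storage[slot] = value;
}
```
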
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
@@ -1611,16 +1602,6 @@ FixedArrayBase* JSObject::elements() const {
}
-void JSObject::ValidateElements(Handle<JSObject> object) {
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- ElementsAccessor* accessor = object->GetElementsAccessor();
- accessor->Validate(object);
- }
-#endif
-}
-
-
void AllocationSite::Initialize() {
set_transition_info(Smi::FromInt(0));
SetElementsKind(GetInitialFastElementsKind());
@@ -1850,7 +1831,7 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
bool JSObject::WouldConvertToSlowElements(Handle<Object> key) {
- uint32_t index;
+ uint32_t index = 0;
return key->ToArrayIndex(&index) && WouldConvertToSlowElements(index);
}
@@ -1904,6 +1885,7 @@ void Oddball::set_kind(byte value) {
ACCESSORS(Cell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
+ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
@@ -1941,6 +1923,14 @@ void WeakCell::set_next(Object* val, WriteBarrierMode mode) {
}
+void WeakCell::clear_next(Heap* heap) {
+ set_next(heap->the_hole_value(), SKIP_WRITE_BARRIER);
+}
+
+
+bool WeakCell::next_cleared() { return next()->IsTheHole(); }
+
+
int JSObject::GetHeaderSize() {
InstanceType type = map()->instance_type();
// Check for the most common kind of JavaScript object before
@@ -2178,14 +2168,6 @@ bool JSObject::HasFastProperties() {
}
-MaybeHandle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode) {
- return JSObject::SetOwnElement(object, index, value, NONE, language_mode);
-}
-
-
bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
@@ -2204,7 +2186,7 @@ void Struct::InitializeBody(int object_size) {
}
-bool Object::ToArrayIndex(uint32_t* index) {
+bool Object::ToArrayLength(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
if (value < 0) return false;
@@ -2223,6 +2205,11 @@ bool Object::ToArrayIndex(uint32_t* index) {
}
+bool Object::ToArrayIndex(uint32_t* index) {
+ return ToArrayLength(index) && *index != kMaxUInt32;
+}
+
+
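
The ToArrayLength/ToArrayIndex split above encodes an ECMAScript distinction: a valid array length is any value in [0, 2^32 - 1], but a valid array index is strictly smaller, [0, 2^32 - 2], because 2^32 - 1 must remain an ordinary property key. Hence ToArrayIndex is ToArrayLength plus the != kMaxUInt32 check:

```cpp
#include <cstdint>

constexpr uint64_t kMaxUInt32 = 0xffffffffull;

// ES semantics: array lengths occupy [0, 2^32 - 1], array indexes
// [0, 2^32 - 2]; 2^32 - 1 is a plain property key, never an element.
bool IsValidArrayLength(uint64_t value) { return value <= kMaxUInt32; }
bool IsValidArrayIndex(uint64_t value) { return value < kMaxUInt32; }
```
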
bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
if (!this->IsJSValue()) return false;
@@ -2421,387 +2408,6 @@ void ArrayList::Clear(int index, Object* undefined) {
}
-void ConstantPoolArray::NumberOfEntries::increment(Type type) {
- DCHECK(type < NUMBER_OF_TYPES);
- element_counts_[type]++;
-}
-
-
-int ConstantPoolArray::NumberOfEntries::equals(
- const ConstantPoolArray::NumberOfEntries& other) const {
- for (int i = 0; i < NUMBER_OF_TYPES; i++) {
- if (element_counts_[i] != other.element_counts_[i]) return false;
- }
- return true;
-}
-
-
-bool ConstantPoolArray::NumberOfEntries::is_empty() const {
- return total_count() == 0;
-}
-
-
-int ConstantPoolArray::NumberOfEntries::count_of(Type type) const {
- DCHECK(type < NUMBER_OF_TYPES);
- return element_counts_[type];
-}
-
-
-int ConstantPoolArray::NumberOfEntries::base_of(Type type) const {
- int base = 0;
- DCHECK(type < NUMBER_OF_TYPES);
- for (int i = 0; i < type; i++) {
- base += element_counts_[i];
- }
- return base;
-}
-
-
-int ConstantPoolArray::NumberOfEntries::total_count() const {
- int count = 0;
- for (int i = 0; i < NUMBER_OF_TYPES; i++) {
- count += element_counts_[i];
- }
- return count;
-}
-
-
-int ConstantPoolArray::NumberOfEntries::are_in_range(int min, int max) const {
- for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
- if (element_counts_[i] < min || element_counts_[i] > max) {
- return false;
- }
- }
- return true;
-}
-
-
-int ConstantPoolArray::Iterator::next_index() {
- DCHECK(!is_finished());
- int ret = next_index_++;
- update_section();
- return ret;
-}
-
-
-bool ConstantPoolArray::Iterator::is_finished() {
- return next_index_ > array_->last_index(type_, final_section_);
-}
-
-
-void ConstantPoolArray::Iterator::update_section() {
- if (next_index_ > array_->last_index(type_, current_section_) &&
- current_section_ != final_section_) {
- DCHECK(final_section_ == EXTENDED_SECTION);
- current_section_ = EXTENDED_SECTION;
- next_index_ = array_->first_index(type_, EXTENDED_SECTION);
- }
-}
-
-
-bool ConstantPoolArray::is_extended_layout() {
- uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
- return IsExtendedField::decode(small_layout_1);
-}
-
-
-ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() {
- return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION;
-}
-
-
-int ConstantPoolArray::first_extended_section_index() {
- DCHECK(is_extended_layout());
- uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
- return TotalCountField::decode(small_layout_2);
-}
-
-
-int ConstantPoolArray::get_extended_section_header_offset() {
- return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size);
-}
-
-
-ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() {
- uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
- return WeakObjectStateField::decode(small_layout_2);
-}
-
-
-void ConstantPoolArray::set_weak_object_state(
- ConstantPoolArray::WeakObjectState state) {
- uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
- small_layout_2 = WeakObjectStateField::update(small_layout_2, state);
- WRITE_INT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
-}
-
-
-int ConstantPoolArray::first_index(Type type, LayoutSection section) {
- int index = 0;
- if (section == EXTENDED_SECTION) {
- DCHECK(is_extended_layout());
- index += first_extended_section_index();
- }
-
- for (Type type_iter = FIRST_TYPE; type_iter < type;
- type_iter = next_type(type_iter)) {
- index += number_of_entries(type_iter, section);
- }
-
- return index;
-}
-
-
-int ConstantPoolArray::last_index(Type type, LayoutSection section) {
- return first_index(type, section) + number_of_entries(type, section) - 1;
-}
-
-
-int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) {
- if (section == SMALL_SECTION) {
- uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
- uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
- switch (type) {
- case INT64:
- return Int64CountField::decode(small_layout_1);
- case CODE_PTR:
- return CodePtrCountField::decode(small_layout_1);
- case HEAP_PTR:
- return HeapPtrCountField::decode(small_layout_1);
- case INT32:
- return Int32CountField::decode(small_layout_2);
- default:
- UNREACHABLE();
- return 0;
- }
- } else {
- DCHECK(section == EXTENDED_SECTION && is_extended_layout());
- int offset = get_extended_section_header_offset();
- switch (type) {
- case INT64:
- offset += kExtendedInt64CountOffset;
- break;
- case CODE_PTR:
- offset += kExtendedCodePtrCountOffset;
- break;
- case HEAP_PTR:
- offset += kExtendedHeapPtrCountOffset;
- break;
- case INT32:
- offset += kExtendedInt32CountOffset;
- break;
- default:
- UNREACHABLE();
- }
- return READ_INT_FIELD(this, offset);
- }
-}
-
-
-bool ConstantPoolArray::offset_is_type(int offset, Type type) {
- return (offset >= OffsetOfElementAt(first_index(type, SMALL_SECTION)) &&
- offset <= OffsetOfElementAt(last_index(type, SMALL_SECTION))) ||
- (is_extended_layout() &&
- offset >= OffsetOfElementAt(first_index(type, EXTENDED_SECTION)) &&
- offset <= OffsetOfElementAt(last_index(type, EXTENDED_SECTION)));
-}
-
-
-ConstantPoolArray::Type ConstantPoolArray::get_type(int index) {
- LayoutSection section;
- if (is_extended_layout() && index >= first_extended_section_index()) {
- section = EXTENDED_SECTION;
- } else {
- section = SMALL_SECTION;
- }
-
- Type type = FIRST_TYPE;
- while (index > last_index(type, section)) {
- type = next_type(type);
- }
- DCHECK(type <= LAST_TYPE);
- return type;
-}
-
-
-int64_t ConstantPoolArray::get_int64_entry(int index) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == INT64);
- return READ_INT64_FIELD(this, OffsetOfElementAt(index));
-}
-
-
-double ConstantPoolArray::get_int64_entry_as_double(int index) {
- STATIC_ASSERT(kDoubleSize == kInt64Size);
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == INT64);
- return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
-}
-
-
-Address ConstantPoolArray::get_code_ptr_entry(int index) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == CODE_PTR);
- return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
-}
-
-
-Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == HEAP_PTR);
- return READ_FIELD(this, OffsetOfElementAt(index));
-}
-
-
-int32_t ConstantPoolArray::get_int32_entry(int index) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == INT32);
- return READ_INT32_FIELD(this, OffsetOfElementAt(index));
-}
-
-
-void ConstantPoolArray::set(int index, int64_t value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == INT64);
- WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
-}
-
-
-void ConstantPoolArray::set(int index, double value) {
- STATIC_ASSERT(kDoubleSize == kInt64Size);
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == INT64);
- WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
-}
-
-
-void ConstantPoolArray::set(int index, Address value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == CODE_PTR);
- WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
-}
-
-
-void ConstantPoolArray::set(int index, Object* value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(!GetHeap()->InNewSpace(value));
- DCHECK(get_type(index) == HEAP_PTR);
- WRITE_FIELD(this, OffsetOfElementAt(index), value);
- WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
-}
-
-
-void ConstantPoolArray::set(int index, int32_t value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(get_type(index) == INT32);
- WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
-}
-
-
-void ConstantPoolArray::set_at_offset(int offset, int32_t value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(offset_is_type(offset, INT32));
- WRITE_INT32_FIELD(this, offset, value);
-}
-
-
-void ConstantPoolArray::set_at_offset(int offset, int64_t value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(offset_is_type(offset, INT64));
- WRITE_INT64_FIELD(this, offset, value);
-}
-
-
-void ConstantPoolArray::set_at_offset(int offset, double value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(offset_is_type(offset, INT64));
- WRITE_DOUBLE_FIELD(this, offset, value);
-}
-
-
-void ConstantPoolArray::set_at_offset(int offset, Address value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(offset_is_type(offset, CODE_PTR));
- WRITE_FIELD(this, offset, reinterpret_cast<Object*>(value));
- WRITE_BARRIER(GetHeap(), this, offset, reinterpret_cast<Object*>(value));
-}
-
-
-void ConstantPoolArray::set_at_offset(int offset, Object* value) {
- DCHECK(map() == GetHeap()->constant_pool_array_map());
- DCHECK(!GetHeap()->InNewSpace(value));
- DCHECK(offset_is_type(offset, HEAP_PTR));
- WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-void ConstantPoolArray::Init(const NumberOfEntries& small) {
- uint32_t small_layout_1 =
- Int64CountField::encode(small.count_of(INT64)) |
- CodePtrCountField::encode(small.count_of(CODE_PTR)) |
- HeapPtrCountField::encode(small.count_of(HEAP_PTR)) |
- IsExtendedField::encode(false);
- uint32_t small_layout_2 =
- Int32CountField::encode(small.count_of(INT32)) |
- TotalCountField::encode(small.total_count()) |
- WeakObjectStateField::encode(NO_WEAK_OBJECTS);
- WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
- WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
- if (kHeaderSize != kFirstEntryOffset) {
- DCHECK(kFirstEntryOffset - kHeaderSize == kInt32Size);
- WRITE_UINT32_FIELD(this, kHeaderSize, 0); // Zero out header padding.
- }
-}
-
-
-void ConstantPoolArray::InitExtended(const NumberOfEntries& small,
- const NumberOfEntries& extended) {
- // Initialize small layout fields first.
- Init(small);
-
- // Set is_extended_layout field.
- uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
- small_layout_1 = IsExtendedField::update(small_layout_1, true);
- WRITE_INT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
-
- // Initialize the extended layout fields.
- int extended_header_offset = get_extended_section_header_offset();
- WRITE_INT32_FIELD(this, extended_header_offset + kExtendedInt64CountOffset,
- extended.count_of(INT64));
- WRITE_INT32_FIELD(this, extended_header_offset + kExtendedCodePtrCountOffset,
- extended.count_of(CODE_PTR));
- WRITE_INT32_FIELD(this, extended_header_offset + kExtendedHeapPtrCountOffset,
- extended.count_of(HEAP_PTR));
- WRITE_INT32_FIELD(this, extended_header_offset + kExtendedInt32CountOffset,
- extended.count_of(INT32));
-}
-
-
-int ConstantPoolArray::size() {
- NumberOfEntries small(this, SMALL_SECTION);
- if (!is_extended_layout()) {
- return SizeFor(small);
- } else {
- NumberOfEntries extended(this, EXTENDED_SECTION);
- return SizeForExtended(small, extended);
- }
-}
-
-
-int ConstantPoolArray::length() {
- uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
- int length = TotalCountField::decode(small_layout_2);
- if (is_extended_layout()) {
- length += number_of_entries(INT64, EXTENDED_SECTION) +
- number_of_entries(CODE_PTR, EXTENDED_SECTION) +
- number_of_entries(HEAP_PTR, EXTENDED_SECTION) +
- number_of_entries(INT32, EXTENDED_SECTION);
- }
- return length;
-}
-
-
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
@@ -2811,14 +2417,16 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
}
-bool HeapObject::NeedsToEnsureDoubleAlignment() {
-#ifndef V8_HOST_ARCH_64_BIT
- return (IsFixedFloat64Array() || IsFixedDoubleArray() ||
- IsConstantPoolArray()) &&
- FixedArrayBase::cast(this)->length() != 0;
-#else
- return false;
-#endif // V8_HOST_ARCH_64_BIT
+AllocationAlignment HeapObject::RequiredAlignment() {
+#ifdef V8_HOST_ARCH_32_BIT
+ if ((IsFixedFloat64Array() || IsFixedDoubleArray()) &&
+ FixedArrayBase::cast(this)->length() != 0) {
+ return kDoubleAligned;
+ }
+ if (IsHeapNumber()) return kDoubleUnaligned;
+ if (IsFloat32x4()) return kSimd128Unaligned;
+#endif // V8_HOST_ARCH_32_BIT
+ return kWordAligned;
}
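
For orientation: the old boolean query is generalized here into an explicit alignment request that the allocator can act on. Below is a minimal standalone model of how a 32-bit allocator might consume the request; the enum values mirror the diff, but the filler logic is an assumption based on the 32-bit layout (kDoubleUnaligned places the object one word off an 8-byte boundary so the double field after the map word lands aligned), not V8's exact allocator code.

#include <cstdint>
#include <cstdio>

// Illustrative mirror of the alignment requests returned above.
enum AllocationAlignment {
  kWordAligned, kDoubleAligned, kDoubleUnaligned, kSimd128Unaligned
};

// Sketch: filler bytes a 32-bit allocator would insert before an object at
// `address` to honor the request (word size 4, double size 8).
int FillerSize(uintptr_t address, AllocationAlignment alignment) {
  bool on_double_boundary = (address & 7) == 0;
  if (alignment == kDoubleAligned && !on_double_boundary) return 4;
  if (alignment == kDoubleUnaligned && on_double_boundary) return 4;
  return 0;  // kWordAligned: nothing to do on a word-aligned address.
}

int main() {
  printf("%d\n", FillerSize(0x1004, kDoubleAligned));    // 4
  printf("%d\n", FillerSize(0x1000, kDoubleUnaligned));  // 4
}
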
@@ -3277,11 +2885,18 @@ int HashTable<Derived, Shape, Key>::FindEntry(Key key) {
}
-// Find entry for key otherwise return kNotFound.
template<typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) {
+ return FindEntry(isolate, key, HashTable::Hash(key));
+}
+
+
+// Find entry for key otherwise return kNotFound.
+template <typename Derived, typename Shape, typename Key>
+int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
+ int32_t hash) {
uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(HashTable::Hash(key), capacity);
+ uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
@@ -3304,6 +2919,7 @@ bool SeededNumberDictionary::requires_slow_elements() {
(Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
}
+
uint32_t SeededNumberDictionary::max_number_key() {
DCHECK(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
@@ -3312,6 +2928,7 @@ uint32_t SeededNumberDictionary::max_number_key() {
return value >> kRequiresSlowElementsTagSize;
}
+
void SeededNumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
@@ -3329,7 +2946,6 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
@@ -3351,7 +2967,9 @@ CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
@@ -3409,6 +3027,7 @@ CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
+CAST_ACCESSOR(WeakValueHashTable)
// static
@@ -3541,7 +3160,6 @@ bool Name::Equals(Handle<Name> one, Handle<Name> two) {
ACCESSORS(Symbol, name, Object, kNameOffset)
ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
-BOOL_ACCESSORS(Symbol, flags, is_own, kOwnBit)
bool String::Equals(String* other) {
@@ -3570,6 +3188,12 @@ Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
}
+Handle<Name> Name::Flatten(Handle<Name> name, PretenureFlag pretenure) {
+ if (name->IsSymbol()) return name;
+ return String::Flatten(Handle<String>::cast(name), pretenure);
+}
+
+
uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
@@ -4223,6 +3847,9 @@ void ExternalFloat64Array::set(int index, double value) {
}
+ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
+
+
void* FixedTypedArrayBase::DataPtr() {
return FIELD_ADDR(this, kDataOffset);
}
@@ -4371,28 +3998,20 @@ Handle<Object> FixedTypedArray<Traits>::get(
template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::SetValue(
- Handle<JSObject> holder, Handle<FixedTypedArray<Traits> > array,
- uint32_t index, Handle<Object> value) {
+void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
ElementType cast_value = Traits::defaultValue();
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
- if (!view->WasNeutered()) {
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsSmi()) {
- int int_value = Handle<Smi>::cast(value)->value();
- cast_value = from_int(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = Handle<HeapNumber>::cast(value)->value();
- cast_value = from_double(double_value);
- } else {
- // Clamp undefined to the default value. All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- array->set(index, cast_value);
- }
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ cast_value = from_int(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = from_double(double_value);
+ } else {
+ // Clamp undefined to the default value. All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
}
- return Traits::ToHandle(array->GetIsolate(), cast_value);
+ set(index, cast_value);
}
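
The rewrite strips the neutering and bounds checks out of SetValue (callers are now responsible for both) and keeps only the value-conversion policy: Smi via from_int, HeapNumber via from_double, undefined clamped to the default. A self-contained sketch of those hooks for one element type follows; the names mirror the Traits interface in the diff, but the bodies are illustrative and the rounding is simplified (real Uint8Clamped arrays round ties to even).

#include <cmath>
#include <cstdint>

struct Uint8ClampedTraits {
  typedef uint8_t ElementType;
  static ElementType defaultValue() { return 0; }  // what undefined clamps to
  static ElementType from_int(int v) {
    return v < 0 ? 0 : v > 255 ? 255 : static_cast<ElementType>(v);
  }
  static ElementType from_double(double v) {
    if (std::isnan(v) || v < 0) return 0;
    if (v > 255) return 255;
    return static_cast<ElementType>(std::lround(v));  // simplified rounding
  }
};
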
@@ -4517,9 +4136,6 @@ int HeapObject::SizeFromMap(Map* map) {
return FixedDoubleArray::SizeFor(
reinterpret_cast<FixedDoubleArray*>(this)->length());
}
- if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
- return reinterpret_cast<ConstantPoolArray*>(this)->size();
- }
if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
return reinterpret_cast<FixedTypedArrayBase*>(
@@ -4706,6 +4322,16 @@ bool Map::is_migration_target() {
}
+void Map::set_is_strong() {
+ set_bit_field3(IsStrong::update(bit_field3(), true));
+}
+
+
+bool Map::is_strong() {
+ return IsStrong::decode(bit_field3());
+}
+
+
void Map::set_counter(int value) {
set_bit_field3(Counter::update(bit_field3(), value));
}
@@ -4899,18 +4525,6 @@ inline void Code::set_can_have_weak_objects(bool value) {
}
-bool Code::optimizable() {
- DCHECK_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
-}
-
-
-void Code::set_optimizable(bool value) {
- DCHECK_EQ(FUNCTION, kind());
- WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
-}
-
-
bool Code::has_deoptimization_support() {
DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
@@ -5067,9 +4681,7 @@ bool Code::back_edges_patched_for_osr() {
}
-byte Code::to_boolean_state() {
- return extra_ic_state();
-}
+uint16_t Code::to_boolean_state() { return extra_ic_state(); }
bool Code::has_function_cache() {
@@ -5124,15 +4736,15 @@ bool Code::is_debug_stub() {
}
-ConstantPoolArray* Code::constant_pool() {
- return ConstantPoolArray::cast(READ_FIELD(this, kConstantPoolOffset));
-}
-
-
-void Code::set_constant_pool(Object* value) {
- DCHECK(value->IsConstantPoolArray());
- WRITE_FIELD(this, kConstantPoolOffset, value);
- WRITE_BARRIER(GetHeap(), this, kConstantPoolOffset, value);
+Address Code::constant_pool() {
+ Address constant_pool = NULL;
+ if (FLAG_enable_embedded_constant_pool) {
+ int offset = constant_pool_offset();
+ if (offset < instruction_size()) {
+ constant_pool = FIELD_ADDR(this, kHeaderSize + offset);
+ }
+ }
+ return constant_pool;
}
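
Code::constant_pool() changes representation entirely: instead of referencing a separate ConstantPoolArray heap object, the pool (when FLAG_enable_embedded_constant_pool is set) is a region embedded in the code body at constant_pool_offset(), and an offset equal to instruction_size() encodes "no pool". A toy model of the addressing, with illustrative field names:

#include <cstdint>

struct CodeLayout {
  uint8_t* body;         // first byte after the header (kHeaderSize)
  int instruction_size;  // total body size, pool included
  int pool_offset;       // == instruction_size when there is no pool

  uint8_t* constant_pool() const {
    return pool_offset < instruction_size ? body + pool_offset : nullptr;
  }
};
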
@@ -5218,7 +4830,6 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
bool Code::IsWeakObjectInOptimizedCode(Object* object) {
- if (!FLAG_collect_maps) return false;
if (object->IsMap()) {
return Map::cast(object)->CanTransition() &&
FLAG_weak_embedded_maps_in_optimized_code;
@@ -5455,6 +5066,7 @@ ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
ACCESSORS(Box, value, Object, kValueOffset)
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
+SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
ACCESSORS(PrototypeInfo, constructor_name, Object, kConstructorNameOffset)
@@ -5481,6 +5093,7 @@ ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
+SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
@@ -5531,10 +5144,8 @@ ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
kEvalFrominstructionsOffsetOffset)
+ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
ACCESSORS_TO_SMI(Script, flags, kFlagsOffset)
-BOOL_ACCESSORS(Script, flags, is_embedder_debug_script,
- kIsEmbedderDebugScriptBit)
-BOOL_ACCESSORS(Script, flags, is_shared_cross_origin, kIsSharedCrossOriginBit)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
@@ -5554,6 +5165,15 @@ void Script::set_compilation_state(CompilationState state) {
set_flags(BooleanBit::set(flags(), kCompilationStateBit,
state == COMPILATION_STATE_COMPILED));
}
+ScriptOriginOptions Script::origin_options() {
+ return ScriptOriginOptions((flags()->value() & kOriginOptionsMask) >>
+ kOriginOptionsShift);
+}
+void Script::set_origin_options(ScriptOriginOptions origin_options) {
+ DCHECK(!(origin_options.Flags() & ~((1 << kOriginOptionsSize) - 1)));
+ set_flags(Smi::FromInt((flags()->value() & ~kOriginOptionsMask) |
+ (origin_options.Flags() << kOriginOptionsShift)));
+}
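
origin_options packs the ScriptOriginOptions bits (replacing the removed is_embedder_debug_script and is_shared_cross_origin boolean accessors) into a masked range of the Smi-valued flags word. A standalone round-trip of the same mask/shift scheme; the shift and size constants here are placeholders, not V8's real values:

#include <cassert>

const int kOriginOptionsShift = 3;
const int kOriginOptionsSize = 3;
const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
                               << kOriginOptionsShift;

int SetOriginOptions(int flags, int options) {
  assert(!(options & ~((1 << kOriginOptionsSize) - 1)));  // same guard as the DCHECK
  return (flags & ~kOriginOptionsMask) | (options << kOriginOptionsShift);
}

int GetOriginOptions(int flags) {
  return (flags & kOriginOptionsMask) >> kOriginOptionsShift;
}
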
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -5619,6 +5239,8 @@ BOOL_ACCESSORS(SharedFunctionInfo,
kHasDuplicateParameters)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, never_compiled,
+ kNeverCompiled)
#if V8_HOST_ARCH_32_BIT
@@ -5716,11 +5338,6 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
kOptimizationDisabled,
disable));
- // If disabling optimizations we reflect that in the code object so
- // it will not be counted as optimizable code.
- if ((code()->kind() == Code::FUNCTION) && disable) {
- code()->set_optimizable(false);
- }
}
@@ -5757,18 +5374,18 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
}
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_super_property,
- kUsesSuperProperty)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
+ kNeedsHomeObject)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
- kInlineBuiltin)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
kNameShouldPrintAsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
+ kDontCrankshaft)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
@@ -5838,6 +5455,8 @@ void SharedFunctionInfo::ReplaceCode(Code* value) {
DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
set_code(value);
+
+ if (is_compiled()) set_never_compiled(false);
}
@@ -5962,7 +5581,6 @@ void SharedFunctionInfo::TryReenableOptimization() {
set_optimization_disabled(false);
set_opt_count(0);
set_deopt_count(0);
- code()->set_optimizable(true);
}
}
@@ -5988,6 +5606,11 @@ bool JSFunction::IsFromExtensionScript() {
}
+bool JSFunction::IsSubjectToDebugging() {
+ return !IsFromNativeScript() && !IsFromExtensionScript();
+}
+
+
bool JSFunction::NeedsArgumentsAdaption() {
return shared()->internal_formal_parameter_count() !=
SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -5999,11 +5622,6 @@ bool JSFunction::IsOptimized() {
}
-bool JSFunction::IsOptimizable() {
- return code()->kind() == Code::FUNCTION && code()->optimizable();
-}
-
-
bool JSFunction::IsMarkedForOptimization() {
return code() == GetIsolate()->builtins()->builtin(
Builtins::kCompileOptimized);
@@ -6304,8 +5922,8 @@ ACCESSORS(JSDate, min, Object, kMinOffset)
ACCESSORS(JSDate, sec, Object, kSecOffset)
-ACCESSORS(JSMessageObject, type, String, kTypeOffset)
-ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
+SMI_ACCESSORS(JSMessageObject, type, kTypeOffset)
+ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
@@ -6314,6 +5932,7 @@ SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
+INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
@@ -6325,7 +5944,6 @@ void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, NULL);
WRITE_FIELD(this, kHandlerTableOffset, NULL);
WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
- WRITE_FIELD(this, kConstantPoolOffset, NULL);
// Do not wipe out major/minor keys on a code stub or IC
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
@@ -6465,6 +6083,14 @@ void JSArrayBuffer::set_was_neutered(bool value) {
}
+bool JSArrayBuffer::is_shared() { return IsShared::decode(bit_field()); }
+
+
+void JSArrayBuffer::set_is_shared(bool value) {
+ set_bit_field(IsShared::update(bit_field(), value));
+}
+
+
Object* JSArrayBufferView::byte_offset() const {
if (WasNeutered()) return Smi::FromInt(0);
return Object::cast(READ_FIELD(this, kByteOffsetOffset));
@@ -6507,6 +6133,14 @@ Object* JSTypedArray::length() const {
}
+uint32_t JSTypedArray::length_value() const {
+ if (WasNeutered()) return 0;
+ uint32_t index = 0;
+ CHECK(Object::cast(READ_FIELD(this, kLengthOffset))->ToArrayLength(&index));
+ return index;
+}
+
+
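
length_value() hands callers an untagged uint32_t and makes neutered views uniformly report length 0; the CHECK encodes the invariant that a live typed array's length field always converts to a valid array length, so callers can use the result directly in bounds checks.
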
void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kLengthOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
@@ -6591,7 +6225,7 @@ ElementsKind JSObject::GetElementsKind() {
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
- DCHECK((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
+ DCHECK(!IsSloppyArgumentsElements(kind) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
}
#endif
@@ -6599,11 +6233,6 @@ ElementsKind JSObject::GetElementsKind() {
}
-ElementsAccessor* JSObject::GetElementsAccessor() {
- return ElementsAccessor::ForKind(GetElementsKind());
-}
-
-
bool JSObject::HasFastObjectElements() {
return IsFastObjectElementsKind(GetElementsKind());
}
@@ -6639,8 +6268,18 @@ bool JSObject::HasDictionaryElements() {
}
+bool JSObject::HasFastArgumentsElements() {
+ return GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+
+bool JSObject::HasSlowArgumentsElements() {
+ return GetElementsKind() == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+
bool JSObject::HasSloppyArgumentsElements() {
- return GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS;
+ return IsSloppyArgumentsElements(GetElementsKind());
}
@@ -6698,10 +6337,18 @@ bool JSObject::HasIndexedInterceptor() {
NameDictionary* JSObject::property_dictionary() {
DCHECK(!HasFastProperties());
+ DCHECK(!IsGlobalObject());
return NameDictionary::cast(properties());
}
+GlobalDictionary* JSObject::global_dictionary() {
+ DCHECK(!HasFastProperties());
+ DCHECK(IsGlobalObject());
+ return GlobalDictionary::cast(properties());
+}
+
+
SeededNumberDictionary* JSObject::element_dictionary() {
DCHECK(HasDictionaryElements());
return SeededNumberDictionary::cast(elements());
@@ -6726,8 +6373,9 @@ uint32_t Name::Hash() {
return String::cast(this)->ComputeAndSetHash();
}
-bool Name::IsOwn() {
- return this->IsSymbol() && Symbol::cast(this)->is_own();
+
+bool Name::IsPrivate() {
+ return this->IsSymbol() && Symbol::cast(this)->is_private();
}
@@ -6810,7 +6458,7 @@ bool StringHasher::UpdateIndex(uint16_t c) {
return false;
}
}
- if (array_index_ > 429496729U - ((d + 2) >> 3)) {
+ if (array_index_ > 429496729U - ((d + 3) >> 3)) {
is_array_index_ = false;
return false;
}
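
For context on the one-character change: the guard keeps array_index_ * 10 + d within the largest valid array index, 2^32 - 2 = 4294967294. At array_index_ == 429496729 the product is 4294967290, so appending a digit d <= 4 is still safe while d == 5 would produce 4294967295. (d + 3) >> 3 is 1 exactly when d >= 5 and 0 otherwise, lowering the threshold by one in just the unsafe cases; the old (d + 2) >> 3 only reached 1 at d >= 6, an off-by-one at d == 5.
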
@@ -6912,12 +6560,23 @@ String* String::GetForwardedInternalizedString() {
}
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+ return GetProperty(&it, language_mode);
+}
+
+
Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<Name> name) {
+ // Call the "has" trap on proxies.
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
+
Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name);
return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
@@ -6925,34 +6584,75 @@ Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Handle<Name> name) {
+ // Call the "has" trap on proxies.
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
+
Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name);
return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
- Handle<JSReceiver> object, Handle<Name> key) {
- uint32_t index;
- if (object->IsJSObject() && key->AsArrayIndex(&index)) {
- return GetElementAttribute(object, index);
- }
- LookupIterator it(object, key);
+ Handle<JSReceiver> object, Handle<Name> name) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
return GetPropertyAttributes(&it);
}
-Maybe<PropertyAttributes> JSReceiver::GetElementAttribute(
- Handle<JSReceiver> object, uint32_t index) {
+Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
+ Handle<JSReceiver> object, Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ name->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ return GetPropertyAttributes(&it);
+}
+
+
+Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+ // Call the "has" trap on proxies.
if (object->IsJSProxy()) {
- return JSProxy::GetElementAttributeWithHandler(
- Handle<JSProxy>::cast(object), object, index);
+ Isolate* isolate = object->GetIsolate();
+ Handle<Name> name = isolate->factory()->Uint32ToString(index);
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(object), object, index, true);
+
+ Maybe<PropertyAttributes> result = GetElementAttributes(object, index);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+}
+
+
+Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
+ uint32_t index) {
+ // Call the "has" trap on proxies.
+ if (object->IsJSProxy()) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<Name> name = isolate->factory()->Uint32ToString(index);
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
+ }
+
+ Maybe<PropertyAttributes> result = GetOwnElementAttributes(object, index);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+}
+
+
+Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
+ Handle<JSReceiver> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index);
+ return GetPropertyAttributes(&it);
+}
+
+
+Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
+ Handle<JSReceiver> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index, LookupIterator::HIDDEN);
+ return GetPropertyAttributes(&it);
}
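
All four element queries above now funnel through an index-keyed LookupIterator, and HasElement/HasOwnElement reduce to "attributes != ABSENT"; proxies are intercepted first by stringifying the index and reusing the same "has" trap path as named properties.
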
@@ -6982,40 +6682,6 @@ Object* JSReceiver::GetIdentityHash() {
}
-Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasElementWithHandler(proxy, index);
- }
- Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(object), object, index, true);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
-}
-
-
-Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
- uint32_t index) {
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasElementWithHandler(proxy, index);
- }
- Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(object), object, index, false);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
-}
-
-
-Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttribute(
- Handle<JSReceiver> object, uint32_t index) {
- if (object->IsJSProxy()) {
- return JSProxy::GetElementAttributeWithHandler(
- Handle<JSProxy>::cast(object), object, index);
- }
- return JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(object), object, index, false);
-}
-
-
bool AccessorInfo::all_can_read() {
return BooleanBit::get(flag(), kAllCanReadBit);
}
@@ -7036,6 +6702,16 @@ void AccessorInfo::set_all_can_write(bool value) {
}
+bool AccessorInfo::is_special_data_property() {
+ return BooleanBit::get(flag(), kSpecialDataProperty);
+}
+
+
+void AccessorInfo::set_is_special_data_property(bool value) {
+ set_flag(BooleanBit::set(flag(), kSpecialDataProperty, value));
+}
+
+
PropertyAttributes AccessorInfo::property_attributes() {
return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
}
@@ -7054,20 +6730,11 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
}
-// static
-void ExecutableAccessorInfo::ClearSetter(Handle<ExecutableAccessorInfo> info) {
- auto foreign = info->GetIsolate()->factory()->NewForeign(
- reinterpret_cast<v8::internal::Address>(
- reinterpret_cast<intptr_t>(nullptr)));
- info->set_setter(*foreign);
-}
-
-
template<typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
Handle<Object> key,
Handle<Object> value) {
- SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
+ this->SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
}
@@ -7076,13 +6743,40 @@ void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
Handle<Object> key,
Handle<Object> value,
PropertyDetails details) {
+ Shape::SetEntry(static_cast<Derived*>(this), entry, key, value, details);
+}
+
+
+template <typename Key>
+template <typename Dictionary>
+void BaseDictionaryShape<Key>::SetEntry(Dictionary* dict, int entry,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyDetails details) {
+ STATIC_ASSERT(Dictionary::kEntrySize == 3);
DCHECK(!key->IsName() || details.dictionary_index() > 0);
- int index = DerivedHashTable::EntryToIndex(entry);
+ int index = dict->EntryToIndex(entry);
DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
- FixedArray::set(index, *key, mode);
- FixedArray::set(index+1, *value, mode);
- FixedArray::set(index+2, details.AsSmi());
+ WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
+ dict->set(index, *key, mode);
+ dict->set(index + 1, *value, mode);
+ dict->set(index + 2, details.AsSmi());
+}
+
+
+template <typename Dictionary>
+void GlobalDictionaryShape::SetEntry(Dictionary* dict, int entry,
+ Handle<Object> key, Handle<Object> value,
+ PropertyDetails details) {
+ STATIC_ASSERT(Dictionary::kEntrySize == 2);
+ DCHECK(!key->IsName() || details.dictionary_index() > 0);
+ DCHECK(value->IsPropertyCell());
+ int index = dict->EntryToIndex(entry);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
+ dict->set(index, *key, mode);
+ dict->set(index + 1, *value, mode);
+ PropertyCell::cast(*value)->set_property_details(details);
}
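
The two SetEntry specializations encode different entry layouts, pinned by the STATIC_ASSERTs: NameDictionary stores (key, value, details) triples, while GlobalDictionary stores (key, PropertyCell) pairs and keeps the details inside the cell. The index math, ignoring the table's header slots:

// NameDictionary: kEntrySize == 3
int NameKeyIndex(int entry)     { return entry * 3; }
int NameValueIndex(int entry)   { return entry * 3 + 1; }
int NameDetailsIndex(int entry) { return entry * 3 + 2; }

// GlobalDictionary: kEntrySize == 2; property details live inside the
// PropertyCell at the value slot, which is why SetEntry writes them there.
int GlobalKeyIndex(int entry)  { return entry * 2; }
int GlobalCellIndex(int entry) { return entry * 2 + 1; }
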
@@ -7153,6 +6847,34 @@ Handle<FixedArray> NameDictionary::DoGenerateNewEnumerationIndices(
}
+template <typename Dictionary>
+PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary* dict, int entry) {
+ DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
+ Object* raw_value = dict->ValueAt(entry);
+ DCHECK(raw_value->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(raw_value);
+ return cell->property_details();
+}
+
+
+template <typename Dictionary>
+void GlobalDictionaryShape::DetailsAtPut(Dictionary* dict, int entry,
+ PropertyDetails value) {
+ DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
+ Object* raw_value = dict->ValueAt(entry);
+ DCHECK(raw_value->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(raw_value);
+ cell->set_property_details(value);
+}
+
+
+template <typename Dictionary>
+bool GlobalDictionaryShape::IsDeleted(Dictionary* dict, int entry) {
+ DCHECK(dict->ValueAt(entry)->IsPropertyCell());
+ return PropertyCell::cast(dict->ValueAt(entry))->value()->IsTheHole();
+}
+
+
bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object* other) {
return key->SameValue(other);
}
@@ -7225,29 +6947,13 @@ void Map::ClearCodeCache(Heap* heap) {
}
-int Map::SlackForArraySize(int old_size, int size_limit) {
+int Map::SlackForArraySize(bool is_prototype_map, int old_size,
+ int size_limit) {
const int max_slack = size_limit - old_size;
- CHECK(max_slack >= 0);
+ CHECK_LE(0, max_slack);
if (old_size < 4) return Min(max_slack, 1);
- return Min(max_slack, old_size / 2);
-}
-
-
-void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
- DCHECK(array->HasFastSmiOrObjectElements());
- Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
- const int kArraySizeThatFitsComfortablyInNewSpace = 128;
- if (elts->length() < required_size) {
- // Doubling in size would be overkill, but leave some slack to avoid
- // constantly growing.
- Expand(array, required_size + (required_size >> 3));
- // It's a performance benefit to keep a frequently used array in new-space.
- } else if (!array->GetHeap()->new_space()->Contains(*elts) &&
- required_size < kArraySizeThatFitsComfortablyInNewSpace) {
- // Expand will allocate a new backing store in new space even if the size
- // we asked for isn't larger than what we had before.
- Expand(array, required_size);
- }
+ if (is_prototype_map) return Min(max_slack, 4);
+ return Min(max_slack, old_size / 4);
}
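
Concretely: with a large size_limit, a regular map with old_size == 8 now gets Min(max_slack, 8 / 4) == 2 slack slots where the old policy gave 8 / 2 == 4; maps with old_size < 4 still get 1; and prototype maps are capped at 4 regardless of old_size.
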
@@ -7257,18 +6963,16 @@ void JSArray::set_length(Smi* length) {
}
-bool JSArray::SetElementsLengthWouldNormalize(
- Heap* heap, Handle<Object> new_length_handle) {
+bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
 // If the new array won't fit in some non-trivial fraction of the max old
// space size, then force it to go dictionary mode.
- int max_fast_array_size =
- static_cast<int>((heap->MaxOldGenerationSize() / kDoubleSize) / 4);
- return new_length_handle->IsNumber() &&
- NumberToInt32(*new_length_handle) >= max_fast_array_size;
+ uint32_t max_fast_array_size =
+ static_cast<uint32_t>((heap->MaxOldGenerationSize() / kDoubleSize) / 4);
+ return new_length >= max_fast_array_size;
}
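
As a worked figure, assuming a 1 GB (2^30 byte) old-generation limit: max_fast_array_size = 2^30 / kDoubleSize (8) / 4 = 2^25 = 33554432, so setting a length of roughly 33.5 million or more forces the array into dictionary mode.
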
-bool JSArray::AllowsSetElementsLength() {
+bool JSArray::AllowsSetLength() {
bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
DCHECK(result == !HasExternalArrayElements());
return result;
@@ -7420,6 +7124,19 @@ void Foreign::ForeignIterateBody() {
}
+void FixedTypedArrayBase::FixedTypedArrayBaseIterateBody(ObjectVisitor* v) {
+ v->VisitPointer(
+ reinterpret_cast<Object**>(FIELD_ADDR(this, kBasePointerOffset)));
+}
+
+
+template <typename StaticVisitor>
+void FixedTypedArrayBase::FixedTypedArrayBaseIterateBody() {
+ StaticVisitor::VisitPointer(
+ reinterpret_cast<Object**>(FIELD_ADDR(this, kBasePointerOffset)));
+}
+
+
void ExternalOneByteString::ExternalOneByteStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalOneByteStringResource Resource;
v->VisitExternalOneByteString(
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index aa4f4ded19..b514a9443f 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -60,12 +60,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
HeapNumber::cast(this)->HeapNumberPrint(os);
os << ">";
break;
+ case FLOAT32X4_TYPE:
+ Float32x4::cast(this)->Float32x4Print(os);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
break;
- case CONSTANT_POOL_ARRAY_TYPE:
- ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(os);
- break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(os);
break;
@@ -255,6 +255,8 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
break;
}
}
+ } else if (IsGlobalObject()) {
+ global_dictionary()->Print(os);
} else {
property_dictionary()->Print(os);
}
@@ -338,7 +340,8 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case DICTIONARY_ELEMENTS:
elements()->Print(os);
break;
- case SLOPPY_ARGUMENTS_ELEMENTS: {
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
os << " parameter map:";
for (int i = 2; i < p->length(); i++) {
@@ -400,7 +403,6 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
os << " (" << PrivateSymbolToName() << ")";
}
os << "\n - private: " << is_private();
- os << "\n - own: " << is_own();
os << "\n";
}
@@ -417,12 +419,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_deprecated()) os << " - deprecated_map\n";
if (is_stable()) os << " - stable_map\n";
if (is_dictionary_map()) os << " - dictionary_map\n";
- if (is_prototype_map()) {
- os << " - prototype_map\n";
- os << " - prototype info: " << Brief(prototype_info());
- } else {
- os << " - back pointer: " << Brief(GetBackPointer());
- }
if (is_hidden_prototype()) os << " - hidden_prototype\n";
if (has_named_interceptor()) os << " - named_interceptor\n";
if (has_indexed_interceptor()) os << " - indexed_interceptor\n";
@@ -431,6 +427,12 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_access_check_needed()) os << " - access_check_needed\n";
if (!is_extensible()) os << " - non-extensible\n";
if (is_observed()) os << " - observed\n";
+ if (is_prototype_map()) {
+ os << " - prototype_map\n";
+ os << " - prototype info: " << Brief(prototype_info());
+ } else {
+ os << " - back pointer: " << Brief(GetBackPointer());
+ }
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
<< Brief(instance_descriptors());
@@ -503,43 +505,6 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
}
-void ConstantPoolArray::ConstantPoolArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ConstantPoolArray");
- os << " - length: " << length();
- for (int i = 0; i <= last_index(INT32, SMALL_SECTION); i++) {
- if (i < last_index(INT64, SMALL_SECTION)) {
- os << "\n [" << i << "]: double: " << get_int64_entry_as_double(i);
- } else if (i <= last_index(CODE_PTR, SMALL_SECTION)) {
- os << "\n [" << i << "]: code target pointer: "
- << reinterpret_cast<void*>(get_code_ptr_entry(i));
- } else if (i <= last_index(HEAP_PTR, SMALL_SECTION)) {
- os << "\n [" << i << "]: heap pointer: "
- << reinterpret_cast<void*>(get_heap_ptr_entry(i));
- } else if (i <= last_index(INT32, SMALL_SECTION)) {
- os << "\n [" << i << "]: int32: " << get_int32_entry(i);
- }
- }
- if (is_extended_layout()) {
- os << "\n Extended section:";
- for (int i = first_extended_section_index();
- i <= last_index(INT32, EXTENDED_SECTION); i++) {
- if (i < last_index(INT64, EXTENDED_SECTION)) {
- os << "\n [" << i << "]: double: " << get_int64_entry_as_double(i);
- } else if (i <= last_index(CODE_PTR, EXTENDED_SECTION)) {
- os << "\n [" << i << "]: code target pointer: "
- << reinterpret_cast<void*>(get_code_ptr_entry(i));
- } else if (i <= last_index(HEAP_PTR, EXTENDED_SECTION)) {
- os << "\n [" << i << "]: heap pointer: "
- << reinterpret_cast<void*>(get_heap_ptr_entry(i));
- } else if (i <= last_index(INT32, EXTENDED_SECTION)) {
- os << "\n [" << i << "]: int32: " << get_int32_entry(i);
- }
- }
- }
- os << "\n";
-}
-
-
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ValueObject");
value()->Print(os);
@@ -548,8 +513,8 @@ void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSMessageObject");
- os << " - type: " << Brief(type());
- os << "\n - arguments: " << Brief(arguments());
+ os << " - type: " << type();
+ os << "\n - arguments: " << Brief(argument());
os << "\n - start_position: " << start_position();
os << "\n - end_position: " << end_position();
os << "\n - script: " << Brief(script());
@@ -610,7 +575,7 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
os << " - time = NaN\n";
} else {
// TODO(svenpanne) Add some basic formatting to our streams.
- Vector<char> buf = Vector<char>::New(100);
+ ScopedVector<char> buf(100);
SNPrintF(
buf, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
@@ -884,6 +849,7 @@ void Box::BoxPrint(std::ostream& os) { // NOLINT
void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PrototypeInfo");
os << "\n - prototype users: " << Brief(prototype_users());
+ os << "\n - registry slot: " << registry_slot();
os << "\n - validity cell: " << Brief(validity_cell());
os << "\n - constructor name: " << Brief(constructor_name());
os << "\n";
@@ -1019,6 +985,7 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - eval from shared: " << Brief(eval_from_shared());
os << "\n - eval from instructions offset: "
<< Brief(eval_from_instructions_offset());
+ os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
}
@@ -1212,4 +1179,5 @@ void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
TransitionArray::PrintTransitions(os, map()->raw_transitions());
}
#endif // defined(DEBUG) || defined(OBJECT_PRINT)
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index f0dcaab937..2b042fda8e 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -97,6 +97,7 @@ bool Object::BooleanValue() {
if (IsUndetectableObject()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
+ if (IsFloat32x4()) return true; // Simd value types always evaluate to true.
return true;
}
@@ -112,48 +113,63 @@ bool Object::IsCallable() const {
}
-MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
+bool Object::IsPromise(Handle<Object> object) {
+ if (!object->IsJSObject()) return false;
+ auto js_object = Handle<JSObject>::cast(object);
+ // Promises can't have access checks.
+ if (js_object->map()->is_access_check_needed()) return false;
+ auto isolate = js_object->GetIsolate();
+ // TODO(dcarney): this should just be read from the symbol registry so as not
+ // to be context dependent.
+ auto key = isolate->promise_status();
+ // Shouldn't be possible to throw here.
+ return JSObject::HasRealNamedProperty(js_object, key).FromJust();
+}
+
+
+MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
+ LanguageMode language_mode) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- return JSProxy::GetPropertyWithHandler(it->GetHolder<JSProxy>(),
- it->GetReceiver(), it->name());
+ return JSProxy::GetPropertyWithHandler(
+ it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName());
case LookupIterator::INTERCEPTOR: {
- MaybeHandle<Object> maybe_result = JSObject::GetPropertyWithInterceptor(
- it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
- if (!maybe_result.is_null()) return maybe_result;
- if (it->isolate()->has_pending_exception()) return maybe_result;
+ bool done;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ it->isolate(), result,
+ JSObject::GetPropertyWithInterceptor(it, &done), Object);
+ if (done) return result;
break;
}
case LookupIterator::ACCESS_CHECK:
if (it->HasAccess()) break;
return JSObject::GetPropertyWithFailedAccessCheck(it);
case LookupIterator::ACCESSOR:
- return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
- it->GetHolder<JSObject>(),
- it->GetAccessors());
+ return GetPropertyWithAccessor(it, language_mode);
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return it->factory()->undefined_value();
+ return ReadAbsentProperty(it, language_mode);
case LookupIterator::DATA:
return it->GetDataValue();
}
}
- return it->factory()->undefined_value();
+ return ReadAbsentProperty(it, language_mode);
}
-Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object,
- Handle<Name> key) {
- LookupIterator it(object, key,
+Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ LookupIterator it(object, name,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
return GetDataProperty(&it);
}
-Handle<Object> JSObject::GetDataProperty(LookupIterator* it) {
+Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::INTERCEPTOR:
@@ -289,15 +305,22 @@ MaybeHandle<Object> JSProxy::GetPropertyWithHandler(Handle<JSProxy> proxy,
}
-MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
- Handle<Name> name,
- Handle<JSObject> holder,
- Handle<Object> structure) {
- Isolate* isolate = name->GetIsolate();
+MaybeHandle<Object> Object::GetPropertyWithAccessor(
+ LookupIterator* it, LanguageMode language_mode) {
+ Isolate* isolate = it->isolate();
+ Handle<Object> structure = it->GetAccessors();
+ Handle<Object> receiver = it->GetReceiver();
+
+ // We should never get here to initialize a const with the hole value since a
+ // const declaration would conflict with the getter.
DCHECK(!structure->IsForeign());
- // api style callbacks.
+
+ // API style callbacks.
if (structure->IsAccessorInfo()) {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<Name> name = it->GetName();
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
@@ -305,19 +328,16 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
Object);
}
- Handle<ExecutableAccessorInfo> data =
- Handle<ExecutableAccessorInfo>::cast(structure);
v8::AccessorNameGetterCallback call_fun =
- v8::ToCData<v8::AccessorNameGetterCallback>(data->getter());
- if (call_fun == NULL) return isolate->factory()->undefined_value();
+ v8::ToCData<v8::AccessorNameGetterCallback>(info->getter());
+ if (call_fun == nullptr) return isolate->factory()->undefined_value();
LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
- PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder);
- v8::Handle<v8::Value> result =
- args.Call(call_fun, v8::Utils::ToLocal(name));
+ PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
+ v8::Local<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(name));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
- return isolate->factory()->undefined_value();
+ return ReadAbsentProperty(isolate, receiver, name, language_mode);
}
Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
@@ -325,16 +345,15 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
return handle(*return_value, isolate);
}
- // __defineGetter__ callback
- Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
- isolate);
+ // Regular accessor.
+ Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
if (getter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return Object::GetPropertyWithDefinedGetter(
receiver, Handle<JSReceiver>::cast(getter));
}
// Getter is not a function.
- return isolate->factory()->undefined_value();
+ return ReadAbsentProperty(isolate, receiver, it->GetName(), language_mode);
}
@@ -349,56 +368,53 @@ bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
MaybeHandle<Object> Object::SetPropertyWithAccessor(
- Handle<Object> receiver, Handle<Name> name, Handle<Object> value,
- Handle<JSObject> holder, Handle<Object> structure,
- LanguageMode language_mode) {
- Isolate* isolate = name->GetIsolate();
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+ Isolate* isolate = it->isolate();
+ Handle<Object> structure = it->GetAccessors();
+ Handle<Object> receiver = it->GetReceiver();
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
+ // We should never get here to initialize a const with the hole value since a
+ // const declaration would conflict with the setter.
DCHECK(!structure->IsForeign());
+
+ // API style callbacks.
if (structure->IsExecutableAccessorInfo()) {
- // Don't call executable accessor setters with non-JSObject receivers.
- if (!receiver->IsJSObject()) return value;
- // api style callbacks
- ExecutableAccessorInfo* info = ExecutableAccessorInfo::cast(*structure);
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<Name> name = it->GetName();
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
name, receiver),
Object);
}
- Object* call_obj = info->setter();
+
v8::AccessorNameSetterCallback call_fun =
- v8::ToCData<v8::AccessorNameSetterCallback>(call_obj);
- if (call_fun == NULL) return value;
+ v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
+ if (call_fun == nullptr) return value;
+
LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
- args.Call(call_fun,
- v8::Utils::ToLocal(name),
- v8::Utils::ToLocal(value));
+ args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
- if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value);
- } else {
- if (is_sloppy(language_mode)) return value;
- Handle<Object> args[] = {name, holder};
- THROW_NEW_ERROR(isolate,
- NewTypeError("no_setter_in_callback",
- HandleVector(args, arraysize(args))),
- Object);
- }
+ // Regular accessor.
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
+ if (setter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value);
}
- UNREACHABLE();
- return MaybeHandle<Object>();
+ if (is_sloppy(language_mode)) return value;
+
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNoSetterInCallback,
+ it->GetName(), it->GetHolder<JSObject>()),
+ Object);
}
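
The rewrite flattens the old nested branch: API-style ExecutableAccessorInfo callbacks are handled first, everything else is cast directly to an AccessorPair, and a missing setter now either silently returns the value in sloppy mode or throws MessageTemplate::kNoSetterInCallback instead of the old string-keyed NewTypeError.
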
@@ -424,9 +440,7 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
Debug* debug = isolate->debug();
// Handle stepping into a getter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->is_active()) {
- debug->HandleStepIn(getter, Handle<Object>::null(), 0, false);
- }
+ if (debug->is_active()) debug->HandleStepIn(getter, false);
return Execution::Call(isolate, getter, receiver, 0, NULL, true);
}
@@ -441,9 +455,7 @@ MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->is_active()) {
- debug->HandleStepIn(setter, Handle<Object>::null(), 0, false);
- }
+ if (debug->is_active()) debug->HandleStepIn(setter, false);
Handle<Object> argv[] = { value };
RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver,
@@ -465,8 +477,7 @@ static bool FindAllCanReadHolder(LookupIterator* it) {
if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
}
} else if (it->state() == LookupIterator::INTERCEPTOR) {
- auto holder = it->GetHolder<JSObject>();
- if (holder->GetNamedInterceptor()->all_can_read()) return true;
+ if (it->GetInterceptor()->all_can_read()) return true;
}
}
return false;
@@ -478,16 +489,14 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
Handle<JSObject> checked = it->GetHolder<JSObject>();
while (FindAllCanReadHolder(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
- return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
- it->GetHolder<JSObject>(),
- it->GetAccessors());
+ return GetPropertyWithAccessor(it, SLOPPY);
}
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- auto receiver = Handle<JSObject>::cast(it->GetReceiver());
- auto result = GetPropertyWithInterceptor(it->GetHolder<JSObject>(),
- receiver, it->name());
- if (it->isolate()->has_scheduled_exception()) break;
- if (!result.is_null()) return result;
+ bool done;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), result,
+ GetPropertyWithInterceptor(it, &done), Object);
+ if (done) return result;
}
it->isolate()->ReportFailedAccessCheck(checked);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
@@ -503,8 +512,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
return Just(it->property_details().attributes());
}
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- auto result = GetPropertyAttributesWithInterceptor(
- it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
+ auto result = GetPropertyAttributesWithInterceptor(it);
if (it->isolate()->has_scheduled_exception()) break;
if (result.IsJust() && result.FromJust() != ABSENT) return result;
}
@@ -529,12 +537,11 @@ static bool FindAllCanWriteHolder(LookupIterator* it) {
MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+ LookupIterator* it, Handle<Object> value) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
if (FindAllCanWriteHolder(it)) {
- return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
- it->GetHolder<JSObject>(),
- it->GetAccessors(), language_mode);
+ // The supplied language-mode is ignored by SetPropertyWithAccessor.
+ return SetPropertyWithAccessor(it, value, SLOPPY);
}
it->isolate()->ReportFailedAccessCheck(checked);
@@ -548,229 +555,44 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyDetails details) {
DCHECK(!object->HasFastProperties());
- Handle<NameDictionary> property_dictionary(object->property_dictionary());
-
if (!name->IsUniqueName()) {
name = object->GetIsolate()->factory()->InternalizeString(
Handle<String>::cast(name));
}
- int entry = property_dictionary->FindEntry(name);
- if (entry == NameDictionary::kNotFound) {
- if (object->IsGlobalObject()) {
+ if (object->IsGlobalObject()) {
+ Handle<GlobalDictionary> property_dictionary(object->global_dictionary());
+
+ int entry = property_dictionary->FindEntry(name);
+ if (entry == GlobalDictionary::kNotFound) {
auto cell = object->GetIsolate()->factory()->NewPropertyCell();
cell->set_value(*value);
auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
: PropertyCellType::kConstant;
details = details.set_cell_type(cell_type);
value = cell;
+ property_dictionary =
+ GlobalDictionary::Add(property_dictionary, name, value, details);
+ object->set_properties(*property_dictionary);
+ } else {
+ PropertyCell::UpdateCell(property_dictionary, entry, value, details);
}
- property_dictionary =
- NameDictionary::Add(property_dictionary, name, value, details);
- object->set_properties(*property_dictionary);
- return;
- }
-
- if (object->IsGlobalObject()) {
- PropertyCell::UpdateCell(property_dictionary, entry, value, details);
- return;
- }
-
- PropertyDetails original_details = property_dictionary->DetailsAt(entry);
- int enumeration_index = original_details.dictionary_index();
- DCHECK(enumeration_index > 0);
- details = details.set_index(enumeration_index);
- property_dictionary->SetEntry(entry, name, value, details);
-}
-
-
-static MaybeHandle<JSObject> FindIndexedAllCanReadHolder(
- Isolate* isolate, Handle<JSObject> js_object,
- PrototypeIterator::WhereToStart where_to_start) {
- for (PrototypeIterator iter(isolate, js_object, where_to_start);
- !iter.IsAtEnd(); iter.Advance()) {
- auto curr = PrototypeIterator::GetCurrent(iter);
- if (!curr->IsJSObject()) break;
- auto obj = Handle<JSObject>::cast(curr);
- if (!obj->HasIndexedInterceptor()) continue;
- if (obj->GetIndexedInterceptor()->all_can_read()) return obj;
- }
- return MaybeHandle<JSObject>();
-}
-
-
-MaybeHandle<Object> JSObject::GetElementWithFailedAccessCheck(
- Isolate* isolate, Handle<JSObject> object, Handle<Object> receiver,
- uint32_t index) {
- Handle<JSObject> holder = object;
- PrototypeIterator::WhereToStart where_to_start =
- PrototypeIterator::START_AT_RECEIVER;
- while (true) {
- auto all_can_read_holder =
- FindIndexedAllCanReadHolder(isolate, holder, where_to_start);
- if (!all_can_read_holder.ToHandle(&holder)) break;
- auto result =
- JSObject::GetElementWithInterceptor(holder, receiver, index, false);
- if (isolate->has_scheduled_exception()) break;
- if (!result.is_null()) return result;
- where_to_start = PrototypeIterator::START_AT_PROTOTYPE;
- }
- isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
-}
-
-
-Maybe<PropertyAttributes> JSObject::GetElementAttributesWithFailedAccessCheck(
- Isolate* isolate, Handle<JSObject> object, Handle<Object> receiver,
- uint32_t index) {
- Handle<JSObject> holder = object;
- PrototypeIterator::WhereToStart where_to_start =
- PrototypeIterator::START_AT_RECEIVER;
- while (true) {
- auto all_can_read_holder =
- FindIndexedAllCanReadHolder(isolate, holder, where_to_start);
- if (!all_can_read_holder.ToHandle(&holder)) break;
- auto result =
- JSObject::GetElementAttributeFromInterceptor(holder, receiver, index);
- if (isolate->has_scheduled_exception()) break;
- if (result.IsJust() && result.FromJust() != ABSENT) return result;
- where_to_start = PrototypeIterator::START_AT_PROTOTYPE;
- }
- isolate->ReportFailedAccessCheck(object);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
- return Just(ABSENT);
-}
-
-
-MaybeHandle<Object> Object::GetElementWithReceiver(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> receiver,
- uint32_t index) {
- if (object->IsUndefined()) {
- // TODO(verwaest): Why is this check here?
- UNREACHABLE();
- return isolate->factory()->undefined_value();
- }
-
- // Iterate up the prototype chain until an element is found or the null
- // prototype is encountered.
- for (PrototypeIterator iter(isolate, object,
- object->IsJSProxy() || object->IsJSObject()
- ? PrototypeIterator::START_AT_RECEIVER
- : PrototypeIterator::START_AT_PROTOTYPE);
- !iter.IsAtEnd(); iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return JSProxy::GetElementWithHandler(
- Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
- index);
- }
-
- // Inline the case for JSObjects. Doing so significantly improves the
- // performance of fetching elements where checking the prototype chain is
- // necessary.
- Handle<JSObject> js_object =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
-
- // Check access rights if needed.
- if (js_object->IsAccessCheckNeeded()) {
- if (!isolate->MayAccess(js_object)) {
- return JSObject::GetElementWithFailedAccessCheck(isolate, js_object,
- receiver, index);
- }
- }
-
- if (js_object->HasIndexedInterceptor()) {
- return JSObject::GetElementWithInterceptor(js_object, receiver, index,
- true);
- }
-
- if (js_object->elements() != isolate->heap()->empty_fixed_array()) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- js_object->GetElementsAccessor()->Get(receiver, js_object, index),
- Object);
- if (!result->IsTheHole()) return result;
- }
- }
-
- return isolate->factory()->undefined_value();
-}
-
-
-MaybeHandle<Object> Object::SetElementWithReceiver(
- Isolate* isolate, Handle<Object> object, Handle<Object> receiver,
- uint32_t index, Handle<Object> value, LanguageMode language_mode) {
- // Iterate up the prototype chain until an element is found or the null
- // prototype is encountered.
- bool done = false;
- for (PrototypeIterator iter(isolate, object,
- object->IsJSProxy() || object->IsJSObject()
- ? PrototypeIterator::START_AT_RECEIVER
- : PrototypeIterator::START_AT_PROTOTYPE);
- !iter.IsAtEnd() && !done; iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- // TODO(dslomov): implement.
- isolate->ThrowIllegalOperation();
- return MaybeHandle<Object>();
- }
-
- Handle<JSObject> js_object =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
-
- // Check access rights if needed.
- if (js_object->IsAccessCheckNeeded()) {
- if (!isolate->MayAccess(js_object)) {
- isolate->ReportFailedAccessCheck(js_object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
- }
- }
-
- if (js_object->HasIndexedInterceptor()) {
- Maybe<PropertyAttributes> from_interceptor =
- JSObject::GetElementAttributeFromInterceptor(js_object, receiver,
- index);
- if (!from_interceptor.IsJust()) return MaybeHandle<Object>();
- if ((from_interceptor.FromJust() & READ_ONLY) != 0) {
- return WriteToReadOnlyElement(isolate, receiver, index, value,
- language_mode);
- }
- done = from_interceptor.FromJust() != ABSENT;
- }
+ } else {
+ Handle<NameDictionary> property_dictionary(object->property_dictionary());
- if (!done &&
- js_object->elements() != isolate->heap()->empty_fixed_array()) {
- ElementsAccessor* accessor = js_object->GetElementsAccessor();
- PropertyAttributes attrs = accessor->GetAttributes(js_object, index);
- if ((attrs & READ_ONLY) != 0) {
- return WriteToReadOnlyElement(isolate, receiver, index, value,
- language_mode);
- }
- Handle<AccessorPair> pair;
- if (accessor->GetAccessorPair(js_object, index).ToHandle(&pair)) {
- return JSObject::SetElementWithCallback(receiver, pair, index, value,
- js_object, language_mode);
- } else {
- done = attrs != ABSENT;
- }
+ int entry = property_dictionary->FindEntry(name);
+ if (entry == NameDictionary::kNotFound) {
+ property_dictionary =
+ NameDictionary::Add(property_dictionary, name, value, details);
+ object->set_properties(*property_dictionary);
+ } else {
+ PropertyDetails original_details = property_dictionary->DetailsAt(entry);
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK(enumeration_index > 0);
+ details = details.set_index(enumeration_index);
+ property_dictionary->SetEntry(entry, name, value, details);
}
}
-
- if (!receiver->IsJSObject()) {
- return WriteToReadOnlyElement(isolate, receiver, index, value,
- language_mode);
- }
- Handle<JSObject> target = Handle<JSObject>::cast(receiver);
- ElementsAccessor* accessor = target->GetElementsAccessor();
- PropertyAttributes attrs = accessor->GetAttributes(target, index);
- if (attrs == ABSENT) {
- return JSObject::SetElement(target, index, value, NONE, language_mode,
- false);
- }
- return JSObject::SetElement(target, index, value, attrs, language_mode, false,
- DEFINE_PROPERTY);
}
@@ -807,6 +629,15 @@ Map* Object::GetRootMap(Isolate* isolate) {
Object* Object::GetHash() {
+ Object* hash = GetSimpleHash();
+ if (hash->IsSmi()) return hash;
+
+ DCHECK(IsJSReceiver());
+ return JSReceiver::cast(this)->GetIdentityHash();
+}
+
+
+Object* Object::GetSimpleHash() {
// The object is either a Smi, a HeapNumber, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
if (IsSmi()) {
@@ -831,14 +662,14 @@ Object* Object::GetHash() {
uint32_t hash = Oddball::cast(this)->to_string()->Hash();
return Smi::FromInt(hash);
}
-
DCHECK(IsJSReceiver());
- return JSReceiver::cast(this)->GetIdentityHash();
+ JSReceiver* receiver = JSReceiver::cast(this);
+ return receiver->GetHeap()->undefined_value();
}
Handle<Smi> Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) {
- Handle<Object> hash(object->GetHash(), isolate);
+ Handle<Object> hash(object->GetSimpleHash(), isolate);
if (hash->IsSmi()) return Handle<Smi>::cast(hash);
DCHECK(object->IsJSReceiver());
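// A minimal standalone sketch of the two-tier hashing scheme introduced
// above: GetSimpleHash covers value-like payloads without side effects,
// while receivers fall back to a lazily created identity hash. All names
// and types below (Value, SimpleHash, the std:: containers) are
// illustrative stand-ins, not V8's internals.
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <unordered_map>
#include <variant>

// Numbers and strings hash by value; const void* models a JS receiver.
using Value = std::variant<int32_t, double, std::string, const void*>;

std::optional<uint32_t> SimpleHash(const Value& v) {
  if (const auto* i = std::get_if<int32_t>(&v))
    return static_cast<uint32_t>(std::hash<int32_t>{}(*i));
  if (const auto* d = std::get_if<double>(&v))
    return static_cast<uint32_t>(std::hash<double>{}(*d));
  if (const auto* s = std::get_if<std::string>(&v))
    return static_cast<uint32_t>(std::hash<std::string>{}(*s));
  return std::nullopt;  // a receiver has no simple hash
}

uint32_t GetOrCreateHash(const Value& v,
                         std::unordered_map<const void*, uint32_t>* identity) {
  if (auto h = SimpleHash(v)) return *h;  // fast path, no allocation
  const void* receiver = std::get<const void*>(v);
  auto it = identity->find(receiver);
  if (it != identity->end()) return it->second;  // existing identity hash
  uint32_t fresh = static_cast<uint32_t>(identity->size()) + 1;  // stand-in RNG
  identity->emplace(receiver, fresh);  // created once, stable thereafter
  return fresh;
}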
@@ -1493,6 +1324,12 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << '>';
break;
}
+ case FLOAT32X4_TYPE: {
+ os << "<Float32x4: ";
+ Float32x4::cast(this)->Float32x4Print(os);
+ os << ">";
+ break;
+ }
case JS_PROXY_TYPE:
os << "<JSProxy>";
break;
@@ -1514,8 +1351,9 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "PropertyCell for ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- PropertyCell::cast(this)->value()->ShortPrint(&accumulator);
- os << accumulator.ToCString().get();
+ PropertyCell* cell = PropertyCell::cast(this);
+ cell->value()->ShortPrint(&accumulator);
+ os << accumulator.ToCString().get() << " " << cell->property_details();
break;
}
case WEAK_CELL_TYPE: {
@@ -1573,9 +1411,6 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case FIXED_ARRAY_TYPE:
FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
- case CONSTANT_POOL_ARRAY_TYPE:
- reinterpret_cast<ConstantPoolArray*>(this)->ConstantPoolIterateBody(v);
- break;
case FIXED_DOUBLE_ARRAY_TYPE:
break;
case JS_OBJECT_TYPE:
@@ -1638,15 +1473,21 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
+ case FLOAT32X4_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
break;
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ break; \
+ \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ reinterpret_cast<FixedTypedArrayBase*>(this) \
+ ->FixedTypedArrayBaseIterateBody(v); \
+ break;
+
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -1683,6 +1524,12 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
}
+void Float32x4::Float32x4Print(std::ostream& os) { // NOLINT
+ os << get_lane(0) << ", " << get_lane(1) << ", " << get_lane(2) << ", "
+ << get_lane(3);
+}
+
+
String* JSReceiver::class_name() {
if (IsJSFunction() || IsJSFunctionProxy()) {
return GetHeap()->Function_string();
@@ -1792,31 +1639,40 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
PropertyAttributes attributes) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
- Handle<NameDictionary> dict(object->property_dictionary());
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
if (object->IsGlobalObject()) {
+ Handle<GlobalDictionary> dict(object->global_dictionary());
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
int entry = dict->FindEntry(name);
// If there's a cell there, just invalidate and set the property.
- if (entry != NameDictionary::kNotFound) {
+ if (entry != GlobalDictionary::kNotFound) {
PropertyCell::UpdateCell(dict, entry, value, details);
- // TODO(dcarney): move this to UpdateCell.
+ // TODO(ishell): move this to UpdateCell.
// Need to adjust the details.
int index = dict->NextEnumerationIndex();
dict->SetNextEnumerationIndex(index + 1);
- details = dict->DetailsAt(entry).set_index(index);
- dict->DetailsAtPut(entry, details);
- return;
+ PropertyCell* cell = PropertyCell::cast(dict->ValueAt(entry));
+ details = cell->property_details().set_index(index);
+ cell->set_property_details(details);
+
+ } else {
+ auto cell = isolate->factory()->NewPropertyCell();
+ cell->set_value(*value);
+ auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
+ : PropertyCellType::kConstant;
+ details = details.set_cell_type(cell_type);
+ value = cell;
+
+ Handle<GlobalDictionary> result =
+ GlobalDictionary::Add(dict, name, value, details);
+ if (*dict != *result) object->set_properties(*result);
}
- auto cell = isolate->factory()->NewPropertyCell();
- cell->set_value(*value);
- auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
- : PropertyCellType::kConstant;
- details = details.set_cell_type(cell_type);
- value = cell;
+ } else {
+ Handle<NameDictionary> dict(object->property_dictionary());
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(dict, name, value, details);
+ if (*dict != *result) object->set_properties(*result);
}
- Handle<NameDictionary> result =
- NameDictionary::Add(dict, name, value, details);
- if (*dict != *result) object->set_properties(*result);
}
@@ -1904,10 +1760,30 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
}
-void Map::ConnectElementsTransition(Handle<Map> parent, Handle<Map> child) {
- Isolate* isolate = parent->GetIsolate();
- Handle<Name> name = isolate->factory()->elements_transition_symbol();
- ConnectTransition(parent, child, name, SPECIAL_TRANSITION);
+static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate) {
+ if (!FLAG_track_prototype_users) return;
+ if (!old_map->is_prototype_map()) return;
+ DCHECK(new_map->is_prototype_map());
+ bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
+ new_map->set_prototype_info(old_map->prototype_info());
+ old_map->set_prototype_info(Smi::FromInt(0));
+ if (FLAG_trace_prototype_users) {
+ PrintF("Moving prototype_info %p from map %p to map %p.\n",
+ reinterpret_cast<void*>(new_map->prototype_info()),
+ reinterpret_cast<void*>(*old_map),
+ reinterpret_cast<void*>(*new_map));
+ }
+ if (was_registered) {
+ if (new_map->prototype_info()->IsPrototypeInfo()) {
+ // The new map isn't registered with its prototype yet; reflect this fact
+ // in the PrototypeInfo it just inherited from the old map.
+ PrototypeInfo::cast(new_map->prototype_info())
+ ->set_registry_slot(PrototypeInfo::UNREGISTERED);
+ }
+ JSObject::LazyRegisterPrototypeUser(new_map, isolate);
+ }
}
@@ -1924,16 +1800,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// when a map on a prototype chain is registered with its prototype, then
// all prototypes further up the chain are also registered with their
// respective prototypes.
- Object* maybe_old_prototype = old_map->prototype();
- if (FLAG_track_prototype_users && old_map->is_prototype_map() &&
- maybe_old_prototype->IsJSObject()) {
- Handle<JSObject> old_prototype(JSObject::cast(maybe_old_prototype));
- bool was_registered =
- JSObject::UnregisterPrototypeUser(old_prototype, old_map);
- if (was_registered) {
- JSObject::LazyRegisterPrototypeUser(new_map, new_map->GetIsolate());
- }
- }
+ UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
if (object->HasFastProperties()) {
if (!new_map->is_dictionary_map()) {
@@ -1966,20 +1833,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// state now: the new map might have a new elements_kind, but the object's
// elements pointer hasn't been updated yet. Callers will fix this, but in
// the meantime, (indirectly) calling JSObjectVerify() must be avoided.
- DisallowHeapAllocation no_object_verification;
-
- if (old_map->is_prototype_map() && FLAG_track_prototype_users) {
- DCHECK(new_map->is_prototype_map());
- DCHECK(object->map() == *new_map);
- new_map->set_prototype_info(old_map->prototype_info());
- old_map->set_prototype_info(Smi::FromInt(0));
- if (FLAG_trace_prototype_users) {
- PrintF("Moving prototype_info %p from map %p to map %p.\n",
- reinterpret_cast<void*>(new_map->prototype_info()),
- reinterpret_cast<void*>(*old_map),
- reinterpret_cast<void*>(*new_map));
- }
- }
+ // When adding code here, add a DisallowHeapAllocation too.
}
@@ -2571,7 +2425,9 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
ElementsKind from_kind = root_map->elements_kind();
ElementsKind to_kind = old_map->elements_kind();
+ // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
+ to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
!(IsTransitionableFastElementsKind(from_kind) &&
IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
@@ -3116,27 +2972,50 @@ Handle<Map> Map::Update(Handle<Map> map) {
MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
Handle<Object> value) {
- Handle<Name> name = it->name();
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ Handle<InterceptorInfo> interceptor(it->GetInterceptor());
+ if (interceptor->setter()->IsUndefined()) return MaybeHandle<Object>();
+
Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
- if (interceptor->setter()->IsUndefined() ||
- (name->IsSymbol() && !interceptor->can_intercept_symbols())) {
- return MaybeHandle<Object>();
+ v8::Local<v8::Value> result;
+ PropertyCallbackArguments args(isolate, interceptor->data(),
+ *it->GetReceiver(), *holder);
+
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertySetterCallback setter =
+ v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-set", *holder, index));
+ result = args.Call(setter, index, v8::Utils::ToLocal(value));
+ } else {
+ Handle<Name> name = it->name();
+
+ if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+ return MaybeHandle<Object>();
+ }
+
+ v8::GenericNamedPropertySetterCallback setter =
+ v8::ToCData<v8::GenericNamedPropertySetterCallback>(
+ interceptor->setter());
+ LOG(it->isolate(),
+ ApiNamedPropertyAccess("interceptor-named-set", *holder, *name));
+ result =
+ args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
}
- LOG(it->isolate(),
- ApiNamedPropertyAccess("interceptor-named-set", *holder, *name));
- PropertyCallbackArguments args(it->isolate(), interceptor->data(), *holder,
- *holder);
- v8::GenericNamedPropertySetterCallback setter =
- v8::ToCData<v8::GenericNamedPropertySetterCallback>(
- interceptor->setter());
- v8::Handle<v8::Value> result =
- args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- if (!result.IsEmpty()) return value;
-
- return MaybeHandle<Object>();
+ if (result.IsEmpty()) return MaybeHandle<Object>();
+#ifdef DEBUG
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+#endif
+ return value;
}
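// SetPropertyWithInterceptor now dispatches on the lookup kind: element
// stores go through the indexed setter callback, named stores through the
// generic named setter (after a symbol filter). A minimal model of that
// dispatch, with std::function standing in for the v8 callback types:
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <variant>

using Key = std::variant<uint32_t, std::string>;
using IndexedSetter = std::function<bool(uint32_t, int)>;
using NamedSetter = std::function<bool(const std::string&, int)>;

// Returns the stored value when an interceptor handled the write, or
// std::nullopt to tell the caller to continue with the normal store path.
std::optional<int> SetWithInterceptor(const Key& key, int value,
                                      const IndexedSetter& indexed,
                                      const NamedSetter& named) {
  if (const auto* index = std::get_if<uint32_t>(&key)) {
    if (indexed && indexed(*index, value)) return value;
  } else if (named && named(std::get<std::string>(key), value)) {
    return value;
  }
  return std::nullopt;  // empty result: the interceptor declined
}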
@@ -3167,24 +3046,22 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- // TODO(verwaest): Remove the distinction. This is mostly bogus since we
- // don't know whether we'll want to fetch attributes or call a setter
- // until we find the property.
if (it->HasAccess()) break;
- return JSObject::SetPropertyWithFailedAccessCheck(it, value,
- language_mode);
+ // Check whether it makes sense to reuse the lookup iterator: the failed-
+ // access-check path might still call into setters up the prototype chain.
+ return JSObject::SetPropertyWithFailedAccessCheck(it, value);
case LookupIterator::JSPROXY:
if (it->HolderIsReceiverOrHiddenPrototype()) {
- return JSProxy::SetPropertyWithHandler(it->GetHolder<JSProxy>(),
- it->GetReceiver(), it->name(),
- value, language_mode);
+ return JSProxy::SetPropertyWithHandler(
+ it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(), value,
+ language_mode);
} else {
// TODO(verwaest): Use the MaybeHandle to indicate result.
bool has_result = false;
MaybeHandle<Object> maybe_result =
JSProxy::SetPropertyViaPrototypesWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->name(),
+ it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(),
value, language_mode, &has_result);
if (has_result) return maybe_result;
done = true;
@@ -3199,8 +3076,7 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
if (it->isolate()->has_pending_exception()) return maybe_result;
} else {
Maybe<PropertyAttributes> maybe_attributes =
- JSObject::GetPropertyAttributesWithInterceptor(
- it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
+ JSObject::GetPropertyAttributesWithInterceptor(it);
if (!maybe_attributes.IsJust()) return MaybeHandle<Object>();
done = maybe_attributes.FromJust() != ABSENT;
if (done && (maybe_attributes.FromJust() & READ_ONLY) != 0) {
@@ -3209,20 +3085,25 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
}
break;
- case LookupIterator::ACCESSOR:
- if (it->property_details().IsReadOnly()) {
+ case LookupIterator::ACCESSOR: {
+ if (it->IsReadOnly()) {
return WriteToReadOnlyProperty(it, value, language_mode);
}
- return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
- it->GetHolder<JSObject>(),
- it->GetAccessors(), language_mode);
-
+ Handle<Object> accessors = it->GetAccessors();
+ if (accessors->IsAccessorInfo() &&
+ !it->HolderIsReceiverOrHiddenPrototype() &&
+ AccessorInfo::cast(*accessors)->is_special_data_property()) {
+ done = true;
+ break;
+ }
+ return SetPropertyWithAccessor(it, value, language_mode);
+ }
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- done = true;
- break;
+ // TODO(verwaest): We should throw an exception.
+ return value;
case LookupIterator::DATA:
- if (it->property_details().IsReadOnly()) {
+ if (it->IsReadOnly()) {
return WriteToReadOnlyProperty(it, value, language_mode);
}
if (it->HolderIsReceiverOrHiddenPrototype()) {
@@ -3274,26 +3155,34 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
- LookupIterator own_lookup(it->GetReceiver(), it->name(), LookupIterator::OWN);
+ if (!it->GetReceiver()->IsJSReceiver()) {
+ return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
+ it->GetName(), value, language_mode);
+ }
+
+ LookupIterator::Configuration c = LookupIterator::OWN;
+ LookupIterator own_lookup =
+ it->IsElement()
+ ? LookupIterator(it->isolate(), it->GetReceiver(), it->index(), c)
+ : LookupIterator(it->GetReceiver(), it->name(), c);
+
for (; own_lookup.IsFound(); own_lookup.Next()) {
switch (own_lookup.state()) {
case LookupIterator::ACCESS_CHECK:
if (!own_lookup.HasAccess()) {
- return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value,
- SLOPPY);
+ return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value);
}
break;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
- language_mode);
+ return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
+ value, language_mode);
case LookupIterator::DATA: {
PropertyDetails details = own_lookup.property_details();
if (details.IsConfigurable() || !details.IsReadOnly()) {
- return JSObject::SetOwnPropertyIgnoreAttributes(
- Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
- details.attributes());
+ return JSObject::DefineOwnPropertyIgnoreAttributes(
+ &own_lookup, value, details.attributes());
}
return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
}
@@ -3301,13 +3190,12 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
case LookupIterator::ACCESSOR: {
PropertyDetails details = own_lookup.property_details();
if (details.IsConfigurable()) {
- return JSObject::SetOwnPropertyIgnoreAttributes(
- Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
- details.attributes());
+ return JSObject::DefineOwnPropertyIgnoreAttributes(
+ &own_lookup, value, details.attributes());
}
- return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
- language_mode);
+ return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
+ value, language_mode);
}
case LookupIterator::INTERCEPTOR:
@@ -3330,10 +3218,35 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
}
+MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it,
+ LanguageMode language_mode) {
+ if (is_strong(language_mode)) {
+ THROW_NEW_ERROR(it->isolate(),
+ NewTypeError(MessageTemplate::kStrongPropertyAccess,
+ it->GetName(), it->GetReceiver()),
+ Object);
+ }
+ return it->isolate()->factory()->undefined_value();
+}
+
+MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ LanguageMode language_mode) {
+ if (is_strong(language_mode)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kStrongPropertyAccess, name, receiver),
+ Object);
+ }
+ return isolate->factory()->undefined_value();
+}
+
+
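// The two ReadAbsentProperty overloads encode the same policy: reading a
// missing property yields undefined, except under the experimental strong
// mode, where it throws. A compact model (the enum values are illustrative):
#include <optional>
#include <stdexcept>

enum class LanguageModeModel { kSloppy, kStrict, kStrong };

std::optional<int> ReadAbsent(LanguageModeModel mode) {
  if (mode == LanguageModeModel::kStrong)
    throw std::runtime_error("strong mode: read of an absent property");
  return std::nullopt;  // plays the role of the undefined value
}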
MaybeHandle<Object> Object::WriteToReadOnlyProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
- return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(), it->name(),
- value, language_mode);
+ return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
+ it->GetName(), value, language_mode);
}
@@ -3341,21 +3254,10 @@ MaybeHandle<Object> Object::WriteToReadOnlyProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, LanguageMode language_mode) {
if (is_sloppy(language_mode)) return value;
- Handle<Object> args[] = {name, receiver};
- THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
- HandleVector(args, arraysize(args))),
- Object);
-}
-
-
-MaybeHandle<Object> Object::WriteToReadOnlyElement(Isolate* isolate,
- Handle<Object> receiver,
- uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode) {
- return WriteToReadOnlyProperty(isolate, receiver,
- isolate->factory()->NewNumberFromUint(index),
- value, language_mode);
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty, name, receiver),
+ Object);
}
@@ -3382,21 +3284,45 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// Fetch before transforming the object since the encoding may become
// incompatible with what's cached in |it|.
bool is_observed = receiver->map()->is_observed() &&
- !it->isolate()->IsInternallyUsedPropertyName(it->name());
+ (it->IsElement() ||
+ !it->isolate()->IsInternallyUsedPropertyName(it->name()));
MaybeHandle<Object> maybe_old;
if (is_observed) maybe_old = it->GetDataValue();
+ Handle<Object> to_assign = value;
+ // Convert the incoming value to a number for storing into typed arrays.
+ if (it->IsElement() && (receiver->HasExternalArrayElements() ||
+ receiver->HasFixedTypedArrayElements())) {
+ if (!value->IsNumber() && !value->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), to_assign,
+ Execution::ToNumber(it->isolate(), value),
+ Object);
+ // ToNumber above might have modified the receiver, causing the cached
+ // holder_map to mismatch the actual holder->map() after this point.
+ // Reload the map to get back into a consistent state. Other cached state
+ // cannot have been invalidated, since typed array elements cannot be
+ // reconfigured in any way.
+ it->ReloadHolderMap();
+
+ // We have to recheck the length. However, it can only change if the
+ // underlying buffer was neutered, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
+ return value;
+ }
+ }
+ }
+
// Possibly migrate to the most up-to-date map that will be able to store
// |value| under it->name().
- it->PrepareForDataProperty(value);
+ it->PrepareForDataProperty(to_assign);
// Write the property value.
- it->WriteDataValue(value);
+ it->WriteDataValue(to_assign);
// Send the change record if there are observers.
if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
RETURN_ON_EXCEPTION(it->isolate(), JSObject::EnqueueChangeRecord(
- receiver, "update", it->name(),
+ receiver, "update", it->GetName(),
maybe_old.ToHandleChecked()),
Object);
}
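// For stores into typed-array-backed elements, SetDataProperty first runs
// the incoming value through ToNumber. Because ToNumber can execute
// arbitrary user code (valueOf/toString), the backing buffer may have been
// neutered in the meantime, so the store re-validates afterwards. A
// standalone model of that ordering (types are illustrative):
#include <cstdint>
#include <functional>
#include <vector>

struct TypedArrayModel {
  std::vector<double> storage;
  bool neutered = false;  // set when user code detaches the buffer
};

// to_number stands in for ToNumber and may flip `neutered` as a side effect.
void StoreElement(TypedArrayModel* array, uint32_t index,
                  const std::function<double(TypedArrayModel*)>& to_number) {
  double value = to_number(array);  // may run arbitrary user code
  if (array->neutered) return;      // re-check state after the callback
  if (index < array->storage.size()) {
    array->storage[index] = value;  // safe: validated after conversion
  }
}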
@@ -3405,6 +3331,47 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
}
+MUST_USE_RESULT static MaybeHandle<Object> BeginPerformSplice(
+ Handle<JSArray> object) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> args[] = {object};
+
+ return Execution::Call(
+ isolate, Handle<JSFunction>(isolate->observers_begin_perform_splice()),
+ isolate->factory()->undefined_value(), arraysize(args), args);
+}
+
+
+MUST_USE_RESULT static MaybeHandle<Object> EndPerformSplice(
+ Handle<JSArray> object) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> args[] = {object};
+
+ return Execution::Call(
+ isolate, Handle<JSFunction>(isolate->observers_end_perform_splice()),
+ isolate->factory()->undefined_value(), arraysize(args), args);
+}
+
+
+MUST_USE_RESULT static MaybeHandle<Object> EnqueueSpliceRecord(
+ Handle<JSArray> object, uint32_t index, Handle<JSArray> deleted,
+ uint32_t add_count) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> add_count_object =
+ isolate->factory()->NewNumberFromUint(add_count);
+
+ Handle<Object> args[] = {object, index_object, deleted, add_count_object};
+
+ return Execution::Call(
+ isolate, Handle<JSFunction>(isolate->observers_enqueue_splice()),
+ isolate->factory()->undefined_value(), arraysize(args), args);
+}
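// The three MUST_USE_RESULT helpers above bracket an array mutation for
// Object.observe: Begin/EndPerformSplice mark a compound change in
// progress, and EnqueueSpliceRecord captures (index, deleted, add_count).
// The real helpers call back into JS runtime observer functions; this is
// only a sketch of the record plumbing, with illustrative types:
#include <cstdint>
#include <utility>
#include <vector>

struct SpliceRecord {
  uint32_t index;               // where the splice started
  std::vector<double> deleted;  // elements removed by the splice
  uint32_t add_count;           // number of elements inserted
};

struct SpliceObserverModel {
  int depth = 0;  // nested Begin/End pairs mark one compound change
  std::vector<SpliceRecord> queue;

  void Begin() { ++depth; }
  void End() { --depth; }
  void Enqueue(uint32_t index, std::vector<double> deleted,
               uint32_t add_count) {
    queue.push_back({index, std::move(deleted), add_count});
  }
};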
+
+
MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
Handle<Object> value,
PropertyAttributes attributes,
@@ -3416,7 +3383,7 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
return WriteToReadOnlyProperty(it, value, language_mode);
}
- if (it->state() == LookupIterator::INTEGER_INDEXED_EXOTIC) return value;
+ DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
Handle<JSObject> receiver = it->GetStoreTarget();
@@ -3424,86 +3391,69 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// instead. If the prototype is Null, the proxy is detached.
if (receiver->IsJSGlobalProxy()) return value;
- // Possibly migrate to the most up-to-date map that will be able to store
- // |value| under it->name() with |attributes|.
- it->PrepareTransitionToDataProperty(value, attributes, store_mode);
- if (it->state() != LookupIterator::TRANSITION) {
- if (is_sloppy(language_mode)) return value;
+ Isolate* isolate = it->isolate();
- Handle<Object> args[] = {it->name()};
- THROW_NEW_ERROR(it->isolate(),
- NewTypeError("object_not_extensible",
- HandleVector(args, arraysize(args))),
+ if (!receiver->map()->is_extensible() &&
+ (it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()))) {
+ if (is_sloppy(language_mode)) return value;
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kObjectNotExtensible,
+ it->GetName()),
Object);
}
- it->ApplyTransitionToDataProperty();
-
- // TODO(verwaest): Encapsulate dictionary handling better.
- if (receiver->map()->is_dictionary_map()) {
- // TODO(verwaest): Probably should ensure this is done beforehand.
- it->InternalizeName();
- // TODO(dcarney): just populate TransitionPropertyCell here?
- JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
- } else {
- // Write the property value.
- it->WriteDataValue(value);
- }
-
- // Send the change record if there are observers.
- if (receiver->map()->is_observed() &&
- !it->isolate()->IsInternallyUsedPropertyName(it->name())) {
- RETURN_ON_EXCEPTION(it->isolate(), JSObject::EnqueueChangeRecord(
- receiver, "add", it->name(),
- it->factory()->the_hole_value()),
- Object);
- }
-
- return value;
-}
+ if (it->IsElement()) {
+ if (receiver->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (JSArray::WouldChangeReadOnlyLength(array, it->index())) {
+ if (is_sloppy(language_mode)) return value;
+ return JSArray::ReadOnlyLengthError(array);
+ }
-MaybeHandle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
- Handle<JSObject> object, uint32_t index, Handle<Object> value, bool* found,
- LanguageMode language_mode) {
- Isolate* isolate = object->GetIsolate();
- for (PrototypeIterator iter(isolate, object); !iter.IsAtEnd();
- iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return JSProxy::SetPropertyViaPrototypesWithHandler(
- Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), object,
- isolate->factory()->Uint32ToString(index), // name
- value, language_mode, found);
- }
- Handle<JSObject> js_proto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ if (FLAG_trace_external_array_abuse &&
+ (array->HasExternalArrayElements() ||
+ array->HasFixedTypedArrayElements())) {
+ CheckArrayAbuse(array, "typed elements write", it->index(), true);
+ }
- if (js_proto->IsAccessCheckNeeded()) {
- if (!isolate->MayAccess(js_proto)) {
- *found = true;
- isolate->ReportFailedAccessCheck(js_proto);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return MaybeHandle<Object>();
+ if (FLAG_trace_js_array_abuse && !array->HasExternalArrayElements() &&
+ !array->HasFixedTypedArrayElements()) {
+ CheckArrayAbuse(array, "elements write", it->index(), false);
}
}
- if (!js_proto->HasDictionaryElements()) {
- continue;
+ MaybeHandle<Object> result =
+ JSObject::AddDataElement(receiver, it->index(), value, attributes);
+ JSObject::ValidateElements(receiver);
+ return result;
+ } else {
+ // Migrate to the most up-to-date map that will be able to store |value|
+ // under it->name() with |attributes|.
+ it->PrepareTransitionToDataProperty(value, attributes, store_mode);
+ DCHECK_EQ(LookupIterator::TRANSITION, it->state());
+ it->ApplyTransitionToDataProperty();
+
+ // TODO(verwaest): Encapsulate dictionary handling better.
+ if (receiver->map()->is_dictionary_map()) {
+ // TODO(verwaest): Probably should ensure this is done beforehand.
+ it->InternalizeName();
+ // TODO(dcarney): just populate TransitionPropertyCell here?
+ JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
+ } else {
+ // Write the property value.
+ it->WriteDataValue(value);
}
- Handle<SeededNumberDictionary> dictionary(js_proto->element_dictionary());
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == ACCESSOR_CONSTANT) {
- *found = true;
- Handle<Object> structure(dictionary->ValueAt(entry), isolate);
- return SetElementWithCallback(object, structure, index, value, js_proto,
- language_mode);
- }
+ // Send the change record if there are observers.
+ if (receiver->map()->is_observed() &&
+ !isolate->IsInternallyUsedPropertyName(it->name())) {
+ RETURN_ON_EXCEPTION(isolate, JSObject::EnqueueChangeRecord(
+ receiver, "add", it->name(),
+ it->factory()->the_hole_value()),
+ Object);
}
}
- *found = false;
- return isolate->factory()->the_hole_value();
+
+ return value;
}
@@ -3650,51 +3600,38 @@ int AccessorInfo::AppendUnique(Handle<Object> descriptors,
}
-static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
- DCHECK(!map.is_null());
+static bool ContainsMap(MapHandleList* maps, Map* map) {
+ DCHECK_NOT_NULL(map);
for (int i = 0; i < maps->length(); ++i) {
- if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
+ if (!maps->at(i).is_null() && *maps->at(i) == map) return true;
}
return false;
}
-template <class T>
-static Handle<T> MaybeNull(T* p) {
- if (p == NULL) return Handle<T>::null();
- return Handle<T>(p);
-}
-
-
-Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
- ElementsKind kind = elements_kind();
- Handle<Map> transitioned_map = Handle<Map>::null();
- Handle<Map> current_map(this);
+Handle<Map> Map::FindTransitionedMap(Handle<Map> map,
+ MapHandleList* candidates) {
+ ElementsKind kind = map->elements_kind();
bool packed = IsFastPackedElementsKind(kind);
+
+ Map* transition = nullptr;
if (IsTransitionableFastElementsKind(kind)) {
- while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, false);
- Handle<Map> maybe_transitioned_map =
- MaybeNull(current_map->LookupElementsTransitionMap(kind));
- if (maybe_transitioned_map.is_null()) break;
- if (ContainsMap(candidates, maybe_transitioned_map) &&
- (packed || !IsFastPackedElementsKind(kind))) {
- transitioned_map = maybe_transitioned_map;
- if (!IsFastPackedElementsKind(kind)) packed = false;
+ for (Map* current = map->ElementsTransitionMap();
+ current != nullptr && current->has_fast_elements();
+ current = current->ElementsTransitionMap()) {
+ if (ContainsMap(candidates, current) &&
+ (packed || !IsFastPackedElementsKind(current->elements_kind()))) {
+ transition = current;
+ packed = packed && IsFastPackedElementsKind(current->elements_kind());
}
- current_map = maybe_transitioned_map;
}
}
- return transitioned_map;
+ return transition == nullptr ? Handle<Map>() : handle(transition);
}
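// The rewritten FindTransitionedMap walks the raw elements transition
// chain once and keeps the most general candidate it meets, accepting a
// packed target only while the chain is still packed. A standalone model
// (MapNode and its fields are illustrative):
#include <algorithm>
#include <vector>

struct MapNode {
  bool packed;                   // IsFastPackedElementsKind(elements_kind)
  MapNode* elements_transition;  // next, more general map, or nullptr
};

MapNode* FindTransitioned(MapNode* map,
                          const std::vector<MapNode*>& candidates) {
  bool packed = map->packed;
  MapNode* transition = nullptr;
  for (MapNode* cur = map->elements_transition; cur != nullptr;
       cur = cur->elements_transition) {
    bool is_candidate = std::find(candidates.begin(), candidates.end(),
                                  cur) != candidates.end();
    // A holey source may take any candidate; a packed source only stays
    // packed while the chain does.
    if (is_candidate && (packed || !cur->packed)) {
      transition = cur;  // later hits are more general, keep the last one
      packed = packed && cur->packed;
    }
  }
  return transition;  // nullptr when no candidate lies on the chain
}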
static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
Map* current_map = map;
- int target_kind =
- IsFastElementsKind(to_kind) || IsExternalArrayElementsKind(to_kind)
- ? to_kind
- : TERMINAL_FAST_ELEMENTS_KIND;
// Support for legacy API: SetIndexedPropertiesTo{External,Pixel}Data
// allows changing elements from an arbitrary kind to any ExternalArray
@@ -3710,20 +3647,14 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
}
ElementsKind kind = map->elements_kind();
- while (kind != target_kind) {
- kind = GetNextTransitionElementsKind(kind);
+ while (kind != to_kind) {
Map* next_map = current_map->ElementsTransitionMap();
- if (next_map == NULL) return current_map;
+ if (next_map == nullptr) return current_map;
+ kind = next_map->elements_kind();
current_map = next_map;
}
- Map* next_map = current_map->ElementsTransitionMap();
- if (to_kind != kind && next_map != NULL) {
- DCHECK(to_kind == DICTIONARY_ELEMENTS);
- if (next_map->elements_kind() == to_kind) return next_map;
- }
-
- DCHECK(current_map->elements_kind() == target_kind);
+ DCHECK_EQ(to_kind, current_map->elements_kind());
return current_map;
}
@@ -3731,7 +3662,7 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
Map* to_map = FindClosestElementsTransition(this, to_kind);
if (to_map->elements_kind() == to_kind) return to_map;
- return NULL;
+ return nullptr;
}
@@ -3772,9 +3703,11 @@ static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
flag = OMIT_TRANSITION;
} else {
flag = INSERT_TRANSITION;
- while (kind != to_kind && !IsTerminalElementsKind(kind)) {
- kind = GetNextTransitionElementsKind(kind);
- current_map = Map::CopyAsElementsKind(current_map, kind, flag);
+ if (IsFastElementsKind(kind)) {
+ while (kind != to_kind && !IsTerminalElementsKind(kind)) {
+ kind = GetNextTransitionElementsKind(kind);
+ current_map = Map::CopyAsElementsKind(current_map, kind, flag);
+ }
}
}
@@ -3796,40 +3729,38 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
Isolate* isolate = map->GetIsolate();
Context* native_context = isolate->context()->native_context();
- Object* maybe_array_maps = native_context->js_array_maps();
- if (maybe_array_maps->IsFixedArray()) {
- DisallowHeapAllocation no_gc;
- FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
- if (array_maps->get(from_kind) == *map) {
- Object* maybe_transitioned_map = array_maps->get(to_kind);
- if (maybe_transitioned_map->IsMap()) {
- return handle(Map::cast(maybe_transitioned_map));
+ if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
+ if (*map == native_context->fast_aliased_arguments_map()) {
+ DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
+ return handle(native_context->slow_aliased_arguments_map());
+ }
+ } else if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
+ if (*map == native_context->slow_aliased_arguments_map()) {
+ DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
+ return handle(native_context->fast_aliased_arguments_map());
+ }
+ } else {
+ Object* maybe_array_maps = map->is_strong()
+ ? native_context->js_array_strong_maps()
+ : native_context->js_array_maps();
+ if (maybe_array_maps->IsFixedArray()) {
+ DisallowHeapAllocation no_gc;
+ FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
+ if (array_maps->get(from_kind) == *map) {
+ Object* maybe_transitioned_map = array_maps->get(to_kind);
+ if (maybe_transitioned_map->IsMap()) {
+ return handle(Map::cast(maybe_transitioned_map));
+ }
}
}
}
- return TransitionElementsToSlow(map, to_kind);
-}
-
-
-Handle<Map> Map::TransitionElementsToSlow(Handle<Map> map,
- ElementsKind to_kind) {
- ElementsKind from_kind = map->elements_kind();
-
- if (from_kind == to_kind) {
- return map;
- }
-
- bool allow_store_transition =
- // Only remember the map transition if there is not an already existing
- // non-matching element transition.
- !map->IsUndefined() && !map->is_dictionary_map() &&
- IsTransitionElementsKind(from_kind);
-
+ DCHECK(!map->IsUndefined());
+ bool allow_store_transition = IsTransitionElementsKind(from_kind);
// Only store fast element maps in ascending generality.
if (IsFastElementsKind(to_kind)) {
- allow_store_transition &=
- IsTransitionableFastElementsKind(from_kind) &&
+ allow_store_transition =
+ allow_store_transition && IsTransitionableFastElementsKind(from_kind) &&
IsMoreGeneralElementsKindTransition(from_kind, to_kind);
}
@@ -3988,10 +3919,9 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
}
if (is_sloppy(language_mode)) return value;
- Handle<Object> args2[] = { name, proxy };
- THROW_NEW_ERROR(isolate, NewTypeError("no_setter_in_callback",
- HandleVector(args2, arraysize(args2))),
- Object);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kNoSetterInCallback, name, proxy),
+ Object);
}
@@ -4016,25 +3946,15 @@ MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
bool result_bool = result->BooleanValue();
if (is_strict(language_mode) && !result_bool) {
Handle<Object> handler(proxy->handler(), isolate);
- Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("delete"));
- Handle<Object> args[] = { handler, trap_name };
- THROW_NEW_ERROR(isolate, NewTypeError("handler_failed",
- HandleVector(args, arraysize(args))),
- Object);
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerDeleteFailed, handler),
+ Object);
}
return isolate->factory()->ToBoolean(result_bool);
}
-MaybeHandle<Object> JSProxy::DeleteElementWithHandler(
- Handle<JSProxy> proxy, uint32_t index, LanguageMode language_mode) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(proxy, name, language_mode);
-}
-
-
Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name) {
Isolate* isolate = proxy->GetIsolate();
@@ -4107,14 +4027,6 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
}
-Maybe<PropertyAttributes> JSProxy::GetElementAttributeWithHandler(
- Handle<JSProxy> proxy, Handle<JSReceiver> receiver, uint32_t index) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return GetPropertyAttributesWithHandler(proxy, receiver, name);
-}
-
-
void JSProxy::Fix(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
@@ -4235,62 +4147,77 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
}
+// static
+void ExecutableAccessorInfo::ClearSetter(Handle<ExecutableAccessorInfo> info) {
+ Handle<Object> object = v8::FromCData(info->GetIsolate(), nullptr);
+ info->set_setter(*object);
+}
+
+
// Reconfigures a property to a data property with attributes, even if it is not
// reconfigurable.
-MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
+// Requires a LookupIterator that does not look at the prototype chain beyond
+// hidden prototypes.
+MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ExecutableAccessorInfoHandling handling) {
- DCHECK(!value->IsTheHole());
- LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
bool is_observed = object->map()->is_observed() &&
- !it.isolate()->IsInternallyUsedPropertyName(name);
- for (; it.IsFound(); it.Next()) {
- switch (it.state()) {
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return value;
+ (it->IsElement() ||
+ !it->isolate()->IsInternallyUsedPropertyName(it->name()));
- case LookupIterator::INTERCEPTOR:
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (!it.isolate()->MayAccess(object)) {
- return SetPropertyWithFailedAccessCheck(&it, value, SLOPPY);
+ if (!it->HasAccess()) {
+ return SetPropertyWithFailedAccessCheck(it, value);
}
break;
- case LookupIterator::ACCESSOR: {
- PropertyDetails details = it.property_details();
- // Ensure the context isn't changed after calling into accessors.
- AssertNoContextChange ncc(it.isolate());
+ // If there's an interceptor, try to store the property with the
+ // interceptor.
+ // In case of success, the attributes will have been reset to the default
+ // attributes of the interceptor, rather than the incoming attributes.
+ //
+ // TODO(verwaest): JSProxy afterwards verifies the attributes that it
+ // claims to have and checks that they are compatible. If not, it
+ // throws. Here we should do the same.
+ case LookupIterator::INTERCEPTOR:
+ if (handling == DONT_FORCE_FIELD) {
+ MaybeHandle<Object> maybe_result =
+ JSObject::SetPropertyWithInterceptor(it, value);
+ if (!maybe_result.is_null()) return maybe_result;
+ if (it->isolate()->has_pending_exception()) return maybe_result;
+ }
+ break;
- Handle<Object> accessors = it.GetAccessors();
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> accessors = it->GetAccessors();
// Special handling for ExecutableAccessorInfo, which behaves like a
// data property.
- if (handling == DONT_FORCE_FIELD &&
- accessors->IsExecutableAccessorInfo()) {
+ if (accessors->IsExecutableAccessorInfo() &&
+ handling == DONT_FORCE_FIELD) {
+ PropertyDetails details = it->property_details();
+ // Ensure the context isn't changed after calling into accessors.
+ AssertNoContextChange ncc(it->isolate());
+
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- it.isolate(), result,
- JSObject::SetPropertyWithAccessor(it.GetReceiver(), it.name(),
- value, it.GetHolder<JSObject>(),
- accessors, STRICT),
- Object);
+ it->isolate(), result,
+ JSObject::SetPropertyWithAccessor(it, value, STRICT), Object);
DCHECK(result->SameValue(*value));
- if (details.attributes() == attributes) {
- return value;
- }
+ if (details.attributes() == attributes) return value;
// Reconfigure the accessor if attributes mismatch.
Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
- it.isolate(), Handle<ExecutableAccessorInfo>::cast(accessors));
+ it->isolate(), Handle<ExecutableAccessorInfo>::cast(accessors));
new_data->set_property_attributes(attributes);
// By clearing the setter we don't have to introduce a lookup to
// the setter, simply make it unavailable to reflect the
@@ -4298,99 +4225,183 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
if (attributes & READ_ONLY) {
ExecutableAccessorInfo::ClearSetter(new_data);
}
- SetPropertyCallback(object, name, new_data, attributes);
- if (is_observed) {
- RETURN_ON_EXCEPTION(
- it.isolate(),
- EnqueueChangeRecord(object, "reconfigure", name,
- it.isolate()->factory()->the_hole_value()),
- Object);
+
+ if (it->IsElement()) {
+ SetElementCallback(it->GetHolder<JSObject>(), it->index(), new_data,
+ attributes);
+ } else {
+ SetPropertyCallback(it->GetHolder<JSObject>(), it->name(), new_data,
+ attributes);
}
- return value;
+ } else {
+ it->ReconfigureDataProperty(value, attributes);
+ it->WriteDataValue(value);
}
- it.ReconfigureDataProperty(value, attributes);
- it.WriteDataValue(value);
-
if (is_observed) {
RETURN_ON_EXCEPTION(
- it.isolate(),
- EnqueueChangeRecord(object, "reconfigure", name,
- it.isolate()->factory()->the_hole_value()),
+ it->isolate(),
+ EnqueueChangeRecord(object, "reconfigure", it->GetName(),
+ it->factory()->the_hole_value()),
Object);
}
return value;
}
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
+ value, STRICT);
case LookupIterator::DATA: {
- PropertyDetails details = it.property_details();
- Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
+ PropertyDetails details = it->property_details();
+ Handle<Object> old_value = it->factory()->the_hole_value();
// Regular property update if the attributes match.
if (details.attributes() == attributes) {
- return SetDataProperty(&it, value);
+ return SetDataProperty(it, value);
+ }
+
+ // Special case: properties of typed arrays cannot be reconfigured to be
+ // non-writable or non-enumerable.
+ if (it->IsElement() && (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements())) {
+ return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
+ value, STRICT);
}
+
// Reconfigure the data property if the attributes mismatch.
- if (is_observed) old_value = it.GetDataValue();
+ if (is_observed) old_value = it->GetDataValue();
- it.ReconfigureDataProperty(value, attributes);
- it.WriteDataValue(value);
+ it->ReconfigureDataProperty(value, attributes);
+ it->WriteDataValue(value);
if (is_observed) {
if (old_value->SameValue(*value)) {
- old_value = it.isolate()->factory()->the_hole_value();
+ old_value = it->factory()->the_hole_value();
}
- RETURN_ON_EXCEPTION(
- it.isolate(),
- EnqueueChangeRecord(object, "reconfigure", name, old_value),
- Object);
+ RETURN_ON_EXCEPTION(it->isolate(),
+ EnqueueChangeRecord(object, "reconfigure",
+ it->GetName(), old_value),
+ Object);
}
-
return value;
}
}
}
- return AddDataProperty(&it, value, attributes, STRICT,
+ return AddDataProperty(it, value, attributes, STRICT,
CERTAINLY_NOT_STORE_FROM_KEYED);
}
-Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
- Handle<JSObject> holder,
- Handle<Object> receiver,
- Handle<Name> name) {
- Isolate* isolate = holder->GetIsolate();
- HandleScope scope(isolate);
+MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
+ Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+ DCHECK(!value->IsTheHole());
+ LookupIterator it(object, name, LookupIterator::OWN);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+}
+
+
+MaybeHandle<Object> JSObject::SetOwnElementIgnoreAttributes(
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it(isolate, object, index, LookupIterator::OWN);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+}
+
+
+MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
+ Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+ Isolate* isolate = object->GetIsolate();
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
+ LookupIterator::OWN);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+}
+
+
+Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value) {
+ DCHECK(it->GetReceiver()->IsJSObject());
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(it);
+ if (maybe.IsNothing()) return Nothing<bool>();
+
+ if (it->IsFound()) {
+ if (!it->IsConfigurable()) return Just(false);
+ } else {
+ if (!JSObject::cast(*it->GetReceiver())->IsExtensible()) return Just(false);
+ }
+
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(),
+ DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
+ Nothing<bool>());
+ return Just(true);
+}
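// CreateDataProperty above follows the spec outline: an existing property
// must be configurable to be replaced, a missing one requires an
// extensible receiver, and on success the property is defined with
// default (fully writable/enumerable/configurable) attributes. A
// simplified standalone model:
#include <map>
#include <string>

struct PropertyModel {
  int value = 0;
  bool configurable = true;
};

struct ObjectModel {
  std::map<std::string, PropertyModel> properties;
  bool extensible = true;
};

// Returns false, storing nothing, when the definition must be rejected.
bool CreateDataPropertyModel(ObjectModel* object, const std::string& name,
                             int value) {
  auto it = object->properties.find(name);
  if (it != object->properties.end()) {
    if (!it->second.configurable) return false;  // non-configurable: reject
  } else if (!object->extensible) {
    return false;  // cannot add to a non-extensible object
  }
  object->properties[name] = PropertyModel{value, true};
  return true;
}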
+
+
+Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
+ LookupIterator* it) {
+ Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
+ HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
- if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<InterceptorInfo> interceptor(it->GetInterceptor());
+ if (!it->IsElement() && it->name()->IsSymbol() &&
+ !interceptor->can_intercept_symbols()) {
return Just(ABSENT);
}
- PropertyCallbackArguments args(
- isolate, interceptor->data(), *receiver, *holder);
+ PropertyCallbackArguments args(isolate, interceptor->data(),
+ *it->GetReceiver(), *holder);
if (!interceptor->query()->IsUndefined()) {
- v8::GenericNamedPropertyQueryCallback query =
- v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
- interceptor->query());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder, *name));
- v8::Handle<v8::Integer> result = args.Call(query, v8::Utils::ToLocal(name));
+ v8::Local<v8::Integer> result;
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyQueryCallback query =
+ v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has", *holder, index));
+ result = args.Call(query, index);
+ } else {
+ Handle<Name> name = it->name();
+ v8::GenericNamedPropertyQueryCallback query =
+ v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
+ interceptor->query());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-has", *holder, *name));
+ result = args.Call(query, v8::Utils::ToLocal(name));
+ }
if (!result.IsEmpty()) {
DCHECK(result->IsInt32());
- return Just(static_cast<PropertyAttributes>(result->Int32Value()));
+ return Just(static_cast<PropertyAttributes>(
+ result->Int32Value(reinterpret_cast<v8::Isolate*>(isolate)
+ ->GetCurrentContext()).FromJust()));
}
} else if (!interceptor->getter()->IsUndefined()) {
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
- v8::Handle<v8::Value> result = args.Call(getter, v8::Utils::ToLocal(name));
+ // TODO(verwaest): Use GetPropertyWithInterceptor?
+ v8::Local<v8::Value> result;
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-get-has",
+ *holder, index));
+ result = args.Call(getter, index);
+ } else {
+ Handle<Name> name = it->name();
+
+ v8::GenericNamedPropertyGetterCallback getter =
+ v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+ interceptor->getter());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
+ result = args.Call(getter, v8::Utils::ToLocal(name));
+ }
if (!result.IsEmpty()) return Just(DONT_ENUM);
}
@@ -4399,18 +4410,6 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
}
-Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
- Handle<JSReceiver> object, Handle<Name> name) {
- // Check whether the name is an array index.
- uint32_t index = 0;
- if (object->IsJSObject() && name->AsArrayIndex(&index)) {
- return GetOwnElementAttribute(object, index);
- }
- LookupIterator it(object, name, LookupIterator::HIDDEN);
- return GetPropertyAttributes(&it);
-}
-
-
Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
@@ -4420,11 +4419,10 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
UNREACHABLE();
case LookupIterator::JSPROXY:
return JSProxy::GetPropertyAttributesWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->name());
+ it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName());
case LookupIterator::INTERCEPTOR: {
Maybe<PropertyAttributes> result =
- JSObject::GetPropertyAttributesWithInterceptor(
- it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
+ JSObject::GetPropertyAttributesWithInterceptor(it);
if (!result.IsJust()) return result;
if (result.FromJust() != ABSENT) return result;
break;
@@ -4443,118 +4441,6 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
}
-Maybe<PropertyAttributes> JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
- bool check_prototype) {
- Isolate* isolate = object->GetIsolate();
-
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayAccess(object)) {
- return GetElementAttributesWithFailedAccessCheck(isolate, object,
- receiver, index);
- }
- }
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return Just(ABSENT);
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
- index, check_prototype);
- }
-
- // Check for lookup interceptor except when bootstrapping.
- if (object->HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
- return JSObject::GetElementAttributeWithInterceptor(
- object, receiver, index, check_prototype);
- }
-
- return GetElementAttributeWithoutInterceptor(
- object, receiver, index, check_prototype);
-}
-
-
-Maybe<PropertyAttributes> JSObject::GetElementAttributeWithInterceptor(
- Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
- bool check_prototype) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- Maybe<PropertyAttributes> from_interceptor =
- GetElementAttributeFromInterceptor(object, receiver, index);
- if (!from_interceptor.IsJust()) return Nothing<PropertyAttributes>();
- if (from_interceptor.FromJust() != ABSENT)
- return Just(from_interceptor.FromJust());
-
- return GetElementAttributeWithoutInterceptor(object, receiver, index,
- check_prototype);
-}
-
-
-Maybe<PropertyAttributes> JSObject::GetElementAttributeFromInterceptor(
- Handle<JSObject> object, Handle<Object> receiver, uint32_t index) {
- Isolate* isolate = object->GetIsolate();
- AssertNoContextChange ncc(isolate);
-
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- PropertyCallbackArguments args(
- isolate, interceptor->data(), *receiver, *object);
- if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQueryCallback query =
- v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index));
- v8::Handle<v8::Integer> result = args.Call(query, index);
- if (!result.IsEmpty())
- return Just(static_cast<PropertyAttributes>(result->Int32Value()));
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess(
- "interceptor-indexed-get-has", *object, index));
- v8::Handle<v8::Value> result = args.Call(getter, index);
- if (!result.IsEmpty()) return Just(NONE);
- }
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
- return Just(ABSENT);
-}
-
-
-Maybe<PropertyAttributes> JSObject::GetElementAttributeWithoutInterceptor(
- Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
- bool check_prototype) {
- PropertyAttributes attr =
- object->GetElementsAccessor()->GetAttributes(object, index);
- if (attr != ABSENT) return Just(attr);
-
- // Handle [] on String objects.
- if (object->IsStringObjectWithCharacterAt(index)) {
- return Just(static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE));
- }
-
- if (!check_prototype) return Just(ABSENT);
-
- PrototypeIterator iter(object->GetIsolate(), object);
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::GetElementAttributeWithHandler(
- Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
- index);
- }
- if (iter.IsAtEnd()) return Just(ABSENT);
- return GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
- index, true);
-}
-
-
Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
Handle<FixedArray> array(
isolate->factory()->NewFixedArray(kEntries, TENURED));
@@ -4773,27 +4659,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
new_map->set_dictionary_map(false);
- if (old_map->is_prototype_map() && FLAG_track_prototype_users) {
- DCHECK(new_map->is_prototype_map());
-
- Object* maybe_old_prototype = old_map->prototype();
- if (maybe_old_prototype->IsJSObject()) {
- Handle<JSObject> old_prototype(JSObject::cast(maybe_old_prototype));
- bool was_registered =
- JSObject::UnregisterPrototypeUser(old_prototype, old_map);
- if (was_registered) {
- JSObject::LazyRegisterPrototypeUser(new_map, isolate);
- }
- }
- new_map->set_prototype_info(old_map->prototype_info());
- old_map->set_prototype_info(Smi::FromInt(0));
- if (FLAG_trace_prototype_users) {
- PrintF("Moving prototype_info %p from map %p to map %p.\n",
- reinterpret_cast<void*>(new_map->prototype_info()),
- reinterpret_cast<void*>(*old_map),
- reinterpret_cast<void*>(*new_map));
- }
- }
+ UpdatePrototypeUserRegistration(old_map, new_map, isolate);
#if TRACE_MAPS
if (FLAG_trace_maps) {
@@ -4979,15 +4845,15 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
dictionary = CopyFastElementsToDictionary(array, length, dictionary);
// Switch to using the dictionary as the backing storage for elements.
+ ElementsKind target_kind =
+ is_arguments ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS : DICTIONARY_ELEMENTS;
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
+  // Set the new map first to satisfy the elements type assert in set_elements().
+ JSObject::MigrateToMap(object, new_map);
+
if (is_arguments) {
FixedArray::cast(object->elements())->set(1, *dictionary);
} else {
- // Set the new map first to satify the elements type assert in
- // set_elements().
- Handle<Map> new_map =
- JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
-
- JSObject::MigrateToMap(object, new_map);
object->set_elements(*dictionary);
}
@@ -5001,8 +4867,7 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
}
#endif
- DCHECK(object->HasDictionaryElements() ||
- object->HasDictionaryArgumentsElements());
+ DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
return dictionary;
}
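
Editor's note: the hunk above hoists the map migration in NormalizeElements ahead of the is_arguments branch because set_elements() asserts that the backing store matches the map's elements kind, so the map must change first. A minimal standalone sketch of that ordering constraint (plain C++ with made-up types, not V8 internals):

```cpp
#include <cassert>
#include <memory>
#include <utility>

enum class ElementsKind { kFast, kDictionary };

struct BackingStore {
  ElementsKind kind;
};

struct ObjectModel {
  ElementsKind map_kind = ElementsKind::kFast;
  std::unique_ptr<BackingStore> elements;

  void set_elements(std::unique_ptr<BackingStore> store) {
    assert(store->kind == map_kind);  // models the "elements type assert"
    elements = std::move(store);
  }
};

int main() {
  ObjectModel o;
  auto dict =
      std::make_unique<BackingStore>(BackingStore{ElementsKind::kDictionary});
  o.map_kind = ElementsKind::kDictionary;  // migrate the map first...
  o.set_elements(std::move(dict));         // ...so the assert holds here
}
```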
@@ -5025,7 +4890,8 @@ static Smi* GenerateIdentityHash(Isolate* isolate) {
void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
DCHECK(!object->IsJSGlobalProxy());
Isolate* isolate = object->GetIsolate();
- SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
+ Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
+ JSObject::AddProperty(object, hash_code_symbol, hash, NONE);
}
@@ -5048,11 +4914,12 @@ Object* JSObject::GetIdentityHash() {
if (IsJSGlobalProxy()) {
return JSGlobalProxy::cast(this)->hash();
}
- Object* stored_value =
- GetHiddenProperty(isolate->factory()->identity_hash_string());
- return stored_value->IsSmi()
- ? stored_value
- : isolate->heap()->undefined_value();
+ Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
+ Handle<Object> stored_value =
+ Object::GetPropertyOrElement(Handle<Object>(this, isolate),
+ hash_code_symbol).ToHandleChecked();
+ return stored_value->IsSmi() ? *stored_value
+ : isolate->heap()->undefined_value();
}
@@ -5067,7 +4934,8 @@ Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
- SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
+ Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
+ JSObject::AddProperty(object, hash_code_symbol, hash, NONE);
return hash;
}
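
Editor's note: the identity-hash hunks above replace the hidden identity_hash_string slot with an ordinary property keyed by the private hash_code_symbol. A standalone sketch of the resulting get-or-create pattern (hypothetical names; the mask and zero-avoidance details are assumptions, not a quote of GenerateIdentityHash):

```cpp
#include <cstdint>
#include <map>
#include <random>

struct JSObjectModel {
  std::map<const char*, int32_t> properties;  // models symbol-keyed slots
};

// Stands in for the private hash_code_symbol.
static const char* const kHashCodeSymbol = "<hash_code_symbol>";

int32_t GetOrCreateIdentityHash(JSObjectModel& obj) {
  auto it = obj.properties.find(kHashCodeSymbol);
  if (it != obj.properties.end()) return it->second;  // already assigned
  static std::mt19937 rng{std::random_device{}()};
  int32_t hash = static_cast<int32_t>(rng()) & 0x3FFFFFFF;  // Smi-sized; assumed mask
  if (hash == 0) hash = 1;  // reserve 0 for "no hash yet" (assumption)
  obj.properties[kHashCodeSymbol] = hash;  // AddProperty(hash_code_symbol, hash, NONE)
  return hash;
}
```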
@@ -5086,8 +4954,6 @@ Object* JSObject::GetHiddenProperty(Handle<Name> key) {
DisallowHeapAllocation no_gc;
DCHECK(key->IsUniqueName());
if (IsJSGlobalProxy()) {
- // JSGlobalProxies store their hash internally.
- DCHECK(*key != GetHeap()->identity_hash_string());
// For a proxy, use the prototype as target object.
PrototypeIterator iter(GetIsolate(), this);
// If the proxy is detached, return undefined.
@@ -5098,15 +4964,6 @@ Object* JSObject::GetHiddenProperty(Handle<Name> key) {
DCHECK(!IsJSGlobalProxy());
Object* inline_value = GetHiddenPropertiesHashTable();
- if (inline_value->IsSmi()) {
- // Handle inline-stored identity hash.
- if (*key == GetHeap()->identity_hash_string()) {
- return inline_value;
- } else {
- return GetHeap()->the_hole_value();
- }
- }
-
if (inline_value->IsUndefined()) return GetHeap()->the_hole_value();
ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
@@ -5122,8 +4979,6 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
DCHECK(key->IsUniqueName());
if (object->IsJSGlobalProxy()) {
- // JSGlobalProxies store their hash internally.
- DCHECK(*key != *isolate->factory()->identity_hash_string());
// For a proxy, use the prototype as target object.
PrototypeIterator iter(isolate, object);
// If the proxy is detached, return undefined.
@@ -5137,13 +4992,6 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
- // If there is no backing store yet, store the identity hash inline.
- if (value->IsSmi() &&
- *key == *isolate->factory()->identity_hash_string() &&
- (inline_value->IsUndefined() || inline_value->IsSmi())) {
- return JSObject::SetHiddenPropertiesHashTable(object, value);
- }
-
Handle<ObjectHashTable> hashtable =
GetOrCreateHiddenPropertiesHashtable(object);
@@ -5175,9 +5023,7 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
Object* inline_value = object->GetHiddenPropertiesHashTable();
- // We never delete (inline-stored) identity hashes.
- DCHECK(*key != *isolate->factory()->identity_hash_string());
- if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
+ if (inline_value->IsUndefined()) return;
Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
bool was_present = false;
@@ -5241,14 +5087,7 @@ Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
Handle<ObjectHashTable> hashtable = ObjectHashTable::New(
isolate, kInitialCapacity, USE_CUSTOM_MINIMUM_CAPACITY);
- if (inline_value->IsSmi()) {
- // We were storing the identity hash inline and now allocated an actual
- // dictionary. Put the identity hash into the new dictionary.
- hashtable = ObjectHashTable::Put(hashtable,
- isolate->factory()->identity_hash_string(),
- inline_value);
- }
-
+ DCHECK(inline_value->IsUndefined());
SetHiddenPropertiesHashTable(object, hashtable);
return hashtable;
}
@@ -5265,23 +5104,40 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
- Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name) {
- Isolate* isolate = holder->GetIsolate();
+ LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ Handle<InterceptorInfo> interceptor(it->GetInterceptor());
+ if (interceptor->deleter()->IsUndefined()) return MaybeHandle<Object>();
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
- if (interceptor->deleter()->IsUndefined() ||
- (name->IsSymbol() && !interceptor->can_intercept_symbols())) {
+ PropertyCallbackArguments args(isolate, interceptor->data(),
+ *it->GetReceiver(), *holder);
+ v8::Local<v8::Boolean> result;
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", *holder, index));
+ result = args.Call(deleter, index);
+ } else if (it->name()->IsSymbol() && !interceptor->can_intercept_symbols()) {
return MaybeHandle<Object>();
+ } else {
+ Handle<Name> name = it->name();
+ v8::GenericNamedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
+ interceptor->deleter());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
+ result = args.Call(deleter, v8::Utils::ToLocal(name));
}
- v8::GenericNamedPropertyDeleterCallback deleter =
- v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
- interceptor->deleter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder);
- v8::Handle<v8::Boolean> result = args.Call(deleter, v8::Utils::ToLocal(name));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) return MaybeHandle<Object>();
@@ -5293,251 +5149,139 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
}
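
Editor's note: the rewritten DeletePropertyWithInterceptor takes a LookupIterator and dispatches on IsElement() to the indexed or named deleter, with an empty result meaning the interceptor declined. A standalone model of that dispatch shape (hypothetical types, not V8's callback machinery):

```cpp
#include <cstdint>
#include <functional>
#include <optional>
#include <string>

struct InterceptorModel {
  std::function<std::optional<bool>(uint32_t)> indexed_deleter;
  std::function<std::optional<bool>(const std::string&)> named_deleter;
};

struct LookupModel {
  bool is_element;
  uint32_t index;
  std::string name;
};

// An empty optional means "interceptor did not handle it"; the caller then
// falls through to the ordinary deletion path.
std::optional<bool> DeleteWithInterceptor(const InterceptorModel& interceptor,
                                          const LookupModel& it) {
  if (it.is_element) {
    return interceptor.indexed_deleter ? interceptor.indexed_deleter(it.index)
                                       : std::nullopt;
  }
  return interceptor.named_deleter ? interceptor.named_deleter(it.name)
                                   : std::nullopt;
}
```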
-MaybeHandle<Object> JSObject::DeleteElementWithInterceptor(
- Handle<JSObject> object,
- uint32_t index) {
- Isolate* isolate = object->GetIsolate();
- Factory* factory = isolate->factory();
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return factory->false_value();
- v8::IndexedPropertyDeleterCallback deleter =
- v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", *object, index));
- PropertyCallbackArguments args(
- isolate, interceptor->data(), *object, *object);
- v8::Handle<v8::Boolean> result = args.Call(deleter, index);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!result.IsEmpty()) {
- DCHECK(result->IsBoolean());
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- // Rebox CustomArguments::kReturnValueOffset before returning.
- return handle(*result_internal, isolate);
- }
- // TODO(verwaest): Shouldn't this be the mode that was passed in?
- MaybeHandle<Object> delete_result =
- object->GetElementsAccessor()->Delete(object, index, SLOPPY);
- return delete_result;
-}
-
-
-MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
- uint32_t index,
- LanguageMode language_mode) {
- Isolate* isolate = object->GetIsolate();
- Factory* factory = isolate->factory();
-
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return factory->false_value();
- }
-
- if (object->IsStringObjectWithCharacterAt(index)) {
- if (is_strict(language_mode)) {
- // Deleting a non-configurable property in strict mode.
- Handle<Object> name = factory->NewNumberFromUint(index);
- Handle<Object> args[] = {name, object};
- THROW_NEW_ERROR(isolate,
- NewTypeError("strict_delete_property",
- HandleVector(args, arraysize(args))),
- Object);
- }
- return factory->false_value();
- }
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return factory->false_value();
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return DeleteElement(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index,
- language_mode);
- }
-
- Handle<Object> old_value;
- bool should_enqueue_change_record = false;
- if (object->map()->is_observed()) {
- Maybe<bool> maybe = HasOwnElement(object, index);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- should_enqueue_change_record = maybe.FromJust();
- if (should_enqueue_change_record) {
- if (!GetOwnElementAccessorPair(object, index).is_null()) {
- old_value = Handle<Object>::cast(factory->the_hole_value());
- } else {
- old_value = Object::GetElement(
- isolate, object, index).ToHandleChecked();
- }
- }
- }
-
- // Skip interceptor if forcing deletion.
- MaybeHandle<Object> maybe_result;
- if (object->HasIndexedInterceptor()) {
- maybe_result = DeleteElementWithInterceptor(object, index);
- } else {
- maybe_result =
- object->GetElementsAccessor()->Delete(object, index, language_mode);
- }
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object);
-
- if (should_enqueue_change_record) {
- Maybe<bool> maybe = HasOwnElement(object, index);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- if (!maybe.FromJust()) {
- Handle<String> name = factory->Uint32ToString(index);
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "delete", name, old_value),
- Object);
- }
- }
-
- return result;
-}
-
-
void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name) {
+ Handle<Name> name, int entry) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
- Handle<NameDictionary> dictionary(object->property_dictionary());
- int entry = dictionary->FindEntry(name);
- DCHECK_NE(NameDictionary::kNotFound, entry);
- // If we have a global object, invalidate the cell and swap in a new one.
if (object->IsGlobalObject()) {
+ // If we have a global object, invalidate the cell and swap in a new one.
+ Handle<GlobalDictionary> dictionary(object->global_dictionary());
+ DCHECK_NE(GlobalDictionary::kNotFound, entry);
+
auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
cell->set_value(isolate->heap()->the_hole_value());
- // TODO(dcarney): InvalidateForDelete
- dictionary->DetailsAtPut(entry, dictionary->DetailsAt(entry).set_cell_type(
- PropertyCellType::kInvalidated));
- return;
- }
+ // TODO(ishell): InvalidateForDelete
+ cell->set_property_details(
+ cell->property_details().set_cell_type(PropertyCellType::kInvalidated));
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary());
+ DCHECK_NE(NameDictionary::kNotFound, entry);
- NameDictionary::DeleteProperty(dictionary, entry);
- Handle<NameDictionary> new_properties =
- NameDictionary::Shrink(dictionary, name);
- object->set_properties(*new_properties);
+ NameDictionary::DeleteProperty(dictionary, entry);
+ Handle<NameDictionary> new_properties =
+ NameDictionary::Shrink(dictionary, name);
+ object->set_properties(*new_properties);
+ }
}
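
Editor's note: for global objects the delete path now invalidates the PropertyCell (hole the value, flip the cell type) instead of removing the dictionary entry, so code that embedded the cell can be deoptimized. One simplified way to model the idea (the real code swaps in a fresh cell via PropertyCell::InvalidateEntry):

```cpp
#include <memory>
#include <string>
#include <unordered_map>

enum class CellType { kMutable, kInvalidated };

struct PropertyCellModel {
  bool is_hole = false;
  CellType type = CellType::kMutable;
};

struct GlobalDictionaryModel {
  std::unordered_map<std::string, std::shared_ptr<PropertyCellModel>> entries;

  void DeleteNormalized(const std::string& name) {
    auto it = entries.find(name);
    if (it == entries.end()) return;
    it->second->is_hole = true;                 // cell->set_value(the_hole)
    it->second->type = CellType::kInvalidated;  // set_cell_type(kInvalidated)
    // The dictionary entry itself survives; code holding the old cell
    // observes the invalidation.
  }
};
```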
-MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
- Handle<Name> name,
- LanguageMode language_mode) {
- // ECMA-262, 3rd, 8.6.2.5
- DCHECK(name->IsName());
-
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return DeleteElement(object, index, language_mode);
+// ECMA-262, 3rd, 8.6.2.5
+MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
+ LanguageMode language_mode) {
+ Isolate* isolate = it->isolate();
+ if (it->state() == LookupIterator::JSPROXY) {
+ return JSProxy::DeletePropertyWithHandler(it->GetHolder<JSProxy>(),
+ it->GetName(), language_mode);
}
- LookupIterator it(object, name, LookupIterator::HIDDEN);
+ Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
- bool is_observed = object->map()->is_observed() &&
- !it.isolate()->IsInternallyUsedPropertyName(name);
- Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
+ bool is_observed =
+ receiver->map()->is_observed() &&
+ (it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()));
- for (; it.IsFound(); it.Next()) {
- switch (it.state()) {
+ Handle<Object> old_value = it->factory()->the_hole_value();
+
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (it.HasAccess()) break;
- it.isolate()->ReportFailedAccessCheck(it.GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it.isolate(), Object);
- return it.isolate()->factory()->false_value();
+ if (it->HasAccess()) break;
+ isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return it->factory()->false_value();
case LookupIterator::INTERCEPTOR: {
MaybeHandle<Object> maybe_result =
- JSObject::DeletePropertyWithInterceptor(it.GetHolder<JSObject>(),
- object, it.name());
+ JSObject::DeletePropertyWithInterceptor(it);
// Delete with interceptor succeeded. Return result.
if (!maybe_result.is_null()) return maybe_result;
// An exception was thrown in the interceptor. Propagate.
- if (it.isolate()->has_pending_exception()) return maybe_result;
+ if (isolate->has_pending_exception()) return maybe_result;
break;
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return it.isolate()->factory()->true_value();
+ return it->factory()->true_value();
case LookupIterator::DATA:
if (is_observed) {
- old_value = it.GetDataValue();
+ old_value = it->GetDataValue();
}
// Fall through.
case LookupIterator::ACCESSOR: {
- if (!it.IsConfigurable()) {
- // Fail if the property is not configurable.
+ if (!it->IsConfigurable() || receiver->map()->is_strong()) {
+ // Fail if the property is not configurable, or on a strong object.
if (is_strict(language_mode)) {
- Handle<Object> args[] = {name, object};
- THROW_NEW_ERROR(it.isolate(),
- NewTypeError("strict_delete_property",
- HandleVector(args, arraysize(args))),
- Object);
+ MessageTemplate::Template templ =
+ receiver->map()->is_strong()
+ ? MessageTemplate::kStrongDeleteProperty
+ : MessageTemplate::kStrictDeleteProperty;
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(templ, it->GetName(), receiver), Object);
}
- return it.isolate()->factory()->false_value();
+ return it->factory()->false_value();
}
- PropertyNormalizationMode mode = object->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
- Handle<JSObject> holder = it.GetHolder<JSObject>();
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
// TODO(verwaest): Remove this temporary compatibility hack when blink
// tests are updated.
- if (!holder.is_identical_to(object) &&
- !(object->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
- return it.isolate()->factory()->true_value();
+ if (!holder.is_identical_to(receiver) &&
+ !(receiver->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
+ return it->factory()->true_value();
}
- NormalizeProperties(holder, mode, 0, "DeletingProperty");
- DeleteNormalizedProperty(holder, name);
- ReoptimizeIfPrototype(holder);
+ it->Delete();
if (is_observed) {
- RETURN_ON_EXCEPTION(
- it.isolate(),
- EnqueueChangeRecord(object, "delete", name, old_value), Object);
+ RETURN_ON_EXCEPTION(isolate,
+ JSObject::EnqueueChangeRecord(
+ receiver, "delete", it->GetName(), old_value),
+ Object);
}
- return it.isolate()->factory()->true_value();
+ return it->factory()->true_value();
}
}
}
- return it.isolate()->factory()->true_value();
+ return it->factory()->true_value();
}
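
Editor's note: the delete logic above is now a single LookupIterator state machine shared by named and indexed paths. A skeleton of its control flow (enum model only; the real transitions live in LookupIterator):

```cpp
enum class State {
  ACCESS_CHECK, INTERCEPTOR, INTEGER_INDEXED_EXOTIC, DATA, ACCESSOR, NOT_FOUND
};

struct IterModel {
  State states[4] = {State::ACCESS_CHECK, State::INTERCEPTOR, State::DATA,
                     State::NOT_FOUND};
  int pos = 0;
  bool IsFound() const { return states[pos] != State::NOT_FOUND; }
  State state() const { return states[pos]; }
  void Next() { ++pos; }
};

// Checks that pass fall through to the next state; DATA/ACCESSOR is terminal
// and answers the delete.
bool DeleteSkeleton(IterModel& it, bool configurable) {
  for (; it.IsFound(); it.Next()) {
    switch (it.state()) {
      case State::ACCESS_CHECK:           break;        // assume access granted
      case State::INTERCEPTOR:            break;        // interceptor declined
      case State::INTEGER_INDEXED_EXOTIC: return true;  // typed-array OOB no-op
      case State::DATA:
      case State::ACCESSOR:               return configurable;
      case State::NOT_FOUND:              return true;
    }
  }
  return true;  // property absent: delete succeeds vacuously
}
```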
MaybeHandle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
uint32_t index,
LanguageMode language_mode) {
- if (object->IsJSProxy()) {
- return JSProxy::DeleteElementWithHandler(Handle<JSProxy>::cast(object),
- index, language_mode);
- }
- return JSObject::DeleteElement(Handle<JSObject>::cast(object), index,
- language_mode);
+ LookupIterator it(object->GetIsolate(), object, index,
+ LookupIterator::HIDDEN);
+ return DeleteProperty(&it, language_mode);
}
MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
- if (object->IsJSProxy()) {
- return JSProxy::DeletePropertyWithHandler(Handle<JSProxy>::cast(object),
- name, language_mode);
- }
- return JSObject::DeleteProperty(Handle<JSObject>::cast(object), name,
- language_mode);
+ LookupIterator it(object, name, LookupIterator::HIDDEN);
+ return JSObject::DeleteProperty(&it, language_mode);
+}
+
+
+MaybeHandle<Object> JSReceiver::DeletePropertyOrElement(
+ Handle<JSReceiver> object, Handle<Name> name, LanguageMode language_mode) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ name->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ return JSObject::DeleteProperty(&it, language_mode);
}
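
Editor's note: DeletePropertyOrElement relies on LookupIterator::PropertyOrElement to route names that are canonical array indices down the element path. For reference, a standalone version of the index test (the ECMA-262 array-index rule; V8 implements this as Name::AsArrayIndex):

```cpp
#include <cstdint>
#include <string>

// A name is an array index iff it is the canonical decimal form of a uint32
// strictly below 2^32 - 1 (no leading zeros, except "0" itself).
bool AsArrayIndex(const std::string& name, uint32_t* index) {
  if (name.empty() || name.size() > 10) return false;
  if (name.size() > 1 && name[0] == '0') return false;
  uint64_t value = 0;
  for (char c : name) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + static_cast<uint64_t>(c - '0');
  }
  if (value >= 0xFFFFFFFFu) return false;  // 2^32 - 1 is the array *length* cap
  *index = static_cast<uint32_t>(value);
  return true;
}
```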
@@ -5611,7 +5355,8 @@ bool JSObject::ReferencesObject(Object* obj) {
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS: {
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
// Check the mapped parameters.
int length = parameter_map->length();
@@ -5702,16 +5447,14 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
// It's not possible to seal objects with external array elements
if (object->HasExternalArrayElements() ||
object->HasFixedTypedArrayElements()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError("cant_prevent_ext_external_array_elements",
- HandleVector(&object, 1)),
- Object);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
+ Object);
}
// If there are fast elements we normalize.
Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() ||
- object->HasDictionaryArgumentsElements());
+ DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
@@ -5736,6 +5479,17 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
}
+bool JSObject::IsExtensible() {
+ if (IsJSGlobalProxy()) {
+ PrototypeIterator iter(GetIsolate(), this);
+ if (iter.IsAtEnd()) return false;
+ DCHECK(iter.GetCurrent()->IsJSGlobalObject());
+ return JSObject::cast(iter.GetCurrent())->map()->is_extensible();
+ }
+ return map()->is_extensible();
+}
+
+
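
Editor's note: the new JSObject::IsExtensible forwards through a JSGlobalProxy to the underlying global object's map and treats a detached proxy (empty prototype chain) as non-extensible. A tiny model of that forwarding (made-up types):

```cpp
struct MapModel {
  bool is_extensible = true;
};

struct ObjModel {
  MapModel map;
  bool is_global_proxy = false;
  ObjModel* prototype = nullptr;  // the global object, when attached

  bool IsExtensible() const {
    if (is_global_proxy) {
      if (prototype == nullptr) return false;  // detached proxy
      return prototype->map.is_extensible;     // ask the real global
    }
    return map.is_extensible;
  }
};
```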
Handle<SeededNumberDictionary> JSObject::GetNormalizedElementDictionary(
Handle<JSObject> object) {
DCHECK(!object->elements()->IsDictionary());
@@ -5811,10 +5565,9 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
// It's not possible to seal or freeze objects with external array elements
if (object->HasExternalArrayElements() ||
object->HasFixedTypedArrayElements()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError("cant_prevent_ext_external_array_elements",
- HandleVector(&object, 1)),
- Object);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
+ Object);
}
Handle<SeededNumberDictionary> new_element_dictionary;
@@ -5841,8 +5594,7 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
DCHECK(transition_map->has_dictionary_elements());
DCHECK(!transition_map->is_extensible());
JSObject::MigrateToMap(object, transition_map);
- } else if (object->HasFastProperties() &&
- TransitionArray::CanHaveMoreTransitions(old_map)) {
+ } else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
// Create a new descriptor array with the appropriate property attributes
Handle<Map> new_map = Map::CopyForPreventExtensions(
old_map, attrs, transition_marker, "CopyForPreventExtensions");
@@ -5862,7 +5614,11 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
JSObject::MigrateToMap(object, new_map);
if (attrs != NONE) {
- ApplyAttributesToDictionary(object->property_dictionary(), attrs);
+ if (object->IsGlobalObject()) {
+ ApplyAttributesToDictionary(object->global_dictionary(), attrs);
+ } else {
+ ApplyAttributesToDictionary(object->property_dictionary(), attrs);
+ }
}
}
@@ -5906,8 +5662,7 @@ void JSObject::SetObserved(Handle<JSObject> object) {
if (transition != NULL) {
new_map = handle(transition, isolate);
DCHECK(new_map->is_observed());
- } else if (object->HasFastProperties() &&
- TransitionArray::CanHaveMoreTransitions(old_map)) {
+ } else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
new_map = Map::CopyForObserved(old_map);
} else {
new_map = Map::Copy(old_map, "SlowObserved");
@@ -6127,7 +5882,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
@@ -6354,14 +6110,23 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
object->map()->SetEnumLength(own_property_count);
}
return storage;
+ } else if (object->IsGlobalObject()) {
+ Handle<GlobalDictionary> dictionary(object->global_dictionary());
+ int length = dictionary->NumberOfEnumElements();
+ if (length == 0) {
+ return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+ }
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+ dictionary->CopyEnumKeysTo(*storage);
+ return storage;
} else {
Handle<NameDictionary> dictionary(object->property_dictionary());
- int length = dictionary->NumberOfEnumElements(*object);
+ int length = dictionary->NumberOfEnumElements();
if (length == 0) {
return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
}
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- dictionary->CopyEnumKeysTo(*object, *storage);
+ dictionary->CopyEnumKeysTo(*storage);
return storage;
}
}
@@ -6535,7 +6300,8 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
return;
}
break;
- case SLOPPY_ARGUMENTS_ELEMENTS: {
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
// Ascertain whether we have read-only properties or an existing
// getter/setter pair in an arguments elements dictionary backing
// store.
@@ -6601,8 +6367,7 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
// Normalize elements to make this operation simple.
bool had_dictionary_elements = object->HasDictionaryElements();
Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() ||
- object->HasDictionaryArgumentsElements());
+ DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
// Update the dictionary with the new ACCESSOR_CONSTANT property.
dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
details);
@@ -6657,21 +6422,6 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Object> setter,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
- }
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- DefineAccessor(Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
- name, getter, setter, attributes);
- return isolate->factory()->undefined_value();
- }
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -6680,40 +6430,34 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
// Try to flatten before operating on the string.
if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
- uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!it.HasAccess()) {
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
+ }
+ it.Next();
+ }
Handle<Object> old_value = isolate->factory()->the_hole_value();
bool is_observed = object->map()->is_observed() &&
!isolate->IsInternallyUsedPropertyName(name);
bool preexists = false;
if (is_observed) {
- if (is_element) {
- Maybe<bool> maybe = HasOwnElement(object, index);
- // Workaround for a GCC 4.4.3 bug which leads to "‘preexists’ may be used
- // uninitialized in this function".
- if (!maybe.IsJust()) {
- DCHECK(false);
- return isolate->factory()->undefined_value();
- }
- preexists = maybe.FromJust();
- if (preexists && GetOwnElementAccessorPair(object, index).is_null()) {
- old_value =
- Object::GetElement(isolate, object, index).ToHandleChecked();
- }
- } else {
- LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- CHECK(GetPropertyAttributes(&it).IsJust());
- preexists = it.IsFound();
- if (preexists && (it.state() == LookupIterator::DATA ||
- it.GetAccessors()->IsAccessorInfo())) {
- old_value = GetProperty(&it).ToHandleChecked();
- }
+ CHECK(GetPropertyAttributes(&it).IsJust());
+ preexists = it.IsFound();
+ if (preexists && (it.state() == LookupIterator::DATA ||
+ it.GetAccessors()->IsAccessorInfo())) {
+ old_value = GetProperty(&it).ToHandleChecked();
}
}
- if (is_element) {
- DefineElementAccessor(object, index, getter, setter, attributes);
+ if (it.IsElement()) {
+ DefineElementAccessor(it.GetStoreTarget(), it.index(), getter, setter,
+ attributes);
} else {
DCHECK(getter->IsSpecFunction() || getter->IsUndefined() ||
getter->IsNull());
@@ -6721,11 +6465,6 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
setter->IsNull());
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull() || !setter->IsNull());
- LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::ACCESS_CHECK) {
- // We already did an access check before. We do have access.
- it.Next();
- }
if (!getter->IsNull()) {
it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
}
@@ -6747,75 +6486,49 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Handle<AccessorInfo> info) {
Isolate* isolate = object->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<Name> name(Name::cast(info->name()));
-
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return factory->undefined_value();
- }
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return object;
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return SetAccessor(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), info);
- }
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
+ Handle<Name> name(Name::cast(info->name()));
if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
- uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
-
- if (is_element) {
- if (object->IsJSArray()) return factory->undefined_value();
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- // Accessors overwrite previous callbacks (cf. with getters/setters).
- switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- break;
+ // Duplicate ACCESS_CHECK outside of GetPropertyAttributes for the case that
+ // the FailedAccessCheckCallbackFunction doesn't throw an exception.
+ //
+ // TODO(verwaest): Force throw an exception if the callback doesn't, so we can
+ // remove reliance on default return values.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!it.HasAccess()) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return it.factory()->undefined_value();
+ }
+ it.Next();
+ }
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
- case TYPE##_ELEMENTS: \
+ CHECK(GetPropertyAttributes(&it).IsJust());
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- // Ignore getters and setters on pixel and external array
- // elements.
- return factory->undefined_value();
+ // ES5 forbids turning a property into an accessor if it's not
+ // configurable. See 8.6.1 (Table 5).
+ if (it.IsFound() && (it.IsReadOnly() || !it.IsConfigurable())) {
+ return it.factory()->undefined_value();
+ }
- case DICTIONARY_ELEMENTS:
- break;
- case SLOPPY_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- }
+ // Ignore accessors on typed arrays.
+ if (it.IsElement() && (object->HasFixedTypedArrayElements() ||
+ object->HasExternalArrayElements())) {
+ return it.factory()->undefined_value();
+ }
- SetElementCallback(object, index, info, info->property_attributes());
+ if (it.IsElement()) {
+ SetElementCallback(object, it.index(), info, info->property_attributes());
} else {
- // Lookup the name.
- LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- CHECK(GetPropertyAttributes(&it).IsJust());
- // ES5 forbids turning a property into an accessor if it's not
- // configurable. See 8.6.1 (Table 5).
- if (it.IsFound() && (it.IsReadOnly() || !it.IsConfigurable())) {
- return factory->undefined_value();
- }
-
SetPropertyCallback(object, name, info, info->property_attributes());
}
@@ -6832,70 +6545,40 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
// interceptor calls.
AssertNoContextChange ncc(isolate);
- // Make the lookup and include prototypes.
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- Handle<Object> current = PrototypeIterator::GetCurrent(iter);
- // Check access rights if needed.
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayAccess(Handle<JSObject>::cast(current))) {
- isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(current));
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, name, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+
+ for (; it.IsFound(); it.Next()) {
+ switch (it.state()) {
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESS_CHECK:
+ if (it.HasAccess()) continue;
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
- }
- if (current->IsJSObject() &&
- Handle<JSObject>::cast(current)->HasDictionaryElements()) {
- JSObject* js_object = JSObject::cast(*current);
- SeededNumberDictionary* dictionary = js_object->element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- if (dictionary->DetailsAt(entry).type() == ACCESSOR_CONSTANT &&
- element->IsAccessorPair()) {
- return handle(AccessorPair::cast(element)->GetComponent(component),
- isolate);
- }
- }
- }
- }
- } else {
- LookupIterator it(object, name,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- for (; it.IsFound(); it.Next()) {
- switch (it.state()) {
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return isolate->factory()->undefined_value();
- case LookupIterator::ACCESS_CHECK:
- if (it.HasAccess()) continue;
- isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
-
- case LookupIterator::JSPROXY:
- return isolate->factory()->undefined_value();
-
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return isolate->factory()->undefined_value();
- case LookupIterator::DATA:
- continue;
- case LookupIterator::ACCESSOR: {
- Handle<Object> maybe_pair = it.GetAccessors();
- if (maybe_pair->IsAccessorPair()) {
- return handle(
- AccessorPair::cast(*maybe_pair)->GetComponent(component),
- isolate);
- }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return isolate->factory()->undefined_value();
+ case LookupIterator::DATA:
+ continue;
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> maybe_pair = it.GetAccessors();
+ if (maybe_pair->IsAccessorPair()) {
+ return handle(
+ AccessorPair::cast(*maybe_pair)->GetComponent(component),
+ isolate);
}
}
}
}
+
return isolate->factory()->undefined_value();
}
@@ -6933,6 +6616,8 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
return GetHeap()->undefined_value();
+ } else if (IsGlobalObject()) {
+ return global_dictionary()->SlowReverseLookup(value);
} else {
return property_dictionary()->SlowReverseLookup(value);
}
@@ -7086,8 +6771,9 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
if (old_size == 0) {
descriptors = DescriptorArray::Allocate(map->GetIsolate(), 0, 1);
} else {
- EnsureDescriptorSlack(
- map, SlackForArraySize(old_size, kMaxNumberOfDescriptors));
+ int slack = SlackForArraySize(map->is_prototype_map(), old_size,
+ kMaxNumberOfDescriptors);
+ EnsureDescriptorSlack(map, slack);
descriptors = handle(map->instance_descriptors());
}
}
@@ -7272,8 +6958,9 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
Handle<Map> new_map = CopyForTransition(map, "CopyAsElementsKind");
new_map->set_elements_kind(kind);
- ConnectElementsTransition(map, new_map);
-
+ Isolate* isolate = map->GetIsolate();
+ Handle<Name> name = isolate->factory()->elements_transition_symbol();
+ ConnectTransition(map, new_map, name, SPECIAL_TRANSITION);
return new_map;
}
@@ -7401,6 +7088,25 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
}
+Handle<Map> Map::FixProxy(Handle<Map> map, InstanceType type, int size) {
+ DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
+ DCHECK(map->IsJSProxyMap());
+
+ Isolate* isolate = map->GetIsolate();
+
+ // Allocate fresh map.
+ // TODO(rossberg): Once we optimize proxies, cache these maps.
+ Handle<Map> new_map = isolate->factory()->NewMap(type, size);
+
+ Handle<Object> prototype(map->prototype(), isolate);
+ Map::SetPrototype(new_map, prototype);
+
+ map->NotifyLeafMapLayoutChange();
+
+ return new_map;
+}
+
+
bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
PropertyDetails details = GetDetails(descriptor);
switch (details.type()) {
@@ -7423,6 +7129,44 @@ bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
}
+// static
+Handle<Map> Map::PrepareForDataElement(Handle<Map> map, Handle<Object> value) {
+ ElementsKind kind = map->elements_kind();
+ bool holey = IsHoleyElementsKind(kind);
+
+ switch (kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ if (value->IsSmi()) return map;
+ kind = value->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
+ break;
+
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ if (value->IsNumber()) return map;
+ kind = FAST_ELEMENTS;
+ break;
+
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ return map;
+ }
+
+ if (holey) kind = GetHoleyElementsKind(kind);
+ return Map::AsElementsKind(map, kind);
+}
+
+
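
Editor's note: Map::PrepareForDataElement generalizes the receiver's elements kind just enough to hold the incoming value — SMI storage widens to DOUBLE for other numbers and to tagged otherwise, DOUBLE widens to tagged for non-numbers, and holeyness is preserved. A standalone model of that lattice walk (enum stand-ins for ElementsKind):

```cpp
enum class Kind { kSmi, kHoleySmi, kDouble, kHoleyDouble, kTagged, kHoleyTagged };
enum class ValueClass { kSmi, kHeapNumber, kOther };

bool IsHoley(Kind k) {
  return k == Kind::kHoleySmi || k == Kind::kHoleyDouble ||
         k == Kind::kHoleyTagged;
}

Kind PrepareForDataElement(Kind kind, ValueClass v) {
  const bool holey = IsHoley(kind);
  switch (kind) {
    case Kind::kSmi:
    case Kind::kHoleySmi:
      if (v == ValueClass::kSmi) return kind;  // fits as-is
      kind = (v == ValueClass::kHeapNumber) ? Kind::kDouble : Kind::kTagged;
      break;
    case Kind::kDouble:
    case Kind::kHoleyDouble:
      if (v != ValueClass::kOther) return kind;  // any number fits
      kind = Kind::kTagged;
      break;
    default:
      return kind;  // tagged (and, in V8, dictionary/typed kinds) hold anything
  }
  if (holey) {
    kind = (kind == Kind::kDouble) ? Kind::kHoleyDouble : Kind::kHoleyTagged;
  }
  return kind;
}
```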
+// static
Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
Handle<Object> value) {
// Dictionaries can store any property value.
@@ -8209,11 +7953,8 @@ MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike(
Handle<FixedArray> content, Handle<JSObject> array, KeyFilter filter) {
DCHECK(array->IsJSArray() || array->HasSloppyArgumentsElements());
ElementsAccessor* accessor = array->GetElementsAccessor();
- Handle<FixedArray> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- array->GetIsolate(), result,
- accessor->AddElementsToFixedArray(array, array, content, filter),
- FixedArray);
+ Handle<FixedArray> result =
+ accessor->AddElementsToFixedArray(array, content, filter);
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -8230,25 +7971,27 @@ MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike(
MaybeHandle<FixedArray> FixedArray::UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second) {
- ElementsAccessor* accessor = ElementsAccessor::ForArray(second);
- Handle<FixedArray> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- first->GetIsolate(), result,
- accessor->AddElementsToFixedArray(
- Handle<Object>::null(), // receiver
- Handle<JSObject>::null(), // holder
- first, Handle<FixedArrayBase>::cast(second), ALL_KEYS),
- FixedArray);
-
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- DisallowHeapAllocation no_allocation;
- for (int i = 0; i < result->length(); i++) {
- Object* current = result->get(i);
- DCHECK(current->IsNumber() || current->IsName());
+ if (second->length() == 0) return first;
+ if (first->length() == 0) return second;
+ Isolate* isolate = first->GetIsolate();
+ Handle<FixedArray> result =
+ isolate->factory()->NewFixedArray(first->length() + second->length());
+ for (int i = 0; i < first->length(); i++) {
+ result->set(i, first->get(i));
+ }
+ int pos = first->length();
+ for (int j = 0; j < second->length(); j++) {
+ Object* current = second->get(j);
+ int i;
+ for (i = 0; i < first->length(); i++) {
+ if (current->KeyEquals(first->get(i))) break;
+ }
+ if (i == first->length()) {
+ result->set(pos++, current);
}
}
-#endif
+
+ result->Shrink(pos);
return result;
}
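
Editor's note: the UnionOfKeys rewrite drops the ElementsAccessor round-trip for a direct quadratic merge — every key of second without a KeyEquals match in first is appended, then the result is shrunk. That is O(n·m), acceptable for the small key arrays this runs on. A standalone equivalent (std::vector model, KeyEquals abstracted as a predicate):

```cpp
#include <vector>

template <typename T, typename Eq>
std::vector<T> UnionOfKeys(const std::vector<T>& first,
                           const std::vector<T>& second, Eq key_equals) {
  if (second.empty()) return first;
  if (first.empty()) return second;
  std::vector<T> result = first;
  result.reserve(first.size() + second.size());
  for (const T& candidate : second) {
    bool seen = false;
    for (const T& existing : first) {
      if (key_equals(candidate, existing)) { seen = true; break; }
    }
    if (!seen) result.push_back(candidate);  // keep first occurrence only
  }
  return result;  // vector sizing plays the role of FixedArray::Shrink
}
```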
@@ -8310,59 +8053,46 @@ void WeakFixedArray::Set(Handle<WeakFixedArray> array, int index,
// static
-Handle<WeakFixedArray> WeakFixedArray::Add(
- Handle<Object> maybe_array, Handle<HeapObject> value,
- SearchForDuplicates search_for_duplicates, bool* was_present) {
+Handle<WeakFixedArray> WeakFixedArray::Add(Handle<Object> maybe_array,
+ Handle<HeapObject> value,
+ int* assigned_index) {
Handle<WeakFixedArray> array =
(maybe_array.is_null() || !maybe_array->IsWeakFixedArray())
? Allocate(value->GetIsolate(), 1, Handle<WeakFixedArray>::null())
: Handle<WeakFixedArray>::cast(maybe_array);
- if (was_present != NULL) *was_present = false;
- if (search_for_duplicates == kAddIfNotFound) {
- for (int i = 0; i < array->Length(); ++i) {
- if (array->Get(i) == *value) {
- if (was_present != NULL) *was_present = true;
- return array;
- }
- }
-#if 0 // Enable this if you want to check your search_for_duplicates flags.
- } else {
- for (int i = 0; i < array->Length(); ++i) {
- DCHECK_NE(*value, array->Get(i));
- }
-#endif
- }
-
// Try to store the new entry if there's room. Optimize for consecutive
// accesses.
int first_index = array->last_used_index();
- if (array->Length() > 0) {
+ int length = array->Length();
+ if (length > 0) {
for (int i = first_index;;) {
if (array->IsEmptySlot((i))) {
WeakFixedArray::Set(array, i, value);
+ if (assigned_index != NULL) *assigned_index = i;
return array;
}
if (FLAG_trace_weak_arrays) {
PrintF("[WeakFixedArray: searching for free slot]\n");
}
- i = (i + 1) % array->Length();
+ i = (i + 1) % length;
if (i == first_index) break;
}
}
// No usable slot found, grow the array.
- int new_length =
- array->Length() == 0 ? 1 : array->Length() + (array->Length() >> 1) + 4;
+ int new_length = length == 0 ? 1 : length + (length >> 1) + 4;
Handle<WeakFixedArray> new_array =
Allocate(array->GetIsolate(), new_length, array);
if (FLAG_trace_weak_arrays) {
PrintF("[WeakFixedArray: growing to size %d ]\n", new_length);
}
- WeakFixedArray::Set(new_array, array->Length(), value);
+ WeakFixedArray::Set(new_array, length, value);
+ if (assigned_index != NULL) *assigned_index = length;
return new_array;
}
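
Editor's note: WeakFixedArray::Add now reports the slot it used via the assigned_index out-parameter (replacing the old duplicate search), scanning circularly from last_used_index for a cleared slot and growing by roughly 1.5x + 4 when full. A standalone model (vector of nullable pointers instead of weak handles):

```cpp
#include <cstddef>
#include <vector>

// Returns the slot `value` was stored in, mirroring *assigned_index.
template <typename T>
std::size_t WeakArrayAdd(std::vector<T*>& array, std::size_t& last_used,
                         T* value) {
  const std::size_t length = array.size();
  if (length > 0) {
    std::size_t i = last_used;
    do {
      if (array[i] == nullptr) {  // a cleared ("empty") slot
        array[i] = value;
        last_used = i;
        return i;
      }
      i = (i + 1) % length;  // circular scan from the last used slot
    } while (i != last_used);
  }
  // No usable slot: grow by ~1.5x + 4 and append.
  const std::size_t new_length = length == 0 ? 1 : length + (length >> 1) + 4;
  array.resize(new_length, nullptr);
  array[length] = value;
  last_used = length;
  return length;
}
```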
+template <class CompactionCallback>
void WeakFixedArray::Compact() {
FixedArray* array = FixedArray::cast(this);
int new_length = kFirstIndex;
@@ -8370,6 +8100,9 @@ void WeakFixedArray::Compact() {
Object* element = array->get(i);
if (element->IsSmi()) continue;
if (WeakCell::cast(element)->cleared()) continue;
+ Object* value = WeakCell::cast(element)->value();
+ CompactionCallback::Callback(value, i - kFirstIndex,
+ new_length - kFirstIndex);
array->set(new_length++, element);
}
array->Shrink(new_length);
@@ -8377,6 +8110,23 @@ void WeakFixedArray::Compact() {
}
+void JSObject::PrototypeRegistryCompactionCallback::Callback(Object* value,
+ int old_index,
+ int new_index) {
+ DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
+ Map* map = Map::cast(value);
+ DCHECK(map->prototype_info()->IsPrototypeInfo());
+ PrototypeInfo* proto_info = PrototypeInfo::cast(map->prototype_info());
+ DCHECK_EQ(old_index, proto_info->registry_slot());
+ proto_info->set_registry_slot(new_index);
+}
+
+
+template void WeakFixedArray::Compact<WeakFixedArray::NullCallback>();
+template void
+WeakFixedArray::Compact<JSObject::PrototypeRegistryCompactionCallback>();
+
+
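
Editor's note: Compact is now templated on a CompactionCallback so live entries that slide down can tell external bookkeeping their new slot; the prototype registry uses this to keep each map's registry_slot accurate. A standalone sketch of the protocol (illustrative types):

```cpp
#include <cstddef>
#include <vector>

struct EntryModel {
  bool cleared = false;
  int* registry_slot = nullptr;  // models PrototypeInfo::registry_slot
};

template <typename Callback>
void Compact(std::vector<EntryModel>& array, Callback on_move) {
  std::size_t new_length = 0;
  for (std::size_t i = 0; i < array.size(); ++i) {
    if (array[i].cleared) continue;  // drop dead entries
    on_move(array[i], static_cast<int>(i), static_cast<int>(new_length));
    array[new_length++] = array[i];
  }
  array.resize(new_length);  // Shrink()
}

// Mirrors PrototypeRegistryCompactionCallback::Callback.
void PrototypeRegistryCallback(EntryModel& e, int /*old_index*/, int new_index) {
  if (e.registry_slot != nullptr) *e.registry_slot = new_index;
}
```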
bool WeakFixedArray::Remove(Handle<HeapObject> value) {
if (Length() == 0) return false;
// Optimize for the most recently added element to be removed again.
@@ -8384,8 +8134,7 @@ bool WeakFixedArray::Remove(Handle<HeapObject> value) {
for (int i = first_index;;) {
if (Get(i) == *value) {
Clear(i);
- // Users of WeakFixedArray should make sure that there are no duplicates,
- // they can use Add(..., kAddIfNotFound) if necessary.
+ // Users of WeakFixedArray should make sure that there are no duplicates.
return true;
}
i = (i + 1) % Length();
@@ -8617,12 +8366,15 @@ Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
}
-int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out) {
+int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out,
+ CatchPrediction* prediction_out) {
int innermost_handler = -1, innermost_start = -1;
for (int i = 0; i < length(); i += kRangeEntrySize) {
int start_offset = Smi::cast(get(i + kRangeStartIndex))->value();
int end_offset = Smi::cast(get(i + kRangeEndIndex))->value();
- int handler_offset = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int handler_offset = HandlerOffsetField::decode(handler_field);
+ CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
int stack_depth = Smi::cast(get(i + kRangeDepthIndex))->value();
if (pc_offset > start_offset && pc_offset <= end_offset) {
DCHECK_NE(start_offset, innermost_start);
@@ -8630,6 +8382,7 @@ int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out) {
innermost_handler = handler_offset;
innermost_start = start_offset;
*stack_depth_out = stack_depth;
+ if (prediction_out) *prediction_out = prediction;
}
}
return innermost_handler;
@@ -8637,11 +8390,16 @@ int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out) {
// TODO(turbofan): Make sure table is sorted and use binary search.
-int HandlerTable::LookupReturn(int pc_offset) {
+int HandlerTable::LookupReturn(int pc_offset, CatchPrediction* prediction_out) {
for (int i = 0; i < length(); i += kReturnEntrySize) {
int return_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
- int handler_offset = Smi::cast(get(i + kReturnHandlerIndex))->value();
- if (pc_offset == return_offset) return handler_offset;
+ int handler_field = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ if (pc_offset == return_offset) {
+ if (prediction_out) {
+ *prediction_out = HandlerPredictionField::decode(handler_field);
+ }
+ return HandlerOffsetField::decode(handler_field);
+ }
}
return -1;
}
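
Editor's note: both lookups now unpack a single Smi-sized handler field into a handler offset plus a CatchPrediction via the HandlerOffsetField/HandlerPredictionField bit fields. A standalone encode/decode model (the one-bit width and low-bit placement are assumptions for illustration; V8 defines the real layout in HandlerTable):

```cpp
#include <cstdint>

enum CatchPrediction : uint32_t { UNCAUGHT = 0, CAUGHT = 1 };

constexpr uint32_t kPredictionBits = 1;  // assumed width, for illustration
constexpr uint32_t kPredictionMask = (1u << kPredictionBits) - 1;

uint32_t EncodeHandlerField(uint32_t offset, CatchPrediction p) {
  return (offset << kPredictionBits) | static_cast<uint32_t>(p);
}
uint32_t HandlerOffset(uint32_t field) { return field >> kPredictionBits; }
CatchPrediction HandlerPrediction(uint32_t field) {
  return static_cast<CatchPrediction>(field & kPredictionMask);
}
```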
@@ -9742,6 +9500,7 @@ static bool CheckEquivalent(Map* first, Map* second) {
first->instance_type() == second->instance_type() &&
first->bit_field() == second->bit_field() &&
first->is_extensible() == second->is_extensible() &&
+ first->is_strong() == second->is_strong() &&
first->has_instance_call_handler() ==
second->has_instance_call_handler();
}
@@ -9761,49 +9520,6 @@ bool Map::EquivalentToForNormalization(Map* other,
}
-void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
- // Unfortunately the serializer relies on pointers within an object being
- // visited in-order, so we have to iterate both the code and heap pointers in
- // the small section before doing so in the extended section.
- for (int s = 0; s <= final_section(); ++s) {
- LayoutSection section = static_cast<LayoutSection>(s);
- ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR,
- section);
- while (!code_iter.is_finished()) {
- v->VisitCodeEntry(reinterpret_cast<Address>(
- RawFieldOfElementAt(code_iter.next_index())));
- }
-
- ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR,
- section);
- while (!heap_iter.is_finished()) {
- v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index()));
- }
- }
-}
-
-
-void ConstantPoolArray::ClearPtrEntries(Isolate* isolate) {
- Type type[] = { CODE_PTR, HEAP_PTR };
- Address default_value[] = {
- isolate->builtins()->builtin(Builtins::kIllegal)->entry(),
- reinterpret_cast<Address>(isolate->heap()->undefined_value()) };
-
- for (int i = 0; i < 2; ++i) {
- for (int s = 0; s <= final_section(); ++s) {
- LayoutSection section = static_cast<LayoutSection>(s);
- if (number_of_entries(type[i], section) > 0) {
- int offset = OffsetOfElementAt(first_index(type[i], section));
- MemsetPointer(
- reinterpret_cast<Address*>(HeapObject::RawField(this, offset)),
- default_value[i],
- number_of_entries(type[i], section));
- }
- }
- }
-}
-
-
void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body but take care in dealing with
// the code entry.
@@ -9816,7 +9532,8 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
DCHECK(!IsOptimized());
- DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+ DCHECK(shared()->allows_lazy_compilation() ||
+ !shared()->optimization_disabled());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
@@ -9840,7 +9557,8 @@ void JSFunction::AttemptConcurrentOptimization() {
}
DCHECK(!IsInOptimizationQueue());
DCHECK(!IsOptimized());
- DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+ DCHECK(shared()->allows_lazy_compilation() ||
+ !shared()->optimization_disabled());
DCHECK(isolate->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
@@ -9848,7 +9566,7 @@ void JSFunction::AttemptConcurrentOptimization() {
PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
+ isolate->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
// No write barrier required, since the builtin is part of the root set.
}
@@ -9877,6 +9595,17 @@ Handle<JSFunction> JSFunction::CloneClosure(Handle<JSFunction> function) {
}
+void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Code> code) {
+ Isolate* isolate = shared->GetIsolate();
+ DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
+ Handle<Object> value(shared->optimized_code_map(), isolate);
+ if (value->IsSmi()) return; // Empty code maps are unsupported.
+ Handle<FixedArray> code_map = Handle<FixedArray>::cast(value);
+ code_map->set(kSharedCodeIndex, *code);
+}
+
+
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
@@ -9893,13 +9622,12 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
if (value->IsSmi()) {
// No optimized code map.
DCHECK_EQ(0, Smi::cast(*value)->value());
- // Create 3 entries per context {context, code, literals}.
new_code_map = isolate->factory()->NewFixedArray(kInitialLength);
old_length = kEntriesStart;
} else {
// Copy old map and append one new entry.
Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
- DCHECK_EQ(-1, shared->SearchOptimizedCodeMap(*native_context, osr_ast_id));
+ DCHECK(!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
old_length = old_code_map->length();
new_code_map = FixedArray::CopySize(
old_code_map, old_length + kEntryLength);
@@ -9929,27 +9657,6 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
}
-FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
- DCHECK(index > kEntriesStart);
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
- if (!bound()) {
- FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
- DCHECK_NOT_NULL(cached_literals);
- return cached_literals;
- }
- return NULL;
-}
-
-
-Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
- DCHECK(index > kEntriesStart);
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
- Code* code = Code::cast(code_map->get(index));
- DCHECK_NOT_NULL(code);
- return code;
-}
-
-
void SharedFunctionInfo::ClearOptimizedCodeMap() {
FixedArray* code_map = FixedArray::cast(optimized_code_map());
@@ -10002,6 +9709,15 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
dst += kEntryLength;
}
}
+ if (code_map->get(kSharedCodeIndex) == optimized_code) {
+ // Evict context-independent code as well.
+ code_map->set_undefined(kSharedCodeIndex);
+ if (FLAG_trace_opt) {
+ PrintF("[evicting entry from optimizing code map (%s) for ", reason);
+ ShortPrint();
+ PrintF(" (context-independent code)]\n");
+ }
+ }
if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
@@ -10136,67 +9852,74 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(user->is_prototype_map());
Handle<Map> current_user = user;
+ Handle<PrototypeInfo> current_user_info =
+ Map::GetOrCreatePrototypeInfo(user, isolate);
for (PrototypeIterator iter(user); !iter.IsAtEnd(); iter.Advance()) {
+ // Walk up the prototype chain as far as links haven't been registered yet.
+ if (current_user_info->registry_slot() != PrototypeInfo::UNREGISTERED) {
+ break;
+ }
Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
if (maybe_proto->IsJSGlobalProxy()) continue;
// Proxies on the prototype chain are not supported.
if (maybe_proto->IsJSProxy()) return;
Handle<JSObject> proto = Handle<JSObject>::cast(maybe_proto);
- bool just_registered =
- RegisterPrototypeUserIfNotRegistered(proto, current_user, isolate);
- // Walk up the prototype chain as far as links haven't been registered yet.
- if (!just_registered) break;
- current_user = handle(proto->map(), isolate);
- }
-}
-
+ Handle<PrototypeInfo> proto_info =
+ Map::GetOrCreatePrototypeInfo(proto, isolate);
+ Handle<Object> maybe_registry(proto_info->prototype_users(), isolate);
+ int slot = 0;
+ Handle<WeakFixedArray> new_array =
+ WeakFixedArray::Add(maybe_registry, current_user, &slot);
+ current_user_info->set_registry_slot(slot);
+ if (!maybe_registry.is_identical_to(new_array)) {
+ proto_info->set_prototype_users(*new_array);
+ }
+ if (FLAG_trace_prototype_users) {
+ PrintF("Registering %p as a user of prototype %p (map=%p).\n",
+ reinterpret_cast<void*>(*current_user),
+ reinterpret_cast<void*>(*proto),
+ reinterpret_cast<void*>(proto->map()));
+ }
-// Returns true if the user was not yet registered.
-// static
-bool JSObject::RegisterPrototypeUserIfNotRegistered(Handle<JSObject> prototype,
- Handle<HeapObject> user,
- Isolate* isolate) {
- Handle<PrototypeInfo> proto_info =
- Map::GetOrCreatePrototypeInfo(prototype, isolate);
- Handle<Object> maybe_registry(proto_info->prototype_users(), isolate);
- bool was_present = false;
- Handle<WeakFixedArray> new_array = WeakFixedArray::Add(
- maybe_registry, user, WeakFixedArray::kAddIfNotFound, &was_present);
- if (!maybe_registry.is_identical_to(new_array)) {
- proto_info->set_prototype_users(*new_array);
- }
- if (FLAG_trace_prototype_users && !was_present) {
- PrintF("Registering %p as a user of prototype %p (map=%p).\n",
- reinterpret_cast<void*>(*user), reinterpret_cast<void*>(*prototype),
- reinterpret_cast<void*>(prototype->map()));
+ current_user = handle(proto->map(), isolate);
+ current_user_info = proto_info;
}
- return !was_present;
}
// Can be called regardless of whether |user| was actually registered with
// |prototype|. Returns true when there was a registration.
// static
-bool JSObject::UnregisterPrototypeUser(Handle<JSObject> prototype,
- Handle<HeapObject> user) {
- Isolate* isolate = prototype->GetIsolate();
+bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
+ DCHECK(user->is_prototype_map());
+ // If it doesn't have a PrototypeInfo, it was never registered.
+ if (!user->prototype_info()->IsPrototypeInfo()) return false;
+ // If it doesn't have a prototype, it can't be registered.
+ if (!user->prototype()->IsJSObject()) return false;
+ Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
+ Handle<PrototypeInfo> user_info =
+ Map::GetOrCreatePrototypeInfo(user, isolate);
+ int slot = user_info->registry_slot();
+ if (slot == PrototypeInfo::UNREGISTERED) return false;
if (prototype->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, prototype);
prototype = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
}
DCHECK(prototype->map()->is_prototype_map());
Object* maybe_proto_info = prototype->map()->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return false;
+ // The user knows its registry slot, so the prototype info and its user
+ // registry must exist.
+ DCHECK(maybe_proto_info->IsPrototypeInfo());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
isolate);
Object* maybe_registry = proto_info->prototype_users();
- if (!maybe_registry->IsWeakFixedArray()) return false;
- bool result = WeakFixedArray::cast(maybe_registry)->Remove(user);
- if (FLAG_trace_prototype_users && result) {
+ DCHECK(maybe_registry->IsWeakFixedArray());
+ DCHECK(WeakFixedArray::cast(maybe_registry)->Get(slot) == *user);
+ WeakFixedArray::cast(maybe_registry)->Clear(slot);
+ if (FLAG_trace_prototype_users) {
PrintF("Unregistering %p as a user of prototype %p.\n",
reinterpret_cast<void*>(*user), reinterpret_cast<void*>(*prototype));
}
- return result;
+ return true;
}
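
// A minimal standalone sketch of the slot-based scheme above: each user
// remembers the index it occupies in its prototype's registry, so
// unregistration clears a single slot in O(1) instead of scanning the whole
// WeakFixedArray with Remove(). Names below are invented for illustration
// and are not V8 API.
//
//   #include <vector>
//   struct SlotRegistry {
//     std::vector<void*> users_;
//     int Add(void* user) {
//       users_.push_back(user);
//       return static_cast<int>(users_.size()) - 1;  // caller keeps this slot
//     }
//     void Clear(int slot) { users_[slot] = nullptr; }  // O(1) unregister
//   };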
@@ -10258,6 +9981,19 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
// static
+Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
+ Isolate* isolate) {
+ Object* maybe_proto_info = prototype_map->prototype_info();
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
+ }
+ Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
+ prototype_map->set_prototype_info(*proto_info);
+ return proto_info;
+}
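
// Hedged sketch of how this lazy get-or-create composes with the
// registration walk above, assuming a freshly allocated PrototypeInfo starts
// with registry_slot() == PrototypeInfo::UNREGISTERED (as the loop's break
// condition suggests):
//
//   Handle<PrototypeInfo> info = Map::GetOrCreatePrototypeInfo(map, isolate);
//   if (info->registry_slot() == PrototypeInfo::UNREGISTERED) {
//     // Not yet linked to its prototype; LazyRegisterPrototypeUser fills
//     // the slot on first use.
//   }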
+
+
+// static
Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Isolate* isolate) {
Handle<Object> maybe_prototype(map->prototype(), isolate);
@@ -10329,7 +10065,10 @@ Handle<Object> CacheInitialJSArrayMaps(
maps->set(next_kind, *new_map);
current_map = new_map;
}
- native_context->set_js_array_maps(*maps);
+ if (initial_map->is_strong())
+ native_context->set_js_array_strong_maps(*maps);
+ else
+ native_context->set_js_array_maps(*maps);
return initial_map;
}
@@ -10364,13 +10103,17 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
JSFunction::SetInitialMap(function, new_map, value);
// If the function is used as the global Array function, cache the
- // initial map (and transitioned versions) in the native context.
- Context* native_context = function->context()->native_context();
- Object* array_function =
- native_context->get(Context::ARRAY_FUNCTION_INDEX);
+ // updated initial maps (and transitioned versions) in the native context.
+ Handle<Context> native_context(function->context()->native_context(),
+ isolate);
+ Handle<Object> array_function(
+ native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
if (array_function->IsJSFunction() &&
- *function == JSFunction::cast(array_function)) {
- CacheInitialJSArrayMaps(handle(native_context, isolate), new_map);
+ *function == JSFunction::cast(*array_function)) {
+ CacheInitialJSArrayMaps(native_context, new_map);
+ Handle<Map> new_strong_map = Map::Copy(new_map, "SetInstancePrototype");
+ new_strong_map->set_is_strong();
+ CacheInitialJSArrayMaps(native_context, new_strong_map);
}
}
@@ -10553,7 +10296,7 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<Object> name =
- JSObject::GetDataProperty(function, isolate->factory()->name_string());
+ JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
if (name->IsString()) return Handle<String>::cast(name);
return handle(function->shared()->DebugName(), isolate);
}
@@ -10706,6 +10449,56 @@ Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
}
+MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
+ FunctionLiteral* fun) {
+ if (shared_function_infos()->IsWeakFixedArray()) {
+ WeakFixedArray* array = WeakFixedArray::cast(shared_function_infos());
+ for (int i = 0; i < array->Length(); i++) {
+ Object* obj = array->Get(i);
+ if (!obj->IsSharedFunctionInfo()) continue;
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (fun->function_token_position() == shared->function_token_position() &&
+ fun->start_position() == shared->start_position()) {
+ return Handle<SharedFunctionInfo>(shared);
+ }
+ }
+ }
+ return MaybeHandle<SharedFunctionInfo>();
+}
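
// The scan above keys a FunctionLiteral to its SharedFunctionInfo by the
// pair (function token position, start position); cleared weak slots fail
// the IsSharedFunctionInfo() check and are simply skipped. A standalone
// analogue of that identity key (invented names, not V8 API):
//
//   struct FunctionKey {
//     int token_pos;
//     int start_pos;
//     bool operator==(const FunctionKey& other) const {
//       return token_pos == other.token_pos && start_pos == other.start_pos;
//     }
//   };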
+
+
+void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
+ Handle<Object> script_object) {
+ if (shared->script() == *script_object) return;
+ // Remove shared function info from old script's list.
+ if (shared->script()->IsScript()) {
+ Script* old_script = Script::cast(shared->script());
+ if (old_script->shared_function_infos()->IsWeakFixedArray()) {
+ WeakFixedArray* list =
+ WeakFixedArray::cast(old_script->shared_function_infos());
+ list->Remove(shared);
+ }
+ }
+ // Add shared function info to new script's list.
+ if (script_object->IsScript()) {
+ Handle<Script> script = Handle<Script>::cast(script_object);
+ Handle<Object> list(script->shared_function_infos(), shared->GetIsolate());
+#ifdef DEBUG
+ if (list->IsWeakFixedArray()) {
+ Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(list);
+ for (int i = 0; i < array->Length(); ++i) {
+ DCHECK(array->Get(i) != *shared);
+ }
+ }
+#endif // DEBUG
+ list = WeakFixedArray::Add(list, shared);
+ script->set_shared_function_infos(*list);
+ }
+ // Finally set new script.
+ shared->set_script(*script_object);
+}
+
+
String* SharedFunctionInfo::DebugName() {
Object* n = name();
if (!n->IsString() || String::cast(n)->length() == 0) return inferred_name();
@@ -10730,10 +10523,7 @@ Handle<Object> SharedFunctionInfo::GetSourceCode() {
bool SharedFunctionInfo::IsInlineable() {
// Check that the function has a script associated with it.
if (!script()->IsScript()) return false;
- if (optimization_disabled()) return false;
- // If we never ran this (unlikely) then lets try to optimize it.
- if (code()->kind() != Code::FUNCTION) return true;
- return code()->optimizable();
+ return !optimization_disabled();
}
@@ -10835,12 +10625,8 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK(reason != kNoReason);
set_optimization_disabled(true);
set_disable_optimization_reason(reason);
- // Code should be the lazy compilation stub or else unoptimized. If the
- // latter, disable optimization for the code too.
+ // Code should be the lazy compilation stub or else unoptimized.
DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
- if (code()->kind() == Code::FUNCTION) {
- code()->set_optimizable(false);
- }
PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
@@ -10853,12 +10639,7 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
shared_info->set_length(lit->scope()->default_function_length());
- if (IsSubclassConstructor(lit->kind())) {
- shared_info->set_internal_formal_parameter_count(lit->parameter_count() +
- 1);
- } else {
- shared_info->set_internal_formal_parameter_count(lit->parameter_count());
- }
+ shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->set_function_token_position(lit->function_token_position());
shared_info->set_start_position(lit->start_position());
shared_info->set_end_position(lit->end_position());
@@ -10876,10 +10657,10 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (lit->dont_optimize_reason() != kNoReason) {
shared_info->DisableOptimization(lit->dont_optimize_reason());
}
- shared_info->set_dont_cache(
- lit->flags()->Contains(AstPropertiesFlag::kDontCache));
+ shared_info->set_dont_crankshaft(
+ lit->flags()->Contains(AstPropertiesFlag::kDontCrankshaft));
shared_info->set_kind(lit->kind());
- shared_info->set_uses_super_property(lit->uses_super_property());
+ shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
}
@@ -10922,7 +10703,6 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
opt_count() >= FLAG_max_opt_count) {
// Re-enable optimizations if they were disabled due to opt_count limit.
set_optimization_disabled(false);
- code()->set_optimizable(true);
}
set_opt_count(0);
set_deopt_count(0);
@@ -10930,11 +10710,10 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
}
-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
- BailoutId osr_ast_id) {
+CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
+ Context* native_context, BailoutId osr_ast_id) {
DisallowHeapAllocation no_gc;
DCHECK(native_context->IsNativeContext());
- if (!FLAG_cache_optimized_code) return -1;
Object* value = optimized_code_map();
if (!value->IsSmi()) {
FixedArray* optimized_code_map = FixedArray::cast(value);
@@ -10943,16 +10722,21 @@ int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
for (int i = kEntriesStart; i < length; i += kEntryLength) {
if (optimized_code_map->get(i + kContextOffset) == native_context &&
optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
- return i + kCachedCodeOffset;
+ return {Code::cast(optimized_code_map->get(i + kCachedCodeOffset)),
+ FixedArray::cast(optimized_code_map->get(i + kLiteralsOffset))};
}
}
+ Object* shared_code = optimized_code_map->get(kSharedCodeIndex);
+ if (shared_code->IsCode() && osr_ast_id.IsNone()) {
+ return {Code::cast(shared_code), nullptr};
+ }
if (FLAG_trace_opt) {
PrintF("[didn't find optimized code in optimized code map for ");
ShortPrint();
PrintF("]\n");
}
}
- return -1;
+ return {nullptr, nullptr};
}
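
// Hypothetical caller sketch. The brace-initialized returns above imply that
// CodeAndLiterals pairs a Code* with a FixedArray*; the field names |code|
// and |literals| are assumed here:
//
//   CodeAndLiterals cached =
//       shared->SearchOptimizedCodeMap(*native_context, BailoutId::None());
//   if (cached.code != nullptr) {
//     // Reuse the cached optimized code. cached.literals may be nullptr for
//     // the context-independent kSharedCodeIndex entry.
//   }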
@@ -11631,6 +11415,13 @@ WeakCell* Code::CachedWeakCell() {
void DeoptimizationInputData::DeoptimizationInputDataPrint(
std::ostream& os) { // NOLINT
disasm::NameConverter converter;
+ int const inlined_function_count = InlinedFunctionCount()->value();
+ os << "Inlined functions (count = " << inlined_function_count << ")\n";
+ for (int id = 0; id < inlined_function_count; ++id) {
+ Object* info = LiteralArray()->get(id);
+ os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
+ }
+ os << "\n";
int deopt_count = DeoptCount();
os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
if (0 != deopt_count) {
@@ -11671,16 +11462,17 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
case Translation::JS_FRAME: {
int ast_id = iterator.Next();
- int function_id = iterator.Next();
+ int shared_info_id = iterator.Next();
unsigned height = iterator.Next();
- os << "{ast_id=" << ast_id << ", function=";
- if (function_id != Translation::kSelfLiteralId) {
- Object* function = LiteralArray()->get(function_id);
- os << Brief(JSFunction::cast(function)->shared()->DebugName());
- } else {
- os << "<self>";
- }
- os << ", height=" << height << "}";
+ Object* shared_info = LiteralArray()->get(shared_info_id);
+ os << "{ast_id=" << ast_id << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
+ case Translation::JS_FRAME_FUNCTION: {
+ os << "{function}";
break;
}
@@ -11692,21 +11484,21 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME: {
- int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
+ int shared_info_id = iterator.Next();
+ Object* shared_info = LiteralArray()->get(shared_info_id);
unsigned height = iterator.Next();
- os << "{function=" << Brief(function->shared()->DebugName())
+ os << "{function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
<< ", height=" << height << "}";
break;
}
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME: {
- int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
- os << "{function=" << Brief(function->shared()->DebugName()) << "}";
+ int shared_info_id = iterator.Next();
+ Object* shared_info = LiteralArray()->get(shared_info_id);
+ os << "{function=" << Brief(SharedFunctionInfo::cast(shared_info)
+ ->DebugName()) << "}";
break;
}
@@ -11820,21 +11612,26 @@ void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
for (int i = 0; i < length(); i += kRangeEntrySize) {
int pc_start = Smi::cast(get(i + kRangeStartIndex))->value();
int pc_end = Smi::cast(get(i + kRangeEndIndex))->value();
- int handler = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int handler_offset = HandlerOffsetField::decode(handler_field);
+ CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
int depth = Smi::cast(get(i + kRangeDepthIndex))->value();
os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
- << ") -> " << std::setw(4) << handler << " (depth=" << depth << ")\n";
+ << ") -> " << std::setw(4) << handler_offset
+ << " (prediction=" << prediction << ", depth=" << depth << ")\n";
}
}
void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
- os << " off hdlr\n";
+ os << " off hdlr (c)\n";
for (int i = 0; i < length(); i += kReturnEntrySize) {
int pc_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
- int handler = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ int handler_field = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ int handler_offset = HandlerOffsetField::decode(handler_field);
+ CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
- << handler << "\n";
+ << handler_offset << " (prediction=" << prediction << ")\n";
}
}
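
// Sketch of the packed handler field decoded above, assuming V8's usual
// BitField helpers (the exact bit layout lives with the HandlerTable
// declaration):
//
//   int field = HandlerOffsetField::encode(handler_offset) |
//               HandlerPredictionField::encode(prediction);
//   // Stored as a Smi; round-trips via decode():
//   DCHECK_EQ(handler_offset, HandlerOffsetField::decode(field));
//   DCHECK_EQ(prediction, HandlerPredictionField::decode(field));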
@@ -11907,21 +11704,44 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
if (kind() == OPTIMIZED_FUNCTION) {
os << "stack_slots = " << stack_slots() << "\n";
}
+ os << "compiler = " << (is_turbofanned()
+ ? "turbofan"
+ : is_crankshafted() ? "crankshaft"
+ : kind() == Code::FUNCTION
+ ? "full-codegen"
+ : "unknown") << "\n";
os << "Instructions (size = " << instruction_size() << ")\n";
{
Isolate* isolate = GetIsolate();
- int decode_size = is_crankshafted()
- ? static_cast<int>(safepoint_table_offset())
- : instruction_size();
- // If there might be a back edge table, stop before reaching it.
- if (kind() == Code::FUNCTION) {
- decode_size =
- Min(decode_size, static_cast<int>(back_edge_table_offset()));
- }
+ int size = instruction_size();
+ int safepoint_offset =
+ is_crankshafted() ? static_cast<int>(safepoint_table_offset()) : size;
+ int back_edge_offset = (kind() == Code::FUNCTION)
+ ? static_cast<int>(back_edge_table_offset())
+ : size;
+ int constant_pool_offset = FLAG_enable_embedded_constant_pool
+ ? this->constant_pool_offset()
+ : size;
+
+ // Stop before reaching any embedded tables.
+ int code_size = Min(safepoint_offset, back_edge_offset);
+ code_size = Min(code_size, constant_pool_offset);
byte* begin = instruction_start();
- byte* end = begin + decode_size;
+ byte* end = begin + code_size;
Disassembler::Decode(isolate, &os, begin, end, this);
+
+ if (constant_pool_offset < size) {
+ int constant_pool_size = size - constant_pool_offset;
+ DCHECK((constant_pool_size & kPointerAlignmentMask) == 0);
+ os << "\nConstant Pool (size = " << constant_pool_size << ")\n";
+ Vector<char> buf = Vector<char>::New(50);
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(begin + constant_pool_offset);
+ for (int i = 0; i < constant_pool_size; i += kPointerSize, ptr++) {
+ SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
+ os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
+ }
+ }
}
os << "\n";
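
// Worked example with hypothetical numbers: for crankshafted code with
// instruction_size() == 4096 and safepoint_table_offset() == 3840, and with
// no back edge table or embedded constant pool, code_size is
// Min(Min(3840, 4096), 4096) == 3840, so disassembly stops just before the
// safepoint table instead of decoding table data as instructions.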
@@ -12000,139 +11820,10 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
it.rinfo()->Print(GetIsolate(), os);
}
os << "\n";
-
-#ifdef OBJECT_PRINT
- if (FLAG_enable_ool_constant_pool) {
- ConstantPoolArray* pool = constant_pool();
- if (pool->length()) {
- os << "Constant Pool\n";
- pool->Print(os);
- os << "\n";
- }
- }
-#endif
}
#endif // ENABLE_DISASSEMBLER
-Handle<FixedArray> JSObject::SetFastElementsCapacity(
- Handle<JSObject> object, int capacity,
- SetFastElementsCapacitySmiMode smi_mode) {
- // We should never end in here with a pixel or external array.
- DCHECK(!object->HasExternalArrayElements());
-
- // Allocate a new fast elements backing store.
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> new_elements =
- isolate->factory()->NewUninitializedFixedArray(capacity);
-
- isolate->UpdateArrayProtectorOnSetLength(object);
-
- ElementsKind elements_kind = object->GetElementsKind();
- ElementsKind new_elements_kind;
- // The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
- // or if it's allowed and the old elements array contained only SMIs.
- bool has_fast_smi_elements =
- (smi_mode == kForceSmiElements) ||
- ((smi_mode == kAllowSmiElements) && object->HasFastSmiElements());
- if (has_fast_smi_elements) {
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
- } else {
- new_elements_kind = FAST_SMI_ELEMENTS;
- }
- } else {
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- new_elements_kind = FAST_ELEMENTS;
- }
- }
- Handle<FixedArrayBase> old_elements(object->elements());
- ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
- accessor->CopyElements(object, new_elements, elements_kind);
-
- if (elements_kind != SLOPPY_ARGUMENTS_ELEMENTS) {
- Handle<Map> new_map = (new_elements_kind != elements_kind)
- ? GetElementsTransitionMap(object, new_elements_kind)
- : handle(object->map());
- JSObject::ValidateElements(object);
- JSObject::SetMapAndElements(object, new_map, new_elements);
-
- // Transition through the allocation site as well if present.
- JSObject::UpdateAllocationSite(object, new_elements_kind);
- } else {
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(old_elements);
- parameter_map->set(1, *new_elements);
- }
-
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, object, elements_kind, old_elements,
- object->GetElementsKind(), new_elements);
- }
-
- return new_elements;
-}
-
-
-Handle<FixedArray> JSObject::SetFastElementsCapacityAndLength(
- Handle<JSObject> object, int capacity, int length,
- SetFastElementsCapacitySmiMode smi_mode) {
- Handle<FixedArray> new_elements =
- SetFastElementsCapacity(object, capacity, smi_mode);
- if (object->IsJSArray()) {
- Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
- }
- return new_elements;
-}
-
-
-Handle<FixedArrayBase> JSObject::SetFastDoubleElementsCapacity(
- Handle<JSObject> object, int capacity) {
- // We should never end in here with a pixel or external array.
- DCHECK(!object->HasExternalArrayElements());
-
- Handle<FixedArrayBase> elems =
- object->GetIsolate()->factory()->NewFixedDoubleArray(capacity);
-
- ElementsKind elements_kind = object->GetElementsKind();
- CHECK(elements_kind != SLOPPY_ARGUMENTS_ELEMENTS);
- ElementsKind new_elements_kind = elements_kind;
- if (IsHoleyElementsKind(elements_kind)) {
- new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- new_elements_kind = FAST_DOUBLE_ELEMENTS;
- }
-
- Handle<Map> new_map = GetElementsTransitionMap(object, new_elements_kind);
-
- Handle<FixedArrayBase> old_elements(object->elements());
- ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
- accessor->CopyElements(object, elems, elements_kind);
-
- JSObject::ValidateElements(object);
- JSObject::SetMapAndElements(object, new_map, elems);
-
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, object, elements_kind, old_elements,
- object->GetElementsKind(), elems);
- }
-
- return elems;
-}
-
-
-Handle<FixedArrayBase> JSObject::SetFastDoubleElementsCapacityAndLength(
- Handle<JSObject> object, int capacity, int length) {
- Handle<FixedArrayBase> new_elements =
- SetFastDoubleElementsCapacity(object, capacity);
- if (object->IsJSArray()) {
- Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
- }
- return new_elements;
-}
-
-
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
DCHECK(capacity >= 0);
@@ -12141,90 +11832,43 @@ void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
}
-void JSArray::Expand(Handle<JSArray> array, int required_size) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->SetCapacityAndLength(array, required_size, required_size);
-}
-
-
-// Returns false if the passed-in index is marked non-configurable,
-// which will cause the ES5 truncation operation to halt, and thus
-// no further old values need be collected.
+// Returns false if the passed-in index is marked non-configurable, which will
+// cause the truncation operation to halt, and thus no further old values need
+// be collected.
static bool GetOldValue(Isolate* isolate,
Handle<JSObject> object,
uint32_t index,
List<Handle<Object> >* old_values,
List<uint32_t>* indices) {
- Maybe<PropertyAttributes> maybe =
- JSReceiver::GetOwnElementAttribute(object, index);
- DCHECK(maybe.IsJust());
- DCHECK(maybe.FromJust() != ABSENT);
- if (maybe.FromJust() == DONT_DELETE) return false;
- Handle<Object> value;
- if (!JSObject::GetOwnElementAccessorPair(object, index).is_null()) {
- value = Handle<Object>::cast(isolate->factory()->the_hole_value());
- } else {
- value = Object::GetElement(isolate, object, index).ToHandleChecked();
- }
+ LookupIterator it(isolate, object, index, LookupIterator::HIDDEN);
+ CHECK(JSReceiver::GetPropertyAttributes(&it).IsJust());
+ DCHECK(it.IsFound());
+ if (!it.IsConfigurable()) return false;
+ Handle<Object> value =
+ it.state() == LookupIterator::ACCESSOR
+ ? Handle<Object>::cast(isolate->factory()->the_hole_value())
+ : JSReceiver::GetDataProperty(&it);
old_values->Add(value);
indices->Add(index);
return true;
}
-MUST_USE_RESULT static MaybeHandle<Object> EnqueueSpliceRecord(
- Handle<JSArray> object, uint32_t index, Handle<JSArray> deleted,
- uint32_t add_count) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> add_count_object =
- isolate->factory()->NewNumberFromUint(add_count);
-
- Handle<Object> args[] =
- { object, index_object, deleted, add_count_object };
-
- return Execution::Call(
- isolate, Handle<JSFunction>(isolate->observers_enqueue_splice()),
- isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-MUST_USE_RESULT static MaybeHandle<Object> BeginPerformSplice(
- Handle<JSArray> object) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> args[] = { object };
-
- return Execution::Call(
- isolate, Handle<JSFunction>(isolate->observers_begin_perform_splice()),
- isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> EndPerformSplice(
- Handle<JSArray> object) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> args[] = { object };
-
- return Execution::Call(
- isolate, Handle<JSFunction>(isolate->observers_end_perform_splice()),
- isolate->factory()->undefined_value(), arraysize(args), args);
+void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
+ // We should never end in here with a pixel or external array.
+ DCHECK(array->AllowsSetLength());
+ if (array->SetLengthWouldNormalize(new_length)) {
+ JSObject::NormalizeElements(array);
+ }
+ array->GetElementsAccessor()->SetLength(array, new_length);
}
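
// Hedged usage sketch; NewJSArray and ToArrayLength appear elsewhere in this
// patch, but the scenario below is invented:
//
//   Handle<JSArray> array = isolate->factory()->NewJSArray(0);
//   JSArray::SetLength(array, 100);  // normalizes to dictionary elements
//                                    // first if growth would be too sparse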
-MaybeHandle<Object> JSArray::SetElementsLength(
- Handle<JSArray> array,
- Handle<Object> new_length_handle) {
- if (array->HasFastElements() &&
- SetElementsLengthWouldNormalize(array->GetHeap(), new_length_handle)) {
- NormalizeElements(array);
- }
-
- // We should never end in here with a pixel or external array.
- DCHECK(array->AllowsSetElementsLength());
+MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
+ uint32_t new_length) {
if (!array->map()->is_observed()) {
- return array->GetElementsAccessor()->SetLength(array, new_length_handle);
+ SetLength(array, new_length);
+ return array;
}
Isolate* isolate = array->GetIsolate();
@@ -12232,9 +11876,7 @@ MaybeHandle<Object> JSArray::SetElementsLength(
List<Handle<Object> > old_values;
Handle<Object> old_length_handle(array->length(), isolate);
uint32_t old_length = 0;
- CHECK(old_length_handle->ToArrayIndex(&old_length));
- uint32_t new_length = 0;
- CHECK(new_length_handle->ToArrayIndex(&new_length));
+ CHECK(old_length_handle->ToArrayLength(&old_length));
static const PropertyAttributes kNoAttrFilter = NONE;
int num_elements = array->NumberOfOwnElements(kNoAttrFilter);
@@ -12258,14 +11900,10 @@ MaybeHandle<Object> JSArray::SetElementsLength(
}
}
- Handle<Object> hresult;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, hresult,
- array->GetElementsAccessor()->SetLength(array, new_length_handle),
- Object);
+ SetLength(array, new_length);
- CHECK(array->length()->ToArrayIndex(&new_length));
- if (old_length == new_length) return hresult;
+ CHECK(array->length()->ToArrayLength(&new_length));
+ if (old_length == new_length) return array;
RETURN_ON_EXCEPTION(isolate, BeginPerformSplice(array), Object);
@@ -12280,6 +11918,7 @@ MaybeHandle<Object> JSArray::SetElementsLength(
old_values[i]),
Object);
}
+
RETURN_ON_EXCEPTION(isolate,
JSObject::EnqueueChangeRecord(
array, "update", isolate->factory()->length_string(),
@@ -12297,22 +11936,17 @@ MaybeHandle<Object> JSArray::SetElementsLength(
// Skip deletions where the property was an accessor, leaving holes
// in the array of old values.
if (old_values[i]->IsTheHole()) continue;
- JSObject::SetOwnElement(deleted, indices[i] - index, old_values[i],
- SLOPPY).Assert();
+ JSObject::AddDataElement(deleted, indices[i] - index, old_values[i], NONE)
+ .Assert();
}
- RETURN_ON_EXCEPTION(
- isolate,
- SetProperty(deleted, isolate->factory()->length_string(),
- isolate->factory()->NewNumberFromUint(delete_count),
- STRICT),
- Object);
+ JSArray::SetLength(deleted, delete_count);
}
RETURN_ON_EXCEPTION(
isolate, EnqueueSpliceRecord(array, index, deleted, add_count), Object);
- return hresult;
+ return array;
}
@@ -12513,8 +12147,7 @@ bool DependentCode::MarkCodeForDeoptimization(
WeakCell* cell = WeakCell::cast(obj);
if (cell->cleared()) continue;
Code* code = Code::cast(cell->value());
- if (!code->marked_for_deoptimization() &&
- (!code->is_turbofanned() || FLAG_turbo_deoptimization)) {
+ if (!code->marked_for_deoptimization()) {
SetMarkedForDeoptimization(code, group);
if (invalidate_embedded_objects) {
code->InvalidateEmbeddedObjects();
@@ -12614,6 +12247,13 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
#endif
Isolate* isolate = object->GetIsolate();
+ // Strong objects may not have their prototype set via __proto__ or
+ // setPrototypeOf.
+ if (from_javascript && object->map()->is_strong()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongSetProto, object),
+ Object);
+ }
Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
@@ -12628,9 +12268,8 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
if (!object->map()->is_extensible()) {
- Handle<Object> args[] = { object };
- THROW_NEW_ERROR(isolate, NewTypeError("non_extensible_proto",
- HandleVector(args, arraysize(args))),
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNonExtensibleProto, object),
Object);
}
@@ -12643,7 +12282,8 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
!iter.IsAtEnd(); iter.Advance()) {
if (JSReceiver::cast(iter.GetCurrent()) == *object) {
// Cycle detected.
- THROW_NEW_ERROR(isolate, NewError(MessageTemplate::kCyclicProto), Object);
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCyclicProto),
+ Object);
}
}
@@ -12660,11 +12300,9 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
iter.Advance();
if (!real_receiver->map()->is_extensible()) {
- Handle<Object> args[] = {object};
- THROW_NEW_ERROR(isolate,
- NewTypeError("non_extensible_proto",
- HandleVector(args, arraysize(args))),
- Object);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kNonExtensibleProto, object),
+ Object);
}
}
}
@@ -12710,811 +12348,218 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
}
-MaybeHandle<AccessorPair> JSObject::GetOwnElementAccessorPair(
- Handle<JSObject> object,
- uint32_t index) {
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(object->GetIsolate(), object);
- if (iter.IsAtEnd()) return MaybeHandle<AccessorPair>();
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return GetOwnElementAccessorPair(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index);
- }
-
- // Check for lookup interceptor.
- if (object->HasIndexedInterceptor()) return MaybeHandle<AccessorPair>();
-
- return object->GetElementsAccessor()->GetAccessorPair(object, index);
+ElementsAccessor* JSObject::GetElementsAccessor() {
+ return ElementsAccessor::ForKind(GetElementsKind());
}
-MaybeHandle<Object> JSObject::SetElementWithInterceptor(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype, SetPropertyMode set_mode) {
- Isolate* isolate = object->GetIsolate();
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- if (!interceptor->setter()->IsUndefined()) {
- v8::IndexedPropertySetterCallback setter =
- v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", *object, index));
- PropertyCallbackArguments args(isolate, interceptor->data(), *object,
- *object);
- v8::Handle<v8::Value> result =
- args.Call(setter, index, v8::Utils::ToLocal(value));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!result.IsEmpty()) return value;
+void JSObject::ValidateElements(Handle<JSObject> object) {
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Validate(object);
}
-
- return SetElementWithoutInterceptor(object, index, value, attributes,
- language_mode, check_prototype, set_mode);
+#endif
}
-MaybeHandle<Object> JSObject::GetElementWithCallback(
- Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Object> structure,
- uint32_t index,
- Handle<Object> holder) {
+// static
+MaybeHandle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
+ uint32_t index, Handle<Object> value,
+ LanguageMode language_mode) {
Isolate* isolate = object->GetIsolate();
- DCHECK(!structure->IsForeign());
- // api style callbacks.
- if (structure->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> data =
- Handle<ExecutableAccessorInfo>::cast(structure);
- Object* fun_obj = data->getter();
- v8::AccessorNameGetterCallback call_fun =
- v8::ToCData<v8::AccessorNameGetterCallback>(fun_obj);
- if (call_fun == NULL) return isolate->factory()->undefined_value();
- Handle<JSObject> holder_handle = Handle<JSObject>::cast(holder);
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key = isolate->factory()->NumberToString(number);
- LOG(isolate, ApiNamedPropertyAccess("load", *holder_handle, *key));
- PropertyCallbackArguments
- args(isolate, data->data(), *receiver, *holder_handle);
- v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.IsEmpty()) return isolate->factory()->undefined_value();
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- // Rebox handle before return.
- return handle(*result_internal, isolate);
- }
-
- // __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
- isolate);
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(
- receiver, Handle<JSReceiver>::cast(getter));
- }
- // Getter is not a function.
- return isolate->factory()->undefined_value();
- }
-
- UNREACHABLE();
- return MaybeHandle<Object>();
+ LookupIterator it(isolate, object, index);
+ return SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED);
}
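
// Hedged usage sketch: element stores now funnel through the generic
// LookupIterator machinery rather than the removed interceptor-specific
// paths. |receiver| and |value| below are placeholders:
//
//   Handle<Object> result;
//   ASSIGN_RETURN_ON_EXCEPTION(
//       isolate, result,
//       JSReceiver::SetElement(receiver, 7, value, STRICT), Object);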
-MaybeHandle<Object> JSObject::SetElementWithCallback(
- Handle<Object> object, Handle<Object> structure, uint32_t index,
- Handle<Object> value, Handle<JSObject> holder, LanguageMode language_mode) {
- Isolate* isolate = holder->GetIsolate();
-
- // We should never get here to initialize a const with the hole
- // value since a const declaration would conflict with the setter.
- DCHECK(!value->IsTheHole());
- DCHECK(!structure->IsForeign());
- if (structure->IsExecutableAccessorInfo()) {
- // api style callbacks
- Handle<ExecutableAccessorInfo> data =
- Handle<ExecutableAccessorInfo>::cast(structure);
- Object* call_obj = data->setter();
- v8::AccessorNameSetterCallback call_fun =
- v8::ToCData<v8::AccessorNameSetterCallback>(call_obj);
- if (call_fun == NULL) return value;
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", *holder, *key));
- PropertyCallbackArguments
- args(isolate, data->data(), *object, *holder);
- args.Call(call_fun,
- v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
- }
-
- if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(
- object, Handle<JSReceiver>::cast(setter), value);
- } else {
- if (is_sloppy(language_mode)) return value;
- Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[] = {key, holder};
- THROW_NEW_ERROR(isolate,
- NewTypeError("no_setter_in_callback",
- HandleVector(args, arraysize(args))),
- Object);
- }
+static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
+ uint32_t index,
+ uint32_t* new_capacity) {
+ STATIC_ASSERT(JSObject::kMaxUncheckedOldFastElementsLength <=
+ JSObject::kMaxUncheckedFastElementsLength);
+ if (index < capacity) {
+ *new_capacity = capacity;
+ return false;
}
-
- UNREACHABLE();
- return MaybeHandle<Object>();
-}
-
-
-bool JSObject::HasFastArgumentsElements() {
- Heap* heap = GetHeap();
- if (!elements()->IsFixedArray()) return false;
- FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->sloppy_arguments_elements_map()) {
+ if (index - capacity >= JSObject::kMaxGap) return true;
+ *new_capacity = JSObject::NewElementsCapacity(index + 1);
+ DCHECK_LT(index, *new_capacity);
+ if (*new_capacity <= JSObject::kMaxUncheckedOldFastElementsLength ||
+ (*new_capacity <= JSObject::kMaxUncheckedFastElementsLength &&
+ object->GetHeap()->InNewSpace(object))) {
return false;
}
- FixedArray* arguments = FixedArray::cast(elements->get(1));
- return !arguments->IsDictionary();
+ // If the fast-case backing storage takes up roughly three times as
+ // much space (in machine words) as a dictionary backing storage
+ // would, the object should have slow elements.
+ int old_capacity = 0;
+ int used_elements = 0;
+ object->GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+ int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
+ SeededNumberDictionary::kEntrySize;
+ return 3 * static_cast<uint32_t>(dictionary_size) <= *new_capacity;
}
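
// Worked example with hypothetical numbers: storing at index 10000 into an
// object with capacity 100 and 100 used elements yields a new_capacity of
// roughly 10001 plus headroom, while a dictionary for 100 elements needs on
// the order of a few hundred words (ComputeCapacity(100) * kEntrySize), so
// 3 * dictionary_size <= new_capacity holds and the object converts to slow
// elements.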
-bool JSObject::HasDictionaryArgumentsElements() {
- Heap* heap = GetHeap();
- if (!elements()->IsFixedArray()) return false;
- FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->sloppy_arguments_elements_map()) {
- return false;
+bool JSObject::WouldConvertToSlowElements(uint32_t index) {
+ if (HasFastElements()) {
+ Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
+ uint32_t capacity = static_cast<uint32_t>(backing_store->length());
+ uint32_t new_capacity;
+ return ShouldConvertToSlowElements(this, capacity, index, &new_capacity);
}
- FixedArray* arguments = FixedArray::cast(elements->get(1));
- return arguments->IsDictionary();
+ return false;
}
-// Adding n elements in fast case is O(n*n).
-// Note: revisit design to have dual undefined values to capture absent
-// elements.
-MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode,
- bool check_prototype) {
- DCHECK(object->HasFastSmiOrObjectElements() ||
- object->HasFastArgumentsElements());
-
- Isolate* isolate = object->GetIsolate();
-
- // Array optimizations rely on the prototype lookups of Array objects always
- // returning undefined. If there is a store to the initial prototype object,
- // make sure all of these optimizations are invalidated.
- isolate->UpdateArrayProtectorOnSetElement(object);
-
- Handle<FixedArray> backing_store(FixedArray::cast(object->elements()));
- if (backing_store->map() ==
- isolate->heap()->sloppy_arguments_elements_map()) {
- backing_store = handle(FixedArray::cast(backing_store->get(1)));
- } else {
- backing_store = EnsureWritableFastElements(object);
- }
- uint32_t capacity = static_cast<uint32_t>(backing_store->length());
-
- if (check_prototype &&
- (index >= capacity || backing_store->get(index)->IsTheHole())) {
- bool found;
- MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
- object, index, value, &found, language_mode);
- if (found) return result;
+static ElementsKind BestFittingFastElementsKind(JSObject* object) {
+ if (object->HasSloppyArgumentsElements()) {
+ return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
-
- uint32_t new_capacity = capacity;
- // Check if the length property of this object needs to be updated.
- uint32_t array_length = 0;
- bool must_update_array_length = false;
- bool introduces_holes = true;
- if (object->IsJSArray()) {
- CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&array_length));
- introduces_holes = index > array_length;
- if (index >= array_length) {
- must_update_array_length = true;
- array_length = index + 1;
- }
- } else {
- introduces_holes = index >= capacity;
- }
-
- // If the array is growing, and it's not growth by a single element at the
- // end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = object->GetElementsKind();
- if (introduces_holes &&
- IsFastElementsKind(elements_kind) &&
- !IsFastHoleyElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- TransitionElementsKind(object, transitioned_kind);
- }
-
- // Check if the capacity of the backing store needs to be increased, or if
- // a transition to slow elements is necessary.
- if (index >= capacity) {
- bool convert_to_slow = true;
- if ((index - capacity) < kMaxGap) {
- new_capacity = NewElementsCapacity(index + 1);
- DCHECK(new_capacity > index);
- if (!object->ShouldConvertToSlowElements(new_capacity)) {
- convert_to_slow = false;
+ DCHECK(object->HasDictionaryElements());
+ SeededNumberDictionary* dictionary = object->element_dictionary();
+ ElementsKind kind = FAST_HOLEY_SMI_ELEMENTS;
+ for (int i = 0; i < dictionary->Capacity(); i++) {
+ Object* key = dictionary->KeyAt(i);
+ if (key->IsNumber()) {
+ Object* value = dictionary->ValueAt(i);
+ if (!value->IsNumber()) return FAST_HOLEY_ELEMENTS;
+ if (!value->IsSmi()) {
+ if (!FLAG_unbox_double_arrays) return FAST_HOLEY_ELEMENTS;
+ kind = FAST_HOLEY_DOUBLE_ELEMENTS;
}
}
- if (convert_to_slow) {
- NormalizeElements(object);
- return SetDictionaryElement(object, index, value, NONE, language_mode,
- check_prototype);
- }
- }
- // Convert to fast double elements if appropriate.
- if (object->HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
- // Consider fixing the boilerplate as well if we have one.
- ElementsKind to_kind = IsHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
-
- UpdateAllocationSite(object, to_kind);
-
- SetFastDoubleElementsCapacityAndLength(object, new_capacity, array_length);
- FixedDoubleArray::cast(object->elements())->set(index, value->Number());
- JSObject::ValidateElements(object);
- return value;
- }
- // Change elements kind from Smi-only to generic FAST if necessary.
- if (object->HasFastSmiElements() && !value->IsSmi()) {
- ElementsKind kind = object->HasFastHoleyElements()
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
-
- UpdateAllocationSite(object, kind);
- Handle<Map> new_map = GetElementsTransitionMap(object, kind);
- JSObject::MigrateToMap(object, new_map);
- DCHECK(IsFastObjectElementsKind(object->GetElementsKind()));
- }
- // Increase backing store capacity if that's been decided previously.
- if (new_capacity != capacity) {
- SetFastElementsCapacitySmiMode smi_mode =
- value->IsSmi() && object->HasFastSmiElements()
- ? kAllowSmiElements
- : kDontAllowSmiElements;
- Handle<FixedArray> new_elements =
- SetFastElementsCapacityAndLength(object, new_capacity, array_length,
- smi_mode);
- new_elements->set(index, *value);
- JSObject::ValidateElements(object);
- return value;
}
-
- // Finally, set the new element and length.
- DCHECK(object->elements()->IsFixedArray());
- backing_store->set(index, *value);
- if (must_update_array_length) {
- Handle<JSArray>::cast(object)->set_length(Smi::FromInt(array_length));
- }
- return value;
+ return kind;
}
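
// Summary of the decision above for hypothetical dictionary contents:
//   all values are Smis        -> FAST_HOLEY_SMI_ELEMENTS (initial guess)
//   all numbers, some non-Smi  -> FAST_HOLEY_DOUBLE_ELEMENTS
//                                 (only if FLAG_unbox_double_arrays)
//   any non-number value       -> FAST_HOLEY_ELEMENTS (early return)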
-MaybeHandle<Object> JSObject::SetDictionaryElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype, SetPropertyMode set_mode) {
- DCHECK(object->HasDictionaryElements() ||
- object->HasDictionaryArgumentsElements());
- Isolate* isolate = object->GetIsolate();
-
- // Insert element in the dictionary.
- Handle<FixedArray> elements(FixedArray::cast(object->elements()));
- bool is_arguments =
- (elements->map() == isolate->heap()->sloppy_arguments_elements_map());
- Handle<SeededNumberDictionary> dictionary(is_arguments
- ? SeededNumberDictionary::cast(elements->get(1))
- : SeededNumberDictionary::cast(*elements));
-
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- Handle<Object> element(dictionary->ValueAt(entry), isolate);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == ACCESSOR_CONSTANT && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(object, element, index, value, object,
- language_mode);
- } else if (set_mode == DEFINE_PROPERTY && !details.IsConfigurable() &&
- details.kind() == kAccessor) {
- return RedefineNonconfigurableProperty(
- isolate, isolate->factory()->NewNumberFromUint(index),
- isolate->factory()->undefined_value(), language_mode);
-
- } else if ((set_mode == DEFINE_PROPERTY && !details.IsConfigurable() &&
- details.IsReadOnly()) ||
- (set_mode == SET_PROPERTY && details.IsReadOnly() &&
- !element->IsTheHole())) {
- // If a value has not been initialized we allow writing to it even if it
- // is read-only (a declared const that has not been initialized).
- return WriteToReadOnlyProperty(
- isolate, object, isolate->factory()->NewNumberFromUint(index),
- isolate->factory()->undefined_value(), language_mode);
- } else {
- DCHECK(details.IsConfigurable() || !details.IsReadOnly() ||
- element->IsTheHole());
- dictionary->UpdateMaxNumberKey(index);
- if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(attributes, DATA, details.dictionary_index(),
- PropertyCellType::kNoCell);
- dictionary->DetailsAtPut(entry, details);
- }
-
- // Elements of the arguments object in slow mode might be slow aliases.
- if (is_arguments && element->IsAliasedArgumentsEntry()) {
- Handle<AliasedArgumentsEntry> entry =
- Handle<AliasedArgumentsEntry>::cast(element);
- Handle<Context> context(Context::cast(elements->get(0)));
- int context_index = entry->aliased_context_slot();
- DCHECK(!context->get(context_index)->IsTheHole());
- context->set(context_index, *value);
- // For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = element;
- }
- dictionary->ValueAtPut(entry, *value);
- }
- } else {
- // Index not already used. Look for an accessor in the prototype chain.
- // Can cause GC!
- if (check_prototype) {
- bool found;
- MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
- object, index, value, &found, language_mode);
- if (found) return result;
- }
-
- // When we set the is_extensible flag to false we always force the
- // element into dictionary mode (and force them to stay there).
- if (!object->map()->is_extensible()) {
- if (is_sloppy(language_mode)) {
- return isolate->factory()->undefined_value();
- } else {
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> name = isolate->factory()->NumberToString(number);
- Handle<Object> args[] = {name};
- THROW_NEW_ERROR(isolate,
- NewTypeError("object_not_extensible",
- HandleVector(args, arraysize(args))),
- Object);
- }
- }
-
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
- Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
- details);
- if (*dictionary != *new_dictionary) {
- if (is_arguments) {
- elements->set(1, *new_dictionary);
- } else {
- object->set_elements(*new_dictionary);
- }
- dictionary = new_dictionary;
- }
- }
-
- // Update the array length if this JSObject is an array.
- if (object->IsJSArray()) {
- JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray>::cast(object), index,
- value);
- }
-
- // Attempt to put this object back in fast case.
- if (object->ShouldConvertToFastElements()) {
- uint32_t new_length = 0;
- if (object->IsJSArray()) {
- CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&new_length));
- } else {
- new_length = dictionary->max_number_key() + 1;
- }
- bool has_smi_only_elements = false;
- bool should_convert_to_fast_double_elements =
- object->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
- SetFastElementsCapacitySmiMode smi_mode =
- has_smi_only_elements ? kForceSmiElements : kAllowSmiElements;
+static bool ShouldConvertToFastElements(JSObject* object,
+ SeededNumberDictionary* dictionary,
+ uint32_t index,
+ uint32_t* new_capacity) {
+ // If properties with non-standard attributes or accessors were added, we
+ // cannot go back to fast elements.
+ if (dictionary->requires_slow_elements()) return false;
- if (should_convert_to_fast_double_elements) {
- SetFastDoubleElementsCapacityAndLength(object, new_length, new_length);
- } else {
- SetFastElementsCapacityAndLength(object, new_length, new_length,
- smi_mode);
- }
- JSObject::ValidateElements(object);
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- OFStream os(stdout);
- os << "Object elements are fast case again:\n";
- object->Print(os);
- }
-#endif
- }
- return value;
-}
+ // Adding a property with this index will require slow elements.
+ if (index >= static_cast<uint32_t>(Smi::kMaxValue)) return false;
-MaybeHandle<Object> JSObject::SetFastDoubleElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- LanguageMode language_mode,
- bool check_prototype) {
- DCHECK(object->HasFastDoubleElements());
-
- Handle<FixedArrayBase> base_elms(FixedArrayBase::cast(object->elements()));
- uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
-
- // If storing to an element that isn't in the array, pass the store request
- // up the prototype chain before storing in the receiver's elements.
- if (check_prototype &&
- (index >= elms_length ||
- Handle<FixedDoubleArray>::cast(base_elms)->is_the_hole(index))) {
- bool found;
- MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
- object, index, value, &found, language_mode);
- if (found) return result;
- }
-
- // If the value object is not a heap number, switch to fast elements and try
- // again.
- bool value_is_smi = value->IsSmi();
- bool introduces_holes = true;
- uint32_t length = elms_length;
if (object->IsJSArray()) {
- CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&length));
- introduces_holes = index > length;
+ Object* length = JSArray::cast(object)->length();
+ if (!length->IsSmi()) return false;
+ *new_capacity = static_cast<uint32_t>(Smi::cast(length)->value());
} else {
- introduces_holes = index >= elms_length;
- }
-
- if (!value->IsNumber()) {
- SetFastElementsCapacityAndLength(object, elms_length, length,
- kDontAllowSmiElements);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- object->GetIsolate(), result,
- SetFastElement(object, index, value, language_mode, check_prototype),
- Object);
- JSObject::ValidateElements(object);
- return result;
- }
-
- double double_value = value_is_smi
- ? static_cast<double>(Handle<Smi>::cast(value)->value())
- : Handle<HeapNumber>::cast(value)->value();
-
- // If the array is growing, and it's not growth by a single element at the
- // end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = object->GetElementsKind();
- if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- TransitionElementsKind(object, transitioned_kind);
- }
-
- // Check whether there is extra space in the fixed array.
- if (index < elms_length) {
- Handle<FixedDoubleArray> elms(FixedDoubleArray::cast(object->elements()));
- elms->set(index, double_value);
- if (object->IsJSArray()) {
- // Update the length of the array if needed.
- uint32_t array_length = 0;
- CHECK(
- Handle<JSArray>::cast(object)->length()->ToArrayIndex(&array_length));
- if (index >= array_length) {
- Handle<JSArray>::cast(object)->set_length(Smi::FromInt(index + 1));
- }
- }
- return value;
- }
-
- // Allow gap in fast case.
- if ((index - elms_length) < kMaxGap) {
- // Try allocating extra space.
- int new_capacity = NewElementsCapacity(index+1);
- if (!object->ShouldConvertToSlowElements(new_capacity)) {
- DCHECK(static_cast<uint32_t>(new_capacity) > index);
- SetFastDoubleElementsCapacityAndLength(object, new_capacity, index + 1);
- FixedDoubleArray::cast(object->elements())->set(index, double_value);
- JSObject::ValidateElements(object);
- return value;
- }
+ *new_capacity = dictionary->max_number_key() + 1;
}
+ *new_capacity = Max(index + 1, *new_capacity);
- // Otherwise default to slow case.
- DCHECK(object->HasFastDoubleElements());
- DCHECK(object->map()->has_fast_double_elements());
- DCHECK(object->elements()->IsFixedDoubleArray() ||
- object->elements()->length() == 0);
-
- NormalizeElements(object);
- DCHECK(object->HasDictionaryElements());
- return SetElement(object, index, value, NONE, language_mode, check_prototype);
-}
-
-
-MaybeHandle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
- uint32_t index, Handle<Object> value,
- PropertyAttributes attributes,
- LanguageMode language_mode) {
- if (object->IsJSProxy()) {
- return JSProxy::SetElementWithHandler(Handle<JSProxy>::cast(object), object,
- index, value, language_mode);
- }
- return JSObject::SetElement(Handle<JSObject>::cast(object), index, value,
- attributes, language_mode);
+ uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
+ SeededNumberDictionary::kEntrySize;
+ return 2 * dictionary_size >= *new_capacity;
}
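
// Worked example with hypothetical numbers, assuming kEntrySize == 3 (key,
// value, details): a dictionary with capacity 128 occupies about 384 words.
// For new_capacity == 500, 2 * 384 == 768 >= 500, so the object converts
// back to fast elements; for new_capacity == 1000 it stays in dictionary
// mode.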
-MaybeHandle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- LanguageMode language_mode) {
- DCHECK(!object->HasExternalArrayElements());
- return JSObject::SetElement(object, index, value, attributes, language_mode,
- false);
-}
-
+// static
+MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ DCHECK(object->map()->is_extensible());
-MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
- uint32_t index, Handle<Object> value,
- PropertyAttributes attributes,
- LanguageMode language_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
- if (object->HasExternalArrayElements() ||
- object->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- Execution::ToNumber(isolate, value), Object);
- }
- }
+ uint32_t old_length = 0;
+ uint32_t new_capacity = 0;
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayAccess(object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
+ Handle<Object> old_length_handle;
+ if (object->IsJSArray()) {
+ CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
+ if (object->map()->is_observed()) {
+ old_length_handle = handle(JSArray::cast(*object)->length(), isolate);
}
}
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return value;
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return SetElement(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index,
- value, attributes, language_mode, check_prototype, set_mode);
- }
-
- // Don't allow element properties to be redefined for external arrays.
- if ((object->HasExternalArrayElements() ||
- object->HasFixedTypedArrayElements()) &&
- set_mode == DEFINE_PROPERTY) {
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { object, number };
- THROW_NEW_ERROR(isolate, NewTypeError("redef_external_array_element",
- HandleVector(args, arraysize(args))),
- Object);
+ ElementsKind kind = object->GetElementsKind();
+ FixedArrayBase* elements = object->elements();
+ ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
+ if (IsSloppyArgumentsElements(kind)) {
+ elements = FixedArrayBase::cast(FixedArray::cast(elements)->get(1));
+ dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
}
- // Normalize the elements to enable attributes on the property.
- if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
- Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
- // Make sure that we never go back to fast case.
- dictionary->set_requires_slow_elements();
- }
-
- if (!object->map()->is_observed()) {
- return object->HasIndexedInterceptor()
- ? SetElementWithInterceptor(object, index, value, attributes,
- language_mode, check_prototype,
- set_mode)
- : SetElementWithoutInterceptor(object, index, value, attributes,
- language_mode, check_prototype,
- set_mode);
- }
-
- Maybe<PropertyAttributes> maybe =
- JSReceiver::GetOwnElementAttribute(object, index);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- PropertyAttributes old_attributes = maybe.FromJust();
-
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- Handle<Object> old_length_handle;
+ if (attributes != NONE) {
+ kind = dictionary_kind;
+ } else if (elements->IsSeededNumberDictionary()) {
+ kind = ShouldConvertToFastElements(*object,
+ SeededNumberDictionary::cast(elements),
+ index, &new_capacity)
+ ? BestFittingFastElementsKind(*object)
+ : dictionary_kind; // Overwrite in case of arguments.
+ } else if (ShouldConvertToSlowElements(
+ *object, static_cast<uint32_t>(elements->length()), index,
+ &new_capacity)) {
+ kind = dictionary_kind;
+ }
+
+ ElementsKind to = value->OptimalElementsKind();
+ if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
+ to = GetHoleyElementsKind(to);
+ kind = GetHoleyElementsKind(kind);
+ }
+ to = IsMoreGeneralElementsKindTransition(kind, to) ? to : kind;
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
+ accessor->Add(object, index, value, attributes, new_capacity);
+
+ uint32_t new_length = old_length;
Handle<Object> new_length_handle;
-
- if (old_attributes != ABSENT) {
- if (GetOwnElementAccessorPair(object, index).is_null()) {
- old_value = Object::GetElement(isolate, object, index).ToHandleChecked();
- }
- } else if (object->IsJSArray()) {
- // Store old array length in case adding an element grows the array.
- old_length_handle = handle(Handle<JSArray>::cast(object)->length(),
- isolate);
+ if (object->IsJSArray() && index >= old_length) {
+ new_length = index + 1;
+ new_length_handle = isolate->factory()->NewNumberFromUint(new_length);
+ JSArray::cast(*object)->set_length(*new_length_handle);
}
- // Check for lookup interceptor
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- object->HasIndexedInterceptor()
- ? SetElementWithInterceptor(object, index, value, attributes,
- language_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(object, index, value, attributes,
- language_mode, check_prototype,
- set_mode),
- Object);
+ if (!old_length_handle.is_null() && new_length != old_length) {
+ // |old_length_handle| is kept null above unless the object is observed.
+ DCHECK(object->map()->is_observed());
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- maybe = GetOwnElementAttribute(object, index);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- PropertyAttributes new_attributes = maybe.FromJust();
-
- if (old_attributes == ABSENT) {
- if (object->IsJSArray() &&
- !old_length_handle->SameValue(
- Handle<JSArray>::cast(object)->length())) {
- new_length_handle = handle(Handle<JSArray>::cast(object)->length(),
- isolate);
- uint32_t old_length = 0;
- uint32_t new_length = 0;
- CHECK(old_length_handle->ToArrayIndex(&old_length));
- CHECK(new_length_handle->ToArrayIndex(&new_length));
-
- RETURN_ON_EXCEPTION(
- isolate, BeginPerformSplice(Handle<JSArray>::cast(object)), Object);
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "add", name, old_value), Object);
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "update",
- isolate->factory()->length_string(),
- old_length_handle),
- Object);
- RETURN_ON_EXCEPTION(
- isolate, EndPerformSplice(Handle<JSArray>::cast(object)), Object);
- Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- RETURN_ON_EXCEPTION(
- isolate,
- EnqueueSpliceRecord(Handle<JSArray>::cast(object), old_length,
- deleted, new_length - old_length),
- Object);
- } else {
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "add", name, old_value), Object);
- }
- } else if (old_value->IsTheHole()) {
+ RETURN_ON_EXCEPTION(isolate, BeginPerformSplice(array), Object);
RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "reconfigure", name, old_value),
+ isolate, EnqueueChangeRecord(array, "add", name,
+ isolate->factory()->the_hole_value()),
+ Object);
+ RETURN_ON_EXCEPTION(isolate,
+ EnqueueChangeRecord(array, "update",
+ isolate->factory()->length_string(),
+ old_length_handle),
+ Object);
+ RETURN_ON_EXCEPTION(isolate, EndPerformSplice(array), Object);
+ Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
+ RETURN_ON_EXCEPTION(isolate, EnqueueSpliceRecord(array, old_length, deleted,
+ new_length - old_length),
+ Object);
+ } else if (object->map()->is_observed()) {
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ RETURN_ON_EXCEPTION(
+ isolate, EnqueueChangeRecord(object, "add", name,
+ isolate->factory()->the_hole_value()),
Object);
- } else {
- Handle<Object> new_value =
- Object::GetElement(isolate, object, index).ToHandleChecked();
- bool value_changed = !old_value->SameValue(*new_value);
- if (old_attributes != new_attributes) {
- if (!value_changed) old_value = isolate->factory()->the_hole_value();
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "reconfigure", name, old_value),
- Object);
- } else if (value_changed) {
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, "update", name, old_value),
- Object);
- }
}
- return result;
+ return value;
}
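
// Illustrative sketch (standalone C++, simplified lattice; not V8 internals):
// the kind selection above keeps the current elements kind unless the stored
// value strictly needs a more general one, and any store that can leave a gap
// (index strictly beyond the current length, or a store into a non-array)
// first forces the holey variants of both kinds.
struct KindModel {
  bool holey;
  int rank;  // 0 = smi-only, 1 = unboxed double, 2 = tagged object
};

KindModel PickStoreKind(KindModel current, KindModel optimal_for_value,
                        bool store_creates_hole) {
  if (current.holey || store_creates_hole) {
    current.holey = true;
    optimal_for_value.holey = true;
  }
  // Mirrors IsMoreGeneralElementsKindTransition: move only when the value's
  // optimal kind is strictly more general than the current kind.
  return optimal_for_value.rank > current.rank ? optimal_for_value : current;
}
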
-MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype, SetPropertyMode set_mode) {
- DCHECK(object->HasDictionaryElements() ||
- object->HasDictionaryArgumentsElements() ||
- (attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
- Isolate* isolate = object->GetIsolate();
- if (FLAG_trace_external_array_abuse &&
- IsExternalArrayElementsKind(object->GetElementsKind())) {
- CheckArrayAbuse(object, "external elements write", index);
- }
- if (FLAG_trace_js_array_abuse &&
- !IsExternalArrayElementsKind(object->GetElementsKind())) {
- if (object->IsJSArray()) {
- CheckArrayAbuse(object, "elements write", index, true);
- }
- }
- if (object->IsJSArray() && JSArray::WouldChangeReadOnlyLength(
- Handle<JSArray>::cast(object), index)) {
- if (is_sloppy(language_mode)) {
- return value;
- } else {
- return JSArray::ReadOnlyLengthError(Handle<JSArray>::cast(object));
- }
- }
- switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- return SetFastElement(object, index, value, language_mode,
- check_prototype);
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return SetFastDoubleElement(object, index, value, language_mode,
- check_prototype);
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: { \
- Handle<External##Type##Array> array( \
- External##Type##Array::cast(object->elements())); \
- return External##Type##Array::SetValue(object, array, index, value); \
- } \
- case TYPE##_ELEMENTS: { \
- Handle<Fixed##Type##Array> array( \
- Fixed##Type##Array::cast(object->elements())); \
- return Fixed##Type##Array::SetValue(object, array, index, value); \
- }
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-
-#undef TYPED_ARRAY_CASE
-
- case DICTIONARY_ELEMENTS:
- return SetDictionaryElement(object, index, value, attributes,
- language_mode, check_prototype, set_mode);
- case SLOPPY_ARGUMENTS_ELEMENTS: {
- Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
- uint32_t length = parameter_map->length();
- Handle<Object> probe = index < length - 2 ?
- Handle<Object>(parameter_map->get(index + 2), isolate) :
- Handle<Object>();
- if (!probe.is_null() && !probe->IsTheHole()) {
- Handle<Context> context(Context::cast(parameter_map->get(0)));
- int context_index = Handle<Smi>::cast(probe)->value();
- DCHECK(!context->get(context_index)->IsTheHole());
- context->set(context_index, *value);
- // Redefining attributes of an aliased element destroys fast aliasing.
- if (set_mode == SET_PROPERTY || attributes == NONE) return value;
- parameter_map->set_the_hole(index + 2);
- // For elements that are still writable we re-establish slow aliasing.
- if ((attributes & READ_ONLY) == 0) {
- value = Handle<Object>::cast(
- isolate->factory()->NewAliasedArgumentsEntry(context_index));
- }
- }
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- if (arguments->IsDictionary()) {
- return SetDictionaryElement(object, index, value, attributes,
- language_mode, check_prototype, set_mode);
- } else {
- return SetFastElement(object, index, value, language_mode,
- check_prototype);
- }
- }
- }
- // All possible cases have been handled above. Add a return to avoid the
- // complaints from the compiler.
- UNREACHABLE();
- return isolate->factory()->null_value();
+bool JSArray::SetLengthWouldNormalize(uint32_t new_length) {
+ if (!HasFastElements()) return false;
+ uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t new_capacity;
+ return JSArray::SetLengthWouldNormalize(GetHeap(), new_length) &&
+ ShouldConvertToSlowElements(this, capacity, new_length - 1,
+ &new_capacity);
}
@@ -13565,7 +12610,7 @@ void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
- CHECK(transition_info->length()->ToArrayIndex(&length));
+ CHECK(transition_info->length()->ToArrayLength(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
bool is_nested = site->IsNestedSite();
@@ -13638,25 +12683,23 @@ void JSObject::UpdateAllocationSite(Handle<JSObject> object,
void JSObject::TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind) {
- ElementsKind from_kind = object->map()->elements_kind();
+ ElementsKind from_kind = object->GetElementsKind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
if (from_kind == to_kind) return;
- // Don't update the site if to_kind isn't fast
- if (IsFastElementsKind(to_kind)) {
- UpdateAllocationSite(object, to_kind);
- }
- Isolate* isolate = object->GetIsolate();
- if (object->elements() == isolate->heap()->empty_fixed_array() ||
- (IsFastSmiOrObjectElementsKind(from_kind) &&
- IsFastSmiOrObjectElementsKind(to_kind)) ||
- (from_kind == FAST_DOUBLE_ELEMENTS &&
- to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) {
- DCHECK(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
+ // This method should never be called for any other case.
+ DCHECK(IsFastElementsKind(from_kind));
+ DCHECK(IsFastElementsKind(to_kind));
+ DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
+
+ UpdateAllocationSite(object, to_kind);
+ if (object->elements() == object->GetHeap()->empty_fixed_array() ||
+ IsFastDoubleElementsKind(from_kind) ==
+ IsFastDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
@@ -13665,42 +12708,14 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
Handle<FixedArrayBase> elms(object->elements());
PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
- return;
- }
-
- Handle<FixedArrayBase> elms(object->elements());
- uint32_t capacity = static_cast<uint32_t>(elms->length());
- uint32_t length = capacity;
-
- if (object->IsJSArray()) {
- Object* raw_length = Handle<JSArray>::cast(object)->length();
- if (raw_length->IsUndefined()) {
- // If length is undefined, then JSArray is being initialized and has no
- // elements, assume a length of zero.
- length = 0;
- } else {
- CHECK(raw_length->ToArrayIndex(&length));
- }
- }
-
- if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- SetFastDoubleElementsCapacityAndLength(object, capacity, length);
- JSObject::ValidateElements(object);
- return;
- }
-
- if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- SetFastElementsCapacityAndLength(object, capacity, length,
- kDontAllowSmiElements);
- JSObject::ValidateElements(object);
- return;
+ } else {
+ DCHECK((IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) ||
+ (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)));
+ uint32_t c = static_cast<uint32_t>(object->elements()->length());
+ ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
}
-
- // This method should never be called for any other case than the ones
- // handled above.
- UNREACHABLE();
}
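
// Illustrative sketch (standalone; not V8 internals): the branch above boils
// down to "does the transition cross the unboxed-double boundary?" Smi and
// tagged-object elements share the FixedArray layout, so those transitions
// need only a map change; double <-> tagged must rewrite the backing store.
bool NeedsBackingStoreConversion(bool from_double, bool to_double,
                                 bool elements_empty) {
  if (elements_empty) return false;  // empty_fixed_array fits every kind
  return from_double != to_double;   // layout change: grow/convert and copy
}
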
@@ -13718,21 +12733,6 @@ bool Map::IsValidElementsTransition(ElementsKind from_kind,
}
-void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
- uint32_t index,
- Handle<Object> value) {
- uint32_t old_len = 0;
- CHECK(array->length()->ToArrayIndex(&old_len));
- // Check to see if we need to update the length. For now, we make
- // sure that the length stays within 32-bits (unsigned).
- if (index >= old_len && index != 0xffffffff) {
- Handle<Object> len = array->GetIsolate()->factory()->NewNumber(
- static_cast<double>(index) + 1);
- array->set_length(*len);
- }
-}
-
-
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -13746,7 +12746,7 @@ bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
uint32_t index) {
uint32_t length = 0;
- CHECK(array->length()->ToArrayIndex(&length));
+ CHECK(array->length()->ToArrayLength(&length));
if (length <= index) return HasReadOnlyLength(array);
return false;
}
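
// Illustrative models (standalone; the real predicates operate on heap
// Objects): an array *index* tops out at 2^32 - 2 -- consistent with the
// deleted `index != 0xffffffff` check above -- while an array *length* may be
// as large as 2^32 - 1, which is why length reads switch to ToArrayLength.
#include <cstdint>

bool ToArrayIndexModel(double v, uint32_t* out) {
  if (v < 0 || v >= 4294967295.0) return false;   // indices stop at 2^32 - 2
  uint32_t u = static_cast<uint32_t>(v);
  if (static_cast<double>(u) != v) return false;  // whole numbers only
  *out = u;
  return true;
}

bool ToArrayLengthModel(double v, uint32_t* out) {
  if (v < 0 || v > 4294967295.0) return false;    // lengths go up to 2^32 - 1
  uint32_t u = static_cast<uint32_t>(v);
  if (static_cast<double>(u) != v) return false;
  *out = u;
  return true;
}
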
@@ -13755,62 +12755,10 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
Isolate* isolate = array->GetIsolate();
Handle<Name> length = isolate->factory()->length_string();
- Handle<Object> args[] = {length, array};
- THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
- HandleVector(args, arraysize(args))),
- Object);
-}
-
-
-MaybeHandle<Object> JSObject::GetElementWithInterceptor(Handle<JSObject> object,
- Handle<Object> receiver,
- uint32_t index,
- bool check_prototype) {
- Isolate* isolate = object->GetIsolate();
-
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor(), isolate);
- if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", *object, index));
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Handle<v8::Value> result = args.Call(getter, index);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!result.IsEmpty()) {
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- // Rebox handle before return.
- return handle(*result_internal, isolate);
- }
- }
-
- if (!check_prototype) return MaybeHandle<Object>();
-
- ElementsAccessor* handler = object->GetElementsAccessor();
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, handler->Get(receiver, object, index),
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty, length, array),
Object);
- if (!result->IsTheHole()) return result;
-
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
- return Object::GetElementWithReceiver(
- isolate, PrototypeIterator::GetCurrent(iter), receiver, index);
-}
-
-
-bool JSObject::HasDenseElements() {
- int capacity = 0;
- int used = 0;
- GetElementsCapacityAndUsage(&capacity, &used);
- return (capacity == 0) || (used > (capacity / 2));
}
@@ -13821,7 +12769,8 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
FixedArray* backing_store = NULL;
switch (GetElementsKind()) {
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
backing_store_base =
FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
backing_store = FixedArray::cast(backing_store_base);
@@ -13889,102 +12838,6 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
-bool JSObject::WouldConvertToSlowElements(uint32_t index) {
- if (HasFastElements()) {
- Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
- uint32_t capacity = static_cast<uint32_t>(backing_store->length());
- if (index >= capacity) {
- if ((index - capacity) >= kMaxGap) return true;
- uint32_t new_capacity = NewElementsCapacity(index + 1);
- return ShouldConvertToSlowElements(new_capacity);
- }
- }
- return false;
-}
-
-
-bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
- STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
- kMaxUncheckedFastElementsLength);
- if (new_capacity <= kMaxUncheckedOldFastElementsLength ||
- (new_capacity <= kMaxUncheckedFastElementsLength &&
- GetHeap()->InNewSpace(this))) {
- return false;
- }
- // If the fast-case backing storage takes up roughly three times as
- // much space (in machine words) as a dictionary backing storage
- // would, the object should have slow elements.
- int old_capacity = 0;
- int used_elements = 0;
- GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
- SeededNumberDictionary::kEntrySize;
- return 3 * dictionary_size <= new_capacity;
-}
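
// Illustrative sketch (standalone, approximating the deleted heuristic above
// with a flat words-per-entry estimate): stay fast until the fast backing
// store would cost roughly three times the machine words of a dictionary
// holding the elements actually in use.
bool ShouldGoSlowModel(int used_elements, int new_capacity,
                       int words_per_dictionary_entry) {
  int dictionary_words = used_elements * words_per_dictionary_entry;
  return 3 * dictionary_words <= new_capacity;
}
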
-
-
-bool JSObject::ShouldConvertToFastElements() {
- DCHECK(HasDictionaryElements() || HasDictionaryArgumentsElements());
- // If the elements are sparse, we should not go back to fast case.
- if (!HasDenseElements()) return false;
- // An object requiring access checks is never allowed to have fast
- // elements. If it had fast elements we would skip security checks.
- if (IsAccessCheckNeeded()) return false;
- // Observed objects may not go to fast mode because they rely on map checks,
- // and for fast element accesses we sometimes check element kinds only.
- if (map()->is_observed()) return false;
-
- FixedArray* elements = FixedArray::cast(this->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == GetHeap()->sloppy_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- // If an element has been added at a very high index in the elements
- // dictionary, we cannot go back to fast case.
- if (dictionary->requires_slow_elements()) return false;
- // If the dictionary backing storage takes up roughly half as much
- // space (in machine words) as a fast-case backing storage would,
- // the object should have fast elements.
- uint32_t array_size = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_size));
- } else {
- array_size = dictionary->max_number_key();
- }
- uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
- SeededNumberDictionary::kEntrySize;
- return 2 * dictionary_size >= array_size;
-}
-
-
-bool JSObject::ShouldConvertToFastDoubleElements(
- bool* has_smi_only_elements) {
- *has_smi_only_elements = false;
- if (HasSloppyArgumentsElements()) return false;
- if (FLAG_unbox_double_arrays) {
- DCHECK(HasDictionaryElements());
- SeededNumberDictionary* dictionary = element_dictionary();
- bool found_double = false;
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- Object* value = dictionary->ValueAt(i);
- if (!value->IsNumber()) return false;
- if (!value->IsSmi()) {
- found_double = true;
- }
- }
- }
- *has_smi_only_elements = !found_double;
- return found_double;
- } else {
- return false;
- }
-}
-
-
// Certain compilers request function template instantiation when they
// see the definition of the other template functions in the
// class. This requires us to have the template functions put
@@ -13993,17 +12846,18 @@ bool JSObject::ShouldConvertToFastDoubleElements(
#ifdef OBJECT_PRINT
template <typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
- int capacity = DerivedHashTable::Capacity();
+ int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = DerivedHashTable::KeyAt(i);
- if (DerivedHashTable::IsKey(k)) {
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k)) {
os << " ";
if (k->IsString()) {
String::cast(k)->StringPrint(os);
} else {
os << Brief(k);
}
- os << ": " << Brief(ValueAt(i)) << " " << DetailsAt(i) << "\n";
+ os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i)
+ << "\n";
}
}
}
@@ -14013,13 +12867,13 @@ void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
template<typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
- int capacity = DerivedHashTable::Capacity();
+ int capacity = this->Capacity();
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
- Object* k = Dictionary::KeyAt(i);
- if (Dictionary::IsKey(k)) {
- elements->set(pos++, ValueAt(i), mode);
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k)) {
+ elements->set(pos++, this->ValueAt(i), mode);
}
}
DCHECK(pos == elements->length());
@@ -14046,32 +12900,52 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
}
-MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(
- Handle<JSObject> holder,
- Handle<Object> receiver,
- Handle<Name> name) {
- Isolate* isolate = holder->GetIsolate();
+MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
+ bool* done) {
+ *done = false;
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate);
- if (interceptor->getter()->IsUndefined()) return MaybeHandle<Object>();
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ Handle<InterceptorInfo> interceptor = it->GetInterceptor();
+ if (interceptor->getter()->IsUndefined()) {
+ return isolate->factory()->undefined_value();
+ }
- if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return MaybeHandle<Object>();
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ v8::Local<v8::Value> result;
+ PropertyCallbackArguments args(isolate, interceptor->data(),
+ *it->GetReceiver(), *holder);
+
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-get", *holder, index));
+ result = args.Call(getter, index);
+ } else {
+ Handle<Name> name = it->name();
+
+ if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+ return isolate->factory()->undefined_value();
+ }
+
+ v8::GenericNamedPropertyGetterCallback getter =
+ v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+ interceptor->getter());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-get", *holder, *name));
+ result = args.Call(getter, v8::Utils::ToLocal(name));
}
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder, *name));
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *holder);
- v8::Handle<v8::Value> result = args.Call(getter, v8::Utils::ToLocal(name));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.IsEmpty()) return MaybeHandle<Object>();
-
+ if (result.IsEmpty()) return isolate->factory()->undefined_value();
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
+ *done = true;
// Rebox handle before return
return handle(*result_internal, isolate);
}
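
// Illustrative sketch (standalone; std::optional stands in for an empty
// v8::Local): |done| gives the caller a three-way outcome -- no interceptor
// or interceptor declined (undefined, keep walking the lookup chain) versus
// interceptor produced a value (stop and return it).
#include <functional>
#include <optional>

std::optional<int> GetWithInterceptorModel(
    const std::function<std::optional<int>()>& getter, bool* done) {
  *done = false;
  if (!getter) return std::nullopt;      // no getter installed
  std::optional<int> result = getter();  // interceptor may decline
  if (!result) return std::nullopt;      // declined: caller keeps looking
  *done = true;                          // handled: caller uses this value
  return result;
}
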
@@ -14084,7 +12958,7 @@ MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
PropertyCallbackArguments
args(isolate, interceptor->data(), *receiver, *object);
- v8::Handle<v8::Object> result;
+ v8::Local<v8::Object> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::GenericNamedPropertyEnumeratorCallback enum_fun =
v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
@@ -14107,7 +12981,7 @@ MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
PropertyCallbackArguments
args(isolate, interceptor->data(), *receiver, *object);
- v8::Handle<v8::Object> result;
+ v8::Local<v8::Object> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumeratorCallback enum_fun =
v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
@@ -14124,8 +12998,9 @@ MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
- Handle<Name> key) {
- LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ name->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
if (!maybe_result.IsJust()) return Nothing<bool>();
return Just(it.IsFound());
@@ -14135,34 +13010,18 @@ Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
uint32_t index) {
Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- // Check access rights if needed.
- if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayAccess(object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- return Just(false);
- }
- }
-
- if (object->IsJSGlobalProxy()) {
- HandleScope scope(isolate);
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return Just(false);
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return HasRealElementProperty(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index);
- }
-
- Maybe<PropertyAttributes> result =
- GetElementAttributeWithoutInterceptor(object, object, index, false);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ LookupIterator it(isolate, object, index,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
+ if (!maybe_result.IsJust()) return Nothing<bool>();
+ return Just(it.IsFound());
}
Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
- Handle<Name> key) {
- LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Name> name) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ name->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
return maybe_result.IsJust() ? Just(it.state() == LookupIterator::ACCESSOR)
: Nothing<bool>();
@@ -14178,8 +13037,11 @@ int JSObject::NumberOfOwnProperties(PropertyAttributes filter) {
if (result != kInvalidEnumCacheSentinel) return result;
}
return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
+ } else if (IsGlobalObject()) {
+ return global_dictionary()->NumberOfElementsFilterAttributes(filter);
+ } else {
+ return property_dictionary()->NumberOfElementsFilterAttributes(filter);
}
- return property_dictionary()->NumberOfElementsFilterAttributes(this, filter);
}
@@ -14311,8 +13173,11 @@ void JSObject::GetOwnPropertyNames(
storage->set(index++, descs->GetKey(i));
}
}
+ } else if (IsGlobalObject()) {
+ global_dictionary()->CopyKeysTo(storage, index, filter,
+ GlobalDictionary::UNSORTED);
} else {
- property_dictionary()->CopyKeysTo(this, storage, index, filter,
+ property_dictionary()->CopyKeysTo(storage, index, filter,
NameDictionary::UNSORTED);
}
}
@@ -14396,16 +13261,14 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
- element_dictionary()->CopyKeysTo<DictionaryEntryType::kObjects>(
- storage, filter, SeededNumberDictionary::SORTED);
+ element_dictionary()->CopyKeysTo(storage, filter,
+ SeededNumberDictionary::SORTED);
}
- counter +=
- element_dictionary()
- ->NumberOfElementsFilterAttributes<DictionaryEntryType::kObjects>(
- filter);
+ counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS: {
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
int mapped_length = parameter_map->length() - 2;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -14415,11 +13278,10 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
SeededNumberDictionary* dictionary =
SeededNumberDictionary::cast(arguments);
if (storage != NULL) {
- dictionary->CopyKeysTo<DictionaryEntryType::kObjects>(
- storage, filter, SeededNumberDictionary::UNSORTED);
+ dictionary->CopyKeysTo(storage, filter,
+ SeededNumberDictionary::UNSORTED);
}
- counter += dictionary->NumberOfElementsFilterAttributes<
- DictionaryEntryType::kObjects>(filter);
+ counter += dictionary->NumberOfElementsFilterAttributes(filter);
for (int i = 0; i < mapped_length; ++i) {
if (!parameter_map->get(i + 2)->IsTheHole()) {
if (storage != NULL) storage->set(counter, Smi::FromInt(i));
@@ -14737,9 +13599,10 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
// Find entry for key otherwise return kNotFound.
-int NameDictionary::FindEntry(Handle<Name> key) {
+template <typename Derived, typename Shape>
+int NameDictionaryBase<Derived, Shape>::FindEntry(Handle<Name> key) {
if (!key->IsUniqueName()) {
- return DerivedHashTable::FindEntry(key);
+ return DerivedDictionary::FindEntry(key);
}
// Optimized for unique names. Knowledge of the key type allows:
@@ -14752,13 +13615,13 @@ int NameDictionary::FindEntry(Handle<Name> key) {
// boost a certain style of code).
// EnsureCapacity will guarantee the hash table is never full.
- uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(key->Hash(), capacity);
+ uint32_t capacity = this->Capacity();
+ uint32_t entry = Derived::FirstProbe(key->Hash(), capacity);
uint32_t count = 1;
while (true) {
- int index = EntryToIndex(entry);
- Object* element = get(index);
+ int index = Derived::EntryToIndex(entry);
+ Object* element = this->get(index);
if (element->IsUndefined()) break; // Empty entry.
if (*key == element) return entry;
if (!element->IsUniqueName() &&
@@ -14766,13 +13629,13 @@ int NameDictionary::FindEntry(Handle<Name> key) {
Name::cast(element)->Equals(*key)) {
// Replace a key that is a non-internalized string by the equivalent
// internalized string for faster further lookups.
- set(index, *key);
+ this->set(index, *key);
return entry;
}
DCHECK(element->IsTheHole() || !Name::cast(element)->Equals(*key));
- entry = NextProbe(entry, count++, capacity);
+ entry = Derived::NextProbe(entry, count++, capacity);
}
- return kNotFound;
+ return Derived::kNotFound;
}
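
// Illustrative sketch (standalone): the loop above follows V8's quadratic
// probing over a power-of-two table; the step grows by one per collision and
// wraps with a mask, so for hash h the sequence is h, h+1, h+3, h+6, ...
// (mod capacity), which visits every slot of a power-of-two table.
#include <cstdint>

inline uint32_t FirstProbeModel(uint32_t hash, uint32_t capacity) {
  return hash & (capacity - 1);            // capacity is a power of two
}

inline uint32_t NextProbeModel(uint32_t last, uint32_t count,
                               uint32_t capacity) {
  return (last + count) & (capacity - 1);  // triangular-number stepping
}
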
@@ -14793,12 +13656,12 @@ void HashTable<Derived, Shape, Key>::Rehash(
}
// Rehash the elements.
- int capacity = Capacity();
+ int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
- Object* k = get(from_index);
+ Object* k = this->get(from_index);
if (IsKey(k)) {
- uint32_t hash = HashTable::HashForObject(key, k);
+ uint32_t hash = this->HashForObject(key, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(hash));
for (int j = 0; j < Shape::kEntrySize; j++) {
@@ -14817,8 +13680,8 @@ uint32_t HashTable<Derived, Shape, Key>::EntryForProbe(
Object* k,
int probe,
uint32_t expected) {
- uint32_t hash = HashTable::HashForObject(key, k);
- uint32_t capacity = Capacity();
+ uint32_t hash = this->HashForObject(key, k);
+ uint32_t capacity = this->Capacity();
uint32_t entry = FirstProbe(hash, capacity);
for (int i = 1; i < probe; i++) {
if (entry == expected) return expected;
@@ -14977,6 +13840,9 @@ template class HashTable<WeakHashTable, WeakHashTableShape<2>, Handle<Object> >;
template class Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >;
+template class Dictionary<GlobalDictionary, GlobalDictionaryShape,
+ Handle<Name> >;
+
template class Dictionary<SeededNumberDictionary,
SeededNumberDictionaryShape,
uint32_t>;
@@ -14997,6 +13863,10 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
New(Isolate*, int n, PretenureFlag pretenure);
+template Handle<GlobalDictionary>
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name> >::New(
+ Isolate*, int n, PretenureFlag pretenure);
+
template Handle<SeededNumberDictionary>
Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
AtPut(Handle<SeededNumberDictionary>, uint32_t, Handle<Object>);
@@ -15037,6 +13907,11 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add(
Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails);
+template Handle<GlobalDictionary>
+ Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name> >::Add(
+ Handle<GlobalDictionary>, Handle<Name>, Handle<Object>,
+ PropertyDetails);
+
template Handle<FixedArray> Dictionary<
NameDictionary, NameDictionaryShape,
Handle<Name> >::BuildIterationIndicesArray(Handle<NameDictionary>);
@@ -15071,17 +13946,15 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
-template bool
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::HasComplexElements<DictionaryEntryType::kCells>();
-
-template bool
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::HasComplexElements<DictionaryEntryType::kObjects>();
+template bool Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::HasComplexElements();
template int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape,
uint32_t>::FindEntry(uint32_t);
+template int NameDictionaryBase<NameDictionary, NameDictionaryShape>::FindEntry(
+ Handle<Name>);
+
Handle<Object> JSObject::PrepareSlowElementsForSort(
Handle<JSObject> object, uint32_t limit) {
@@ -15337,221 +14210,169 @@ size_t JSTypedArray::element_size() {
}
-Handle<Object> ExternalUint8ClampedArray::SetValue(
- Handle<JSObject> holder, Handle<ExternalUint8ClampedArray> array,
- uint32_t index, Handle<Object> value) {
+void FixedArray::SetValue(uint32_t index, Object* value) { set(index, value); }
+
+
+void FixedDoubleArray::SetValue(uint32_t index, Object* value) {
+ set(index, value->Number());
+}
+
+
+void ExternalUint8ClampedArray::SetValue(uint32_t index, Object* value) {
uint8_t clamped_value = 0;
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
- if (!view->WasNeutered()) {
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsSmi()) {
- int int_value = Handle<Smi>::cast(value)->value();
- if (int_value < 0) {
- clamped_value = 0;
- } else if (int_value > 255) {
- clamped_value = 255;
- } else {
- clamped_value = static_cast<uint8_t>(int_value);
- }
- } else if (value->IsHeapNumber()) {
- double double_value = Handle<HeapNumber>::cast(value)->value();
- if (!(double_value > 0)) {
- // NaN and less than zero clamp to zero.
- clamped_value = 0;
- } else if (double_value > 255) {
- // Greater than 255 clamp to 255.
- clamped_value = 255;
- } else {
- // Other doubles are rounded to the nearest integer.
- clamped_value = static_cast<uint8_t>(lrint(double_value));
- }
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- array->set(index, clamped_value);
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ if (int_value < 0) {
+ clamped_value = 0;
+ } else if (int_value > 255) {
+ clamped_value = 255;
+ } else {
+ clamped_value = static_cast<uint8_t>(int_value);
+ }
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ if (!(double_value > 0)) {
+ // NaN and less than zero clamp to zero.
+ clamped_value = 0;
+ } else if (double_value > 255) {
+ // Greater than 255 clamp to 255.
+ clamped_value = 255;
+ } else {
+ // Other doubles are rounded to the nearest integer.
+ clamped_value = static_cast<uint8_t>(lrint(double_value));
}
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
}
- return handle(Smi::FromInt(clamped_value), array->GetIsolate());
+ set(index, clamped_value);
}
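
// Illustrative sketch (plain C++): the clamping rules above in one function.
// NaN and non-positive values clamp to 0, values above 255 clamp to 255, and
// everything in between rounds to the nearest integer via lrint.
#include <cmath>
#include <cstdint>

uint8_t ClampToUint8(double value) {
  if (!(value > 0)) return 0;  // NaN and values <= 0 clamp to zero
  if (value > 255) return 255;
  return static_cast<uint8_t>(std::lrint(value));  // round to nearest
}
// e.g. ClampToUint8(-3.5) == 0, ClampToUint8(300.0) == 255,
//      ClampToUint8(2.5) == 2 (ties round to even in the default mode).
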
template <typename ExternalArrayClass, typename ValueType>
-static Handle<Object> ExternalArrayIntSetter(
- Isolate* isolate, Handle<JSObject> holder,
- Handle<ExternalArrayClass> receiver, uint32_t index, Handle<Object> value) {
+static void ExternalArrayIntSetter(ExternalArrayClass* receiver, uint32_t index,
+ Object* value) {
ValueType cast_value = 0;
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
- if (!view->WasNeutered()) {
- if (index < static_cast<uint32_t>(receiver->length())) {
- if (value->IsSmi()) {
- int int_value = Handle<Smi>::cast(value)->value();
- cast_value = static_cast<ValueType>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = Handle<HeapNumber>::cast(value)->value();
- cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- receiver->set(index, cast_value);
- }
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ cast_value = static_cast<ValueType>(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
}
- return isolate->factory()->NewNumberFromInt(cast_value);
+ receiver->set(index, cast_value);
}
-Handle<Object> ExternalInt8Array::SetValue(Handle<JSObject> holder,
- Handle<ExternalInt8Array> array,
- uint32_t index,
- Handle<Object> value) {
- return ExternalArrayIntSetter<ExternalInt8Array, int8_t>(
- array->GetIsolate(), holder, array, index, value);
+void ExternalInt8Array::SetValue(uint32_t index, Object* value) {
+ ExternalArrayIntSetter<ExternalInt8Array, int8_t>(this, index, value);
}
-Handle<Object> ExternalUint8Array::SetValue(Handle<JSObject> holder,
- Handle<ExternalUint8Array> array,
- uint32_t index,
- Handle<Object> value) {
- return ExternalArrayIntSetter<ExternalUint8Array, uint8_t>(
- array->GetIsolate(), holder, array, index, value);
+void ExternalUint8Array::SetValue(uint32_t index, Object* value) {
+ ExternalArrayIntSetter<ExternalUint8Array, uint8_t>(this, index, value);
}
-Handle<Object> ExternalInt16Array::SetValue(Handle<JSObject> holder,
- Handle<ExternalInt16Array> array,
- uint32_t index,
- Handle<Object> value) {
- return ExternalArrayIntSetter<ExternalInt16Array, int16_t>(
- array->GetIsolate(), holder, array, index, value);
+void ExternalInt16Array::SetValue(uint32_t index, Object* value) {
+ ExternalArrayIntSetter<ExternalInt16Array, int16_t>(this, index, value);
}
-Handle<Object> ExternalUint16Array::SetValue(Handle<JSObject> holder,
- Handle<ExternalUint16Array> array,
- uint32_t index,
- Handle<Object> value) {
- return ExternalArrayIntSetter<ExternalUint16Array, uint16_t>(
- array->GetIsolate(), holder, array, index, value);
+void ExternalUint16Array::SetValue(uint32_t index, Object* value) {
+ ExternalArrayIntSetter<ExternalUint16Array, uint16_t>(this, index, value);
}
-Handle<Object> ExternalInt32Array::SetValue(Handle<JSObject> holder,
- Handle<ExternalInt32Array> array,
- uint32_t index,
- Handle<Object> value) {
- return ExternalArrayIntSetter<ExternalInt32Array, int32_t>(
- array->GetIsolate(), holder, array, index, value);
+void ExternalInt32Array::SetValue(uint32_t index, Object* value) {
+ ExternalArrayIntSetter<ExternalInt32Array, int32_t>(this, index, value);
}
-Handle<Object> ExternalUint32Array::SetValue(Handle<JSObject> holder,
- Handle<ExternalUint32Array> array,
- uint32_t index,
- Handle<Object> value) {
+void ExternalUint32Array::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
- if (!view->WasNeutered()) {
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsSmi()) {
- int int_value = Handle<Smi>::cast(value)->value();
- cast_value = static_cast<uint32_t>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = Handle<HeapNumber>::cast(value)->value();
- cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- array->set(index, cast_value);
- }
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ cast_value = static_cast<uint32_t>(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
}
- return array->GetIsolate()->factory()->NewNumberFromUint(cast_value);
+ set(index, cast_value);
}
-Handle<Object> ExternalFloat32Array::SetValue(
- Handle<JSObject> holder, Handle<ExternalFloat32Array> array, uint32_t index,
- Handle<Object> value) {
+void ExternalFloat32Array::SetValue(uint32_t index, Object* value) {
float cast_value = std::numeric_limits<float>::quiet_NaN();
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
- if (!view->WasNeutered()) {
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsSmi()) {
- int int_value = Handle<Smi>::cast(value)->value();
- cast_value = static_cast<float>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = Handle<HeapNumber>::cast(value)->value();
- cast_value = static_cast<float>(double_value);
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- array->set(index, cast_value);
- }
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ cast_value = static_cast<float>(int_value);
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ cast_value = static_cast<float>(double_value);
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
}
- return array->GetIsolate()->factory()->NewNumber(cast_value);
+ set(index, cast_value);
}
-Handle<Object> ExternalFloat64Array::SetValue(
- Handle<JSObject> holder, Handle<ExternalFloat64Array> array, uint32_t index,
- Handle<Object> value) {
+void ExternalFloat64Array::SetValue(uint32_t index, Object* value) {
double double_value = std::numeric_limits<double>::quiet_NaN();
- Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(holder);
- if (!view->WasNeutered()) {
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsNumber()) {
- double_value = value->Number();
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- array->set(index, double_value);
- }
+ if (value->IsNumber()) {
+ double_value = value->Number();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
}
- return array->GetIsolate()->factory()->NewNumber(double_value);
+ set(index, double_value);
}
void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
Handle<Name> name) {
DCHECK(!global->HasFastProperties());
- auto dictionary = handle(global->property_dictionary());
+ auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
- if (entry == NameDictionary::kNotFound) return;
+ if (entry == GlobalDictionary::kNotFound) return;
PropertyCell::InvalidateEntry(dictionary, entry);
}
-// TODO(dcarney): rename to EnsureEmptyPropertyCell or something.
+// TODO(ishell): rename to EnsureEmptyPropertyCell or something.
Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
Handle<GlobalObject> global, Handle<Name> name) {
DCHECK(!global->HasFastProperties());
- auto dictionary = handle(global->property_dictionary());
+ auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
Handle<PropertyCell> cell;
- if (entry != NameDictionary::kNotFound) {
+ if (entry != GlobalDictionary::kNotFound) {
// This call should be idempotent.
- DCHECK(dictionary->DetailsAt(entry).cell_type() ==
- PropertyCellType::kUninitialized ||
- dictionary->DetailsAt(entry).cell_type() ==
- PropertyCellType::kInvalidated);
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
cell = handle(PropertyCell::cast(dictionary->ValueAt(entry)));
+ DCHECK(cell->property_details().cell_type() ==
+ PropertyCellType::kUninitialized ||
+ cell->property_details().cell_type() ==
+ PropertyCellType::kInvalidated);
DCHECK(cell->value()->IsTheHole());
return cell;
}
Isolate* isolate = global->GetIsolate();
cell = isolate->factory()->NewPropertyCell();
PropertyDetails details(NONE, DATA, 0, PropertyCellType::kUninitialized);
- dictionary = NameDictionary::Add(dictionary, name, cell, details);
+ dictionary = GlobalDictionary::Add(dictionary, name, cell, details);
global->set_properties(*dictionary);
return cell;
}
@@ -16162,31 +14983,16 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
}
-template <DictionaryEntryType type, typename D>
-static inline bool IsDeleted(D d, int i) {
- switch (type) {
- case DictionaryEntryType::kObjects:
- return false;
- case DictionaryEntryType::kCells:
- DCHECK(d->ValueAt(i)->IsPropertyCell());
- return PropertyCell::cast(d->ValueAt(i))->value()->IsTheHole();
- }
- UNREACHABLE();
- return false;
-}
-
-
template <typename Derived, typename Shape, typename Key>
-template <DictionaryEntryType type>
int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
PropertyAttributes filter) {
- int capacity = DerivedHashTable::Capacity();
+ int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = DerivedHashTable::KeyAt(i);
- if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
- if (IsDeleted<type>(this, i)) continue;
- PropertyDetails details = DetailsAt(i);
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k) && !FilterKey(k, filter)) {
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) result++;
}
@@ -16196,14 +15002,13 @@ int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
template <typename Derived, typename Shape, typename Key>
-template <DictionaryEntryType type>
bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
- int capacity = DerivedHashTable::Capacity();
+ int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = DerivedHashTable::KeyAt(i);
- if (DerivedHashTable::IsKey(k) && !FilterKey(k, NONE)) {
- if (IsDeleted<type>(this, i)) continue;
- PropertyDetails details = DetailsAt(i);
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k) && !FilterKey(k, NONE)) {
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
if (details.type() == ACCESSOR_CONSTANT) return true;
PropertyAttributes attr = details.attributes();
if (attr & (READ_ONLY | DONT_DELETE | DONT_ENUM)) return true;
@@ -16214,21 +15019,20 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
template <typename Derived, typename Shape, typename Key>
-template <DictionaryEntryType type>
void Dictionary<Derived, Shape, Key>::CopyKeysTo(
FixedArray* storage, PropertyAttributes filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
- DCHECK(storage->length() >= NumberOfElementsFilterAttributes<type>(filter));
- int capacity = DerivedHashTable::Capacity();
+ DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ int capacity = this->Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = DerivedHashTable::KeyAt(i);
- if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
- if (IsDeleted<type>(this, i)) continue;
- PropertyDetails details = DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) storage->set(index++, k);
- }
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k) && !FilterKey(k, filter)) {
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) == 0) storage->set(index++, k);
+ }
}
if (sort_mode == Dictionary::SORTED) {
storage->SortPairs(storage, index);
@@ -16237,55 +15041,55 @@ void Dictionary<Derived, Shape, Key>::CopyKeysTo(
}
+template <typename Dictionary>
struct EnumIndexComparator {
- explicit EnumIndexComparator(NameDictionary* dict) : dict(dict) { }
+ explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
bool operator() (Smi* a, Smi* b) {
PropertyDetails da(dict->DetailsAt(a->value()));
PropertyDetails db(dict->DetailsAt(b->value()));
return da.dictionary_index() < db.dictionary_index();
}
- NameDictionary* dict;
+ Dictionary* dict;
};
-template <DictionaryEntryType type>
-void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(FixedArray* storage) {
int length = storage->length();
- int capacity = Capacity();
+ int capacity = this->Capacity();
int properties = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k) && !k->IsSymbol()) {
- PropertyDetails details = DetailsAt(i);
- if (details.IsDontEnum() || IsDeleted<type>(this, i)) continue;
- storage->set(properties, Smi::FromInt(i));
- properties++;
- if (properties == length) break;
- }
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k) && !k->IsSymbol()) {
+ PropertyDetails details = this->DetailsAt(i);
+ if (details.IsDontEnum() || this->IsDeleted(i)) continue;
+ storage->set(properties, Smi::FromInt(i));
+ properties++;
+ if (properties == length) break;
+ }
}
CHECK_EQ(length, properties);
- EnumIndexComparator cmp(this);
+ EnumIndexComparator<Derived> cmp(static_cast<Derived*>(this));
Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress());
std::sort(start, start + length, cmp);
for (int i = 0; i < length; i++) {
int index = Smi::cast(storage->get(i))->value();
- storage->set(i, KeyAt(index));
+ storage->set(i, this->KeyAt(index));
}
}
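
// Illustrative sketch (standalone): CopyEnumKeysTo gathers the slot indices
// of enumerable string keys, sorts them by enumeration index so iteration
// follows insertion order, then rewrites each slot index back to its key --
// the same two-pass trick as the Smi-index sort above.
#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

struct EntryModel {
  std::string key;
  int enum_index;  // dictionary_index() in the real PropertyDetails
  bool dont_enum;
};

std::vector<std::string> EnumKeysModel(const std::vector<EntryModel>& dict) {
  std::vector<size_t> slots;
  for (size_t i = 0; i < dict.size(); ++i) {
    if (!dict[i].dont_enum) slots.push_back(i);
  }
  std::sort(slots.begin(), slots.end(), [&dict](size_t a, size_t b) {
    return dict[a].enum_index < dict[b].enum_index;
  });
  std::vector<std::string> keys;
  keys.reserve(slots.size());
  for (size_t slot : slots) keys.push_back(dict[slot].key);
  return keys;
}
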
template <typename Derived, typename Shape, typename Key>
-template <DictionaryEntryType type>
void Dictionary<Derived, Shape, Key>::CopyKeysTo(
FixedArray* storage, int index, PropertyAttributes filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
- DCHECK(storage->length() >= NumberOfElementsFilterAttributes<type>(filter));
- int capacity = DerivedHashTable::Capacity();
+ DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = DerivedHashTable::KeyAt(i);
- if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
- if (IsDeleted<type>(this, i)) continue;
- PropertyDetails details = DetailsAt(i);
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k) && !FilterKey(k, filter)) {
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) storage->set(index++, k);
}
@@ -16300,11 +15104,11 @@ void Dictionary<Derived, Shape, Key>::CopyKeysTo(
// Backwards lookup (slow).
template<typename Derived, typename Shape, typename Key>
Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
- int capacity = DerivedHashTable::Capacity();
+ int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = DerivedHashTable::KeyAt(i);
- if (Dictionary::IsKey(k)) {
- Object* e = ValueAt(i);
+ Object* k = this->KeyAt(i);
+ if (this->IsKey(k)) {
+ Object* e = this->ValueAt(i);
// TODO(dcarney): this should be templatized.
if (e->IsPropertyCell()) {
e = PropertyCell::cast(e)->value();
@@ -16317,18 +15121,34 @@ Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
}
+Object* ObjectHashTable::Lookup(Isolate* isolate, Handle<Object> key,
+ int32_t hash) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(IsKey(*key));
+
+ int entry = FindEntry(isolate, key, hash);
+ if (entry == kNotFound) return isolate->heap()->the_hole_value();
+ return get(EntryToIndex(entry) + 1);
+}
+
+
Object* ObjectHashTable::Lookup(Handle<Object> key) {
DisallowHeapAllocation no_gc;
DCHECK(IsKey(*key));
+ Isolate* isolate = GetIsolate();
+
// If the object does not have an identity hash, it was never used as a key.
Object* hash = key->GetHash();
if (hash->IsUndefined()) {
- return GetHeap()->the_hole_value();
+ return isolate->heap()->the_hole_value();
}
- int entry = FindEntry(key);
- if (entry == kNotFound) return GetHeap()->the_hole_value();
- return get(EntryToIndex(entry) + 1);
+ return Lookup(isolate, key, Smi::cast(hash)->value());
+}
+
+
+Object* ObjectHashTable::Lookup(Handle<Object> key, int32_t hash) {
+ return Lookup(GetIsolate(), key, hash);
}
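
// Illustrative sketch (standalone): the fast path above leans on lazily
// assigned identity hashes -- a key that never received one can never have
// been inserted, so Lookup can return the hole without probing the table.
#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

struct KeyModel {
  std::optional<int32_t> identity_hash;  // assigned on first use as a key
};

const std::string* LookupModel(
    const std::unordered_map<int32_t, std::string>& table,
    const KeyModel& key) {
  if (!key.identity_hash) return nullptr;  // never used as a key
  auto it = table.find(*key.identity_hash);
  return it == table.end() ? nullptr : &it->second;
}
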
@@ -16339,11 +15159,23 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
DCHECK(!value->IsTheHole());
Isolate* isolate = table->GetIsolate();
-
// Make sure the key object has an identity hash code.
- Handle<Smi> hash = Object::GetOrCreateHash(isolate, key);
+ int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
+
+ return Put(table, key, value, hash);
+}
- int entry = table->FindEntry(key);
+
+Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value,
+ int32_t hash) {
+ DCHECK(table->IsKey(*key));
+ DCHECK(!value->IsTheHole());
+
+ Isolate* isolate = table->GetIsolate();
+
+ int entry = table->FindEntry(isolate, key, hash);
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
@@ -16353,9 +15185,7 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
// Check whether the hash table should be extended.
table = EnsureCapacity(table, 1, key);
- table->AddEntry(table->FindInsertionEntry(hash->value()),
- *key,
- *value);
+ table->AddEntry(table->FindInsertionEntry(hash), *key, *value);
return table;
}
@@ -16371,7 +15201,17 @@ Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
return table;
}
- int entry = table->FindEntry(key);
+ return Remove(table, key, was_present, Smi::cast(hash)->value());
+}
+
+
+Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ bool* was_present,
+ int32_t hash) {
+ DCHECK(table->IsKey(*key));
+
+ int entry = table->FindEntry(table->GetIsolate(), key, hash);
if (entry == kNotFound) {
*was_present = false;
return table;
@@ -16436,6 +15276,49 @@ void WeakHashTable::AddEntry(int entry, Handle<WeakCell> key_cell,
}
+#ifdef DEBUG
+Object* WeakValueHashTable::LookupWeak(Handle<Object> key) {
+ Object* value = Lookup(key);
+ if (value->IsWeakCell() && !WeakCell::cast(value)->cleared()) {
+ value = WeakCell::cast(value)->value();
+ }
+ return value;
+}
+#endif // DEBUG
+
+
+Handle<WeakValueHashTable> WeakValueHashTable::PutWeak(
+ Handle<WeakValueHashTable> table, Handle<Object> key,
+ Handle<HeapObject> value) {
+ Handle<WeakCell> cell = value->GetIsolate()->factory()->NewWeakCell(value);
+ return Handle<WeakValueHashTable>::cast(
+ Put(Handle<ObjectHashTable>::cast(table), key, cell));
+}
+
+
+Handle<FixedArray> WeakValueHashTable::GetWeakValues(
+ Handle<WeakValueHashTable> table) {
+ Isolate* isolate = table->GetIsolate();
+ uint32_t capacity = table->Capacity();
+ Handle<FixedArray> results = isolate->factory()->NewFixedArray(capacity);
+ int length = 0;
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ WeakCell* value_cell = WeakCell::cast(table->get(value_index));
+ if (value_cell->cleared()) {
+ table->RemoveEntry(i);
+ } else {
+ results->set(length++, value_cell->value());
+ }
+ }
+ results->Shrink(length);
+ return results;
+}
+
+
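
GetWeakValues walks every entry, prunes those whose WeakCell the GC already cleared, and compacts the survivors into a fresh array. The same prune-while-collecting loop can be modeled with std::weak_ptr standing in for WeakCell (illustrative only: V8's weakness is GC-driven, not reference-counted):

    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    using WeakValueMap = std::map<std::string, std::weak_ptr<int>>;

    // Collect the still-live values; erase entries whose value has died,
    // mirroring the RemoveEntry call on cleared cells above.
    std::vector<std::shared_ptr<int>> GetWeakValues(WeakValueMap& table) {
      std::vector<std::shared_ptr<int>> results;
      for (auto it = table.begin(); it != table.end();) {
        if (std::shared_ptr<int> value = it->second.lock()) {
          results.push_back(std::move(value));
          ++it;
        } else {
          it = table.erase(it);  // "cleared cell": drop the whole entry
        }
      }
      return results;
    }

    int main() {
      WeakValueMap table;
      auto a = std::make_shared<int>(1);
      auto b = std::make_shared<int>(2);
      table["a"] = a;
      table["b"] = b;
      b.reset();  // value "b" dies; its entry should be pruned
      return GetWeakValues(table).size() == 1 ? 0 : 1;
    }
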
template<class Derived, class Iterator, int entrysize>
Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure) {
@@ -17153,7 +16036,7 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
Handle<PropertyCell> PropertyCell::InvalidateEntry(
- Handle<NameDictionary> dictionary, int entry) {
+ Handle<GlobalDictionary> dictionary, int entry) {
Isolate* isolate = dictionary->GetIsolate();
// Swap with a copy.
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
@@ -17163,10 +16046,10 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
dictionary->ValueAtPut(entry, *new_cell);
bool is_the_hole = cell->value()->IsTheHole();
// Cell is officially mutable henceforth.
- auto details = dictionary->DetailsAt(entry);
+ PropertyDetails details = cell->property_details();
details = details.set_cell_type(is_the_hole ? PropertyCellType::kInvalidated
: PropertyCellType::kMutable);
- dictionary->DetailsAtPut(entry, details);
+ new_cell->set_property_details(details);
// Old cell is ready for invalidation.
if (is_the_hole) {
cell->set_value(isolate->heap()->undefined_value());
@@ -17236,17 +16119,17 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
}
-void PropertyCell::UpdateCell(Handle<NameDictionary> dictionary, int entry,
+void PropertyCell::UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
Handle<Object> value, PropertyDetails details) {
DCHECK(!value->IsTheHole());
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- const PropertyDetails original_details = dictionary->DetailsAt(entry);
+ const PropertyDetails original_details = cell->property_details();
// Data accesses could be cached in ics or optimized code.
bool invalidate =
original_details.kind() == kData && details.kind() == kAccessor;
int index = original_details.dictionary_index();
- auto old_type = original_details.cell_type();
+ PropertyCellType old_type = original_details.cell_type();
// Preserve the enumeration index unless the property was deleted or never
// initialized.
if (cell->value()->IsTheHole()) {
@@ -17258,12 +16141,12 @@ void PropertyCell::UpdateCell(Handle<NameDictionary> dictionary, int entry,
DCHECK(index > 0);
details = details.set_index(index);
- auto new_type = UpdatedType(cell, value, original_details);
+ PropertyCellType new_type = UpdatedType(cell, value, original_details);
if (invalidate) cell = PropertyCell::InvalidateEntry(dictionary, entry);
// Install new property details and cell value.
details = details.set_cell_type(new_type);
- dictionary->DetailsAtPut(entry, details);
+ cell->set_property_details(details);
cell->set_value(*value);
// Deopt when transitioning from a constant type.
@@ -17285,4 +16168,5 @@ void PropertyCell::SetValueWithInvalidation(Handle<PropertyCell> cell,
isolate, DependentCode::kPropertyCellChangedGroup);
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
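
The InvalidateEntry change above hinges on a swap-with-copy: the dictionary slot is repointed at a fresh cell carrying the details, while the old cell (which optimized code may have embedded directly) is left behind to be invalidated and deoptimized against. A rough standalone model of that swap, with the value/details bookkeeping and DependentCode machinery simplified away (Cell, CellType, InvalidateEntry here are illustrative):

    #include <memory>
    #include <string>
    #include <unordered_map>

    enum class CellType { kConstant, kMutable, kInvalidated };

    struct Cell {
      int value = 0;
      CellType type = CellType::kConstant;
    };

    using Dictionary = std::unordered_map<std::string, std::shared_ptr<Cell>>;

    // Swap with a copy: the dictionary now points at a new, mutable cell,
    // while anything still holding the old cell sees it flip to invalidated.
    std::shared_ptr<Cell> InvalidateEntry(Dictionary& dict,
                                          const std::string& name) {
      std::shared_ptr<Cell> old_cell = dict.at(name);
      auto new_cell = std::make_shared<Cell>(*old_cell);
      new_cell->type = CellType::kMutable;      // officially mutable henceforth
      dict[name] = new_cell;
      old_cell->type = CellType::kInvalidated;  // old cell ready for invalidation
      // A real implementation would now deoptimize code depending on old_cell.
      return new_cell;
    }

    int main() {
      Dictionary dict;
      dict["x"] = std::make_shared<Cell>();
      std::shared_ptr<Cell> embedded = dict["x"];  // e.g. baked into jitted code
      InvalidateEntry(dict, "x");
      return (dict["x"]->type == CellType::kMutable &&
              embedded->type == CellType::kInvalidated) ? 0 : 1;
    }
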
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 5ec1d7d27b..7e4fcbafeb 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -121,6 +121,7 @@
// - ExternalTwoByteInternalizedString
// - Symbol
// - HeapNumber
+// - Float32x4
// - Cell
// - PropertyCell
// - Code
@@ -251,8 +252,15 @@ static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
enum IcCheckType { ELEMENT, PROPERTY };
-// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
-enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
+// SKIP_WRITE_BARRIER skips the write barrier.
+// UPDATE_WEAK_WRITE_BARRIER skips the marking part of the write barrier and
+// only performs the generational part.
+// UPDATE_WRITE_BARRIER performs the full barrier, both marking and
+// generational.
+enum WriteBarrierMode {
+ SKIP_WRITE_BARRIER,
+ UPDATE_WEAK_WRITE_BARRIER,
+ UPDATE_WRITE_BARRIER
+};
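
Given the three strengths documented above, a store helper applies only the barrier parts the mode requires. A minimal sketch, assuming stub barrier bodies (GenerationalBarrier and MarkingBarrier are invented placeholders for the remembered-set and marking work):

    enum WriteBarrierMode {
      SKIP_WRITE_BARRIER,
      UPDATE_WEAK_WRITE_BARRIER,
      UPDATE_WRITE_BARRIER
    };

    struct Obj { Obj* field = nullptr; };

    void GenerationalBarrier(Obj* host, Obj* value) { (void)host; (void)value; }
    void MarkingBarrier(Obj* host, Obj* value) { (void)host; (void)value; }

    // Store |value| into |host->field|, then run exactly the barrier parts
    // the mode asks for: weak stores skip marking, SKIP skips everything.
    void WriteField(Obj* host, Obj* value, WriteBarrierMode mode) {
      host->field = value;
      if (mode == SKIP_WRITE_BARRIER) return;
      GenerationalBarrier(host, value);  // both UPDATE_* modes
      if (mode == UPDATE_WRITE_BARRIER) MarkingBarrier(host, value);
    }

    int main() {
      Obj a, b;
      WriteField(&a, &b, UPDATE_WEAK_WRITE_BARRIER);
      return a.field == &b ? 0 : 1;
    }
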
// Indicates whether a value can be loaded as a constant.
@@ -387,6 +395,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(HEAP_NUMBER_TYPE) \
V(MUTABLE_HEAP_NUMBER_TYPE) \
+ V(FLOAT32X4_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(FREE_SPACE_TYPE) \
@@ -438,7 +447,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(CONSTANT_POOL_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
V(WEAK_CELL_TYPE) \
\
@@ -683,6 +691,7 @@ enum InstanceType {
// objects.
HEAP_NUMBER_TYPE,
MUTABLE_HEAP_NUMBER_TYPE,
+ FLOAT32X4_TYPE, // FIRST_SIMD_TYPE, LAST_SIMD_TYPE
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
FREE_SPACE_TYPE,
@@ -730,7 +739,6 @@ enum InstanceType {
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
FIXED_ARRAY_TYPE,
- CONSTANT_POOL_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
CELL_TYPE,
WEAK_CELL_TYPE,
@@ -775,6 +783,9 @@ enum InstanceType {
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
+ // Boundaries for testing for a SIMD type.
+ FIRST_SIMD_TYPE = FLOAT32X4_TYPE,
+ LAST_SIMD_TYPE = FLOAT32X4_TYPE,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_INT8_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE,
@@ -864,16 +875,22 @@ class AccessorPair;
class AllocationSite;
class AllocationSiteCreationContext;
class AllocationSiteUsageContext;
+class Cell;
class ConsString;
-class DictionaryElementsAccessor;
class ElementsAccessor;
class FixedArrayBase;
class FunctionLiteral;
class GlobalObject;
+class JSBuiltinsObject;
class LayoutDescriptor;
class LookupIterator;
+class ObjectHashTable;
class ObjectVisitor;
+class PropertyCell;
+class SafepointEntry;
+class SharedFunctionInfo;
class StringStream;
+class TypeFeedbackInfo;
class TypeFeedbackVector;
class WeakCell;
@@ -941,6 +958,7 @@ template <class C> inline bool Is(Object* obj);
V(FixedFloat32Array) \
V(FixedFloat64Array) \
V(FixedUint8ClampedArray) \
+ V(Float32x4) \
V(ByteArray) \
V(FreeSpace) \
V(JSReceiver) \
@@ -961,7 +979,6 @@ template <class C> inline bool Is(Object* obj);
V(FixedDoubleArray) \
V(WeakFixedArray) \
V(ArrayList) \
- V(ConstantPoolArray) \
V(Context) \
V(ScriptContextTable) \
V(NativeContext) \
@@ -1012,6 +1029,7 @@ template <class C> inline bool Is(Object* obj);
V(WeakCell) \
V(ObjectHashTable) \
V(WeakHashTable) \
+ V(WeakValueHashTable) \
V(OrderedHashTable)
// Object is the abstract superclass for all classes in the
@@ -1051,11 +1069,13 @@ class Object {
INLINE(bool IsSpecFunction()) const;
INLINE(bool IsTemplateInfo()) const;
INLINE(bool IsNameDictionary() const);
+ INLINE(bool IsGlobalDictionary() const);
INLINE(bool IsSeededNumberDictionary() const);
INLINE(bool IsUnseededNumberDictionary() const);
INLINE(bool IsOrderedHashSet() const);
INLINE(bool IsOrderedHashMap() const);
bool IsCallable() const;
+ static bool IsPromise(Handle<Object> object);
// Oddball testing.
INLINE(bool IsUndefined() const);
@@ -1093,6 +1113,12 @@ class Object {
}
}
+ inline ElementsKind OptimalElementsKind() {
+ if (IsSmi()) return FAST_SMI_ELEMENTS;
+ if (IsNumber()) return FAST_DOUBLE_ELEMENTS;
+ return FAST_ELEMENTS;
+ }
+
inline bool FitsRepresentation(Representation representation) {
if (FLAG_track_fields && representation.IsNone()) {
return false;
@@ -1106,6 +1132,11 @@ class Object {
return true;
}
+ // Checks whether two valid primitive encodings of a property name resolve to
+ // the same logical property. E.g., the smi 1, the string "1" and the double
+ // 1 all refer to the same property, so this helper will return true.
+ inline bool KeyEquals(Object* other);
+
Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
inline static Handle<Object> NewStorageFor(Isolate* isolate,
@@ -1136,11 +1167,12 @@ class Object {
MUST_USE_RESULT static inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
Handle<Object> object);
- MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
+ MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
+ LookupIterator* it, LanguageMode language_mode = SLOPPY);
// Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
- Handle<Object> object, Handle<Name> key, Handle<Object> value,
+ Handle<Object> object, Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
@@ -1152,13 +1184,15 @@ class Object {
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
+ LookupIterator* it, LanguageMode language_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
- Isolate* isolate, Handle<Object> reciever, Handle<Object> name,
- Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyElement(
- Isolate* isolate, Handle<Object> receiver, uint32_t index,
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> RedefineNonconfigurableProperty(
Isolate* isolate, Handle<Object> name, Handle<Object> value,
@@ -1169,25 +1203,19 @@ class Object {
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
LanguageMode language_mode, StoreFromKeyed store_mode);
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
- Handle<Object> object,
- Handle<Name> key);
+ Handle<Object> object, Handle<Name> name,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
- Isolate* isolate,
- Handle<Object> object,
- const char* key);
+ Isolate* isolate, Handle<Object> object, const char* key,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
- Handle<Object> object,
- Handle<Name> key);
+ Handle<Object> object, Handle<Name> name,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
- Handle<Object> receiver,
- Handle<Name> name,
- Handle<JSObject> holder,
- Handle<Object> structure);
+ LookupIterator* it, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithAccessor(
- Handle<Object> receiver, Handle<Name> name, Handle<Object> value,
- Handle<JSObject> holder, Handle<Object> structure,
- LanguageMode language_mode);
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
Handle<Object> receiver,
@@ -1198,19 +1226,8 @@ class Object {
Handle<Object> value);
MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
- Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeHandle<Object> GetElementWithReceiver(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> receiver,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeHandle<Object> SetElementWithReceiver(
- Isolate* isolate, Handle<Object> object, Handle<Object> receiver,
- uint32_t index, Handle<Object> value, LanguageMode language_mode);
+ Isolate* isolate, Handle<Object> object, uint32_t index,
+ LanguageMode language_mode = SLOPPY);
static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
Isolate* isolate, Handle<Object> receiver);
@@ -1219,6 +1236,11 @@ class Object {
// undefined if not yet created.
Object* GetHash();
+ // Returns undefined for JSObjects, but returns the hash code for simple
+ // objects. This avoids a double lookup in the cases where we know we will
+ // add the hash to the JSObject if it does not already exist.
+ Object* GetSimpleHash();
+
// Returns the permanent hash code associated with this object depending on
// the actual object type. May create and store a hash code if needed and none
// exists.
@@ -1235,8 +1257,13 @@ class Object {
// by ES6 Map and Set.
bool SameValueZero(Object* other);
- // Tries to convert an object to an array index. Returns true and sets
- // the output parameter if it succeeds.
+ // Tries to convert an object to an array length. Returns true and sets the
+ // output parameter if it succeeds.
+ inline bool ToArrayLength(uint32_t* index);
+
+ // Tries to convert an object to an array index. Returns true and sets the
+ // output parameter if it succeeds. Equivalent to ToArrayLength, but does not
+ // allow kMaxUInt32.
inline bool ToArrayIndex(uint32_t* index);
// Returns true if this is a JSValue containing a string and the index is
@@ -1381,6 +1408,13 @@ class MapWord BASE_EMBEDDED {
};
+// The content of a heap object (except for the map pointer). kTaggedValues
+// objects can contain both heap pointers and Smis, kMixedValues can contain
+// heap pointers, Smis, and raw values (e.g. doubles or strings), and kRawValues
+// objects can contain raw values and Smis.
+enum class HeapObjectContents { kTaggedValues, kMixedValues, kRawValues };
+
+
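
One consumer of this classification is pointer visiting: only objects that may hold tagged values need their slots scanned. A small hedged sketch of that dispatch (NeedsPointerScan is an invented helper; the real heap walks typed body descriptors instead):

    enum class HeapObjectContents { kTaggedValues, kMixedValues, kRawValues };

    // Raw-value objects (and the Smis they may hold) contain no heap
    // pointers, so a visitor can skip them entirely.
    bool NeedsPointerScan(HeapObjectContents contents) {
      switch (contents) {
        case HeapObjectContents::kTaggedValues:
        case HeapObjectContents::kMixedValues:
          return true;
        case HeapObjectContents::kRawValues:
          return false;
      }
      return false;  // unreachable; silences missing-return warnings
    }

    int main() {
      return NeedsPointerScan(HeapObjectContents::kRawValues) ? 1 : 0;
    }
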
// HeapObject is the superclass for all classes describing heap allocated
// objects.
class HeapObject: public Object {
@@ -1432,9 +1466,8 @@ class HeapObject: public Object {
// Returns the heap object's size in bytes
inline int Size();
- // Returns true if this heap object may contain raw values, i.e., values that
- // look like pointers to heap objects.
- inline bool MayContainRawValues();
+ // Indicates what type of values this heap object may contain.
+ inline HeapObjectContents ContentType();
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
@@ -1480,7 +1513,7 @@ class HeapObject: public Object {
static void VerifyHeapPointer(Object* p);
#endif
- inline bool NeedsToEnsureDoubleAlignment();
+ inline AllocationAlignment RequiredAlignment();
// Layout description.
// First field in a heap object is map.
@@ -1596,6 +1629,28 @@ class HeapNumber: public HeapObject {
};
+// The Float32x4 class describes heap allocated SIMD values holding 4 32-bit
+// IEEE floats.
+class Float32x4 : public HeapObject {
+ public:
+ inline float get_lane(int lane) const;
+ inline void set_lane(int lane, float value);
+
+ DECLARE_CAST(Float32x4)
+
+ // Dispatched behavior.
+ void Float32x4Print(std::ostream& os); // NOLINT
+ DECLARE_VERIFIER(Float32x4)
+
+ // Layout description.
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kSimd128Size;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Float32x4);
+};
+
+
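
Ignoring the heap header, the payload declared above is just 16 bytes of lane data (kSimd128Size). A standalone model of the lane accessors, with the bounds DCHECK rendered as assert (plain struct; no map pointer or GC integration):

    #include <cassert>

    // Four 32-bit IEEE float lanes, as in kValueOffset..kValueOffset + 16.
    struct Float32x4 {
      float lanes[4];

      float get_lane(int lane) const {
        assert(lane >= 0 && lane < 4);
        return lanes[lane];
      }
      void set_lane(int lane, float value) {
        assert(lane >= 0 && lane < 4);
        lanes[lane] = value;
      }
    };

    int main() {
      Float32x4 v = {{1.0f, 2.0f, 3.0f, 4.0f}};
      v.set_lane(2, 9.5f);
      return v.get_lane(2) == 9.5f ? 0 : 1;
    }
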
enum EnsureElementsMode {
DONT_ALLOW_DOUBLE_ELEMENTS,
ALLOW_COPIED_DOUBLE_ELEMENTS,
@@ -1603,16 +1658,6 @@ enum EnsureElementsMode {
};
-// Indicates whether a property should be set or (re)defined. Setting of a
-// property causes attributes to remain unchanged, writability to be checked
-// and callbacks to be called. Defining of a property causes attributes to
-// be updated and callbacks to be overridden.
-enum SetPropertyMode {
- SET_PROPERTY,
- DEFINE_PROPERTY
-};
-
-
// Indicator for one component of an AccessorPair.
enum AccessorComponent {
ACCESSOR_GETTER,
@@ -1628,7 +1673,7 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static MaybeHandle<Object> SetElement(
Handle<JSReceiver> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode);
+ LanguageMode language_mode);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
@@ -1641,9 +1686,14 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object, uint32_t index);
// Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
+ MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyOrElement(
+ Handle<JSReceiver> object, Handle<Name> name,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
+ LookupIterator* it, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode = SLOPPY);
@@ -1660,15 +1710,22 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
- LookupIterator* it);
- MUST_USE_RESULT static Maybe<PropertyAttributes> GetOwnPropertyAttributes(
- Handle<JSReceiver> object, Handle<Name> name);
+ MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+ GetOwnPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttribute(
+ MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttributes(
Handle<JSReceiver> object, uint32_t index);
MUST_USE_RESULT static inline Maybe<PropertyAttributes>
- GetOwnElementAttribute(Handle<JSReceiver> object, uint32_t index);
+ GetOwnElementAttributes(Handle<JSReceiver> object, uint32_t index);
+
+ MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ LookupIterator* it);
+
+
+ static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
+ Handle<Name> name);
+ static Handle<Object> GetDataProperty(LookupIterator* it);
+
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet.
@@ -1691,12 +1748,6 @@ class JSReceiver: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
-// Forward declaration for JSObject::GetOrCreateHiddenPropertiesHashTable.
-class ObjectHashTable;
-
-// Forward declaration for JSObject::Copy.
-class AllocationSite;
-
// The JSObject describes real heap allocated JavaScript objects with
// properties.
@@ -1710,7 +1761,10 @@ class JSObject: public JSReceiver {
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
inline bool HasFastProperties();
- inline NameDictionary* property_dictionary(); // Gets slow properties.
+ // Gets slow properties for non-global objects.
+ inline NameDictionary* property_dictionary();
+ // Gets global object properties.
+ inline GlobalDictionary* global_dictionary();
// [elements]: The elements (properties with names that are integers).
//
@@ -1737,7 +1791,7 @@ class JSObject: public JSReceiver {
Handle<Map> map,
Handle<FixedArrayBase> elements);
inline ElementsKind GetElementsKind();
- inline ElementsAccessor* GetElementsAccessor();
+ ElementsAccessor* GetElementsAccessor();
// Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
inline bool HasFastSmiElements();
// Returns true if an object has elements of FAST_ELEMENTS ElementsKind.
@@ -1780,8 +1834,8 @@ class JSObject: public JSReceiver {
inline bool HasFixedFloat32Elements();
inline bool HasFixedFloat64Elements();
- bool HasFastArgumentsElements();
- bool HasDictionaryArgumentsElements();
+ inline bool HasFastArgumentsElements();
+ inline bool HasSlowArgumentsElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
// Requires: HasFastElements().
@@ -1804,21 +1858,42 @@ class JSObject: public JSReceiver {
// SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to
// grant an exemption to ExecutableAccessor callbacks in some cases.
- enum ExecutableAccessorInfoHandling {
- DEFAULT_HANDLING,
- DONT_FORCE_FIELD
- };
+ enum ExecutableAccessorInfoHandling { DEFAULT_HANDLING, DONT_FORCE_FIELD };
+
+ MUST_USE_RESULT static MaybeHandle<Object> DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
+ Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes,
ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
- static void AddProperty(Handle<JSObject> object, Handle<Name> key,
+ MUST_USE_RESULT static MaybeHandle<Object> SetOwnElementIgnoreAttributes(
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes,
+ ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+
+ // Equivalent to one of the above depending on whether |name| can be converted
+ // to an array index.
+ MUST_USE_RESULT static MaybeHandle<Object>
+ DefinePropertyOrElementIgnoreAttributes(
+ Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes = NONE,
+ ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+
+ // Adds or reconfigures a property to attributes NONE. Fails when the
+ // property cannot be created or reconfigured.
+ MUST_USE_RESULT static Maybe<bool> CreateDataProperty(LookupIterator* it,
+ Handle<Object> value);
+
+ static void AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, PropertyAttributes attributes);
+ MUST_USE_RESULT static MaybeHandle<Object> AddDataElement(
+ Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes);
+
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
@@ -1833,37 +1908,39 @@ class JSObject: public JSReceiver {
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- static void SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
+ static void SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value,
PropertyDetails details);
+ static void SetDictionaryElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+ static void SetDictionaryArgumentsElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
static void OptimizeAsPrototype(Handle<JSObject> object,
PrototypeOptimizationMode mode);
static void ReoptimizeIfPrototype(Handle<JSObject> object);
static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
- static bool RegisterPrototypeUserIfNotRegistered(Handle<JSObject> prototype,
- Handle<HeapObject> user,
- Isolate* isolate);
- static bool UnregisterPrototypeUser(Handle<JSObject> prototype,
- Handle<HeapObject> user);
+ static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void InvalidatePrototypeChains(Map* map);
+ // Alternative implementation of WeakFixedArray::NullCallback.
+ class PrototypeRegistryCompactionCallback {
+ public:
+ static void Callback(Object* value, int old_index, int new_index);
+ };
+
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetPropertyAttributesWithInterceptor(Handle<JSObject> holder,
- Handle<Object> receiver,
- Handle<Name> name);
+ GetPropertyAttributesWithInterceptor(LookupIterator* it);
MUST_USE_RESULT static Maybe<PropertyAttributes>
GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetElementAttributeWithReceiver(Handle<JSObject> object,
- Handle<JSReceiver> receiver,
- uint32_t index, bool check_prototype);
// Retrieves an AccessorPair property from the given object. Might return
// undefined if the property doesn't exist or is of a different kind.
@@ -1885,10 +1962,11 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
Handle<AccessorInfo> info);
+ // The result must be checked first for exceptions. If there's no exception,
+ // the output parameter |done| indicates whether the interceptor has a result
+ // or not.
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
- Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Name> name);
+ LookupIterator* it, bool* done);
// Accessors for hidden properties object.
//
@@ -1917,7 +1995,7 @@ class JSObject: public JSReceiver {
static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
- static inline void ValidateElements(Handle<JSObject> object);
+ static void ValidateElements(Handle<JSObject> object);
// Makes sure that this object can contain HeapObject as elements.
static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
@@ -1944,75 +2022,16 @@ class JSObject: public JSReceiver {
// an access at key?
bool WouldConvertToSlowElements(uint32_t index);
inline bool WouldConvertToSlowElements(Handle<Object> key);
- // Do we want to keep the elements in fast case when increasing the
- // capacity?
- bool ShouldConvertToSlowElements(int new_capacity);
- // Returns true if the backing storage for the slow-case elements of
- // this object takes up nearly as much space as a fast-case backing
- // storage would. In that case the JSObject should have fast
- // elements.
- bool ShouldConvertToFastElements();
- // Returns true if the elements of JSObject contains only values that can be
- // represented in a FixedDoubleArray and has at least one value that can only
- // be represented as a double and not a Smi.
- bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
// Computes the new capacity when expanding the elements of a JSObject.
- static int NewElementsCapacity(int old_capacity) {
+ static uint32_t NewElementsCapacity(uint32_t old_capacity) {
// (old_capacity + 50%) + 16
return old_capacity + (old_capacity >> 1) + 16;
}
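
The formula reads: new capacity = old + old/2 + 16, i.e. 1.5x geometric growth plus a constant pad so tiny arrays don't crawl up one slot at a time. A quick check of the sequence it produces (standalone, same arithmetic):

    #include <cstdint>
    #include <cstdio>

    uint32_t NewElementsCapacity(uint32_t old_capacity) {
      return old_capacity + (old_capacity >> 1) + 16;  // (old + 50%) + 16
    }

    int main() {
      // From 0: 16, 40, 76, 130, 211, ... -- roughly geometric, which keeps
      // repeated appends amortized O(1).
      uint32_t c = 0;
      for (int i = 0; i < 5; ++i) {
        c = NewElementsCapacity(c);
        std::printf("%u ", c);
      }
      std::printf("\n");
      return 0;
    }
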
// These methods do not perform access checks!
- MUST_USE_RESULT static MaybeHandle<AccessorPair> GetOwnElementAccessorPair(
- Handle<JSObject> object, uint32_t index);
-
- MUST_USE_RESULT static MaybeHandle<Object> SetFastElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- LanguageMode language_mode, bool check_prototype);
-
- MUST_USE_RESULT static inline MaybeHandle<Object> SetOwnElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- LanguageMode language_mode);
-
- MUST_USE_RESULT static MaybeHandle<Object> SetOwnElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode);
-
- // Empty handle is returned if the element cannot be set to the given value.
- MUST_USE_RESULT static MaybeHandle<Object> SetElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype = true, SetPropertyMode set_mode = SET_PROPERTY);
-
- // Returns the index'th element.
- // The undefined object if index is out of bounds.
- MUST_USE_RESULT static MaybeHandle<Object> GetElementWithInterceptor(
- Handle<JSObject> object, Handle<Object> receiver, uint32_t index,
- bool check_prototype);
-
- enum SetFastElementsCapacitySmiMode {
- kAllowSmiElements,
- kForceSmiElements,
- kDontAllowSmiElements
- };
-
- static Handle<FixedArray> SetFastElementsCapacity(
- Handle<JSObject> object, int capacity,
- SetFastElementsCapacitySmiMode smi_mode);
- static Handle<FixedArrayBase> SetFastDoubleElementsCapacity(
- Handle<JSObject> object, int capacity);
-
- // Replace the elements' backing store with fast elements of the given
- // capacity. Update the length for JSArrays. Returns the new backing
- // store.
- static Handle<FixedArray> SetFastElementsCapacityAndLength(
- Handle<JSObject> object,
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode);
- static Handle<FixedArrayBase> SetFastDoubleElementsCapacityAndLength(
- Handle<JSObject> object, int capacity, int length);
+ static void UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind);
// Lookup interceptors are used for handling properties controlled by host
// objects.
@@ -2030,11 +2049,11 @@ class JSObject: public JSReceiver {
// Support functions for v8 api (needed for correct interceptor behavior).
MUST_USE_RESULT static Maybe<bool> HasRealNamedProperty(
- Handle<JSObject> object, Handle<Name> key);
+ Handle<JSObject> object, Handle<Name> name);
MUST_USE_RESULT static Maybe<bool> HasRealElementProperty(
Handle<JSObject> object, uint32_t index);
MUST_USE_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
- Handle<JSObject> object, Handle<Name> key);
+ Handle<JSObject> object, Handle<Name> name);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -2141,10 +2160,12 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Disalow further properties to be added to the object.
+ // Disallow further properties to be added to the object.
MUST_USE_RESULT static MaybeHandle<Object> PreventExtensions(
Handle<JSObject> object);
+ bool IsExtensible();
+
// ES5 Object.seal
MUST_USE_RESULT static MaybeHandle<Object> Seal(Handle<JSObject> object);
@@ -2157,7 +2178,6 @@ class JSObject: public JSReceiver {
// Copy object.
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
- static Handle<JSObject> Copy(Handle<JSObject> object);
MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopy(
Handle<JSObject> object,
AllocationSiteUsageContext* site_context,
@@ -2166,10 +2186,6 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
AllocationSiteCreationContext* site_context);
- static Handle<Object> GetDataProperty(Handle<JSObject> object,
- Handle<Name> key);
- static Handle<Object> GetDataProperty(LookupIterator* it);
-
DECLARE_CAST(JSObject)
// Dispatched behavior.
@@ -2274,8 +2290,14 @@ class JSObject: public JSReceiver {
Handle<JSObject> object, const char* type, Handle<Name> name,
Handle<Object> old_value);
+ // Gets the current elements capacity and the number of used elements.
+ void GetElementsCapacityAndUsage(int* capacity, int* used);
+
+ // Deletes an existing named property in a normalized object.
+ static void DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name, int entry);
+
private:
- friend class DictionaryElementsAccessor;
friend class JSReceiver;
friend class Object;
@@ -2284,79 +2306,12 @@ class JSObject: public JSReceiver {
Handle<Map> new_map,
int expected_additional_properties);
- static void UpdateAllocationSite(Handle<JSObject> object,
- ElementsKind to_kind);
-
// Used from Object::GetProperty().
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
LookupIterator* it);
- MUST_USE_RESULT static MaybeHandle<Object> GetElementWithCallback(
- Handle<JSObject> object,
- Handle<Object> receiver,
- Handle<Object> structure,
- uint32_t index,
- Handle<Object> holder);
-
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetElementAttributeWithInterceptor(Handle<JSObject> object,
- Handle<JSReceiver> receiver,
- uint32_t index, bool continue_search);
-
- // Queries indexed interceptor on an object for property attributes.
- //
- // We determine property attributes as follows:
- // - if interceptor has a query callback, then the property attributes are
- // the result of query callback for index.
- // - otherwise if interceptor has a getter callback and it returns
- // non-empty value on index, then the property attributes is NONE
- // (property is present, and it is enumerable, configurable, writable)
- // - otherwise there are no property attributes that can be inferred for
- // interceptor, and this function returns ABSENT.
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetElementAttributeFromInterceptor(Handle<JSObject> object,
- Handle<Object> receiver,
- uint32_t index);
-
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetElementAttributeWithoutInterceptor(Handle<JSObject> object,
- Handle<JSReceiver> receiver,
- uint32_t index,
- bool continue_search);
- MUST_USE_RESULT static MaybeHandle<Object> SetElementWithCallback(
- Handle<Object> object, Handle<Object> structure, uint32_t index,
- Handle<Object> value, Handle<JSObject> holder,
- LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetElementWithInterceptor(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype, SetPropertyMode set_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetElementWithoutInterceptor(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype, SetPropertyMode set_mode);
- MUST_USE_RESULT
- static MaybeHandle<Object> SetElementWithCallbackSetterInPrototypes(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- bool* found, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetDictionaryElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, LanguageMode language_mode,
- bool check_prototype, SetPropertyMode set_mode = SET_PROPERTY);
- MUST_USE_RESULT static MaybeHandle<Object> SetFastDoubleElement(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- LanguageMode language_mode, bool check_prototype = true);
- MUST_USE_RESULT static MaybeHandle<Object> GetElementWithFailedAccessCheck(
- Isolate* isolate, Handle<JSObject> object, Handle<Object> receiver,
- uint32_t index);
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetElementAttributesWithFailedAccessCheck(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> receiver,
- uint32_t index);
-
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
+ LookupIterator* it, Handle<Object> value);
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
@@ -2364,31 +2319,13 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
- Handle<JSObject> object, Handle<Name> name, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
- Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name);
-
- // Deletes an existing named property in a normalized object.
- static void DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name);
-
- MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
- Handle<JSObject> object, uint32_t index, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithInterceptor(
- Handle<JSObject> object,
- uint32_t index);
+ LookupIterator* it);
bool ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object);
- // Returns true if most of the elements backing storage is used.
- bool HasDenseElements();
-
- // Gets the current elements capacity and the number of used elements.
- void GetElementsCapacityAndUsage(int* capacity, int* used);
-
static bool CanSetCallback(Handle<JSObject> object, Handle<Name> name);
static void SetElementCallback(Handle<JSObject> object,
uint32_t index,
@@ -2466,6 +2403,7 @@ class FixedArray: public FixedArrayBase {
public:
// Setter and getter for elements.
inline Object* get(int index) const;
+ void SetValue(uint32_t index, Object* value);
static inline Handle<Object> get(Handle<FixedArray> array, int index);
// Setter that uses write barrier.
inline void set(int index, Object* value);
@@ -2587,6 +2525,8 @@ class FixedDoubleArray: public FixedArrayBase {
inline double get_scalar(int index);
inline uint64_t get_representation(int index);
static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index);
+ // The |value| passed to this setter must be a Number.
+ void SetValue(uint32_t index, Object* value);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -2626,17 +2566,22 @@ class FixedDoubleArray: public FixedArrayBase {
class WeakFixedArray : public FixedArray {
public:
- enum SearchForDuplicates { kAlwaysAdd, kAddIfNotFound };
-
// If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
- static Handle<WeakFixedArray> Add(
- Handle<Object> maybe_array, Handle<HeapObject> value,
- SearchForDuplicates search_for_duplicates = kAlwaysAdd,
- bool* was_present = NULL);
+ // This function does not check whether the value exists already; callers
+ // must ensure this themselves if necessary.
+ static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
+ Handle<HeapObject> value,
+ int* assigned_index = NULL);
// Returns true if an entry was found and removed.
bool Remove(Handle<HeapObject> value);
+ class NullCallback {
+ public:
+ static void Callback(Object* value, int old_index, int new_index) {}
+ };
+
+ template <class CompactionCallback>
void Compact();
inline Object* Get(int index) const;
@@ -2698,312 +2643,6 @@ class ArrayList : public FixedArray {
};
-// ConstantPoolArray describes a fixed-sized array containing constant pool
-// entries.
-//
-// A ConstantPoolArray can be structured in two different ways depending upon
-// whether it is extended or small. The is_extended_layout() method can be used
-// to discover which layout the constant pool has.
-//
-// The format of a small constant pool is:
-// [kSmallLayout1Offset] : Small section layout bitmap 1
-// [kSmallLayout2Offset] : Small section layout bitmap 2
-// [first_index(INT64, SMALL_SECTION)] : 64 bit entries
-// ... : ...
-// [first_index(CODE_PTR, SMALL_SECTION)] : code pointer entries
-// ... : ...
-// [first_index(HEAP_PTR, SMALL_SECTION)] : heap pointer entries
-// ... : ...
-// [first_index(INT32, SMALL_SECTION)] : 32 bit entries
-// ... : ...
-//
-// If the constant pool has an extended layout, the extended section constant
-// pool also contains an extended section, which has the following format at
-// location get_extended_section_header_offset():
-// [kExtendedInt64CountOffset] : count of extended 64 bit entries
-// [kExtendedCodePtrCountOffset] : count of extended code pointers
-// [kExtendedHeapPtrCountOffset] : count of extended heap pointers
-// [kExtendedInt32CountOffset] : count of extended 32 bit entries
-// [first_index(INT64, EXTENDED_SECTION)] : 64 bit entries
-// ... : ...
-// [first_index(CODE_PTR, EXTENDED_SECTION)]: code pointer entries
-// ... : ...
-// [first_index(HEAP_PTR, EXTENDED_SECTION)]: heap pointer entries
-// ... : ...
-// [first_index(INT32, EXTENDED_SECTION)] : 32 bit entries
-// ... : ...
-//
-class ConstantPoolArray: public HeapObject {
- public:
- enum WeakObjectState { NO_WEAK_OBJECTS, WEAK_OBJECTS_IN_OPTIMIZED_CODE };
-
- enum Type {
- INT64 = 0,
- CODE_PTR,
- HEAP_PTR,
- INT32,
- // Number of types stored by the ConstantPoolArrays.
- NUMBER_OF_TYPES,
- FIRST_TYPE = INT64,
- LAST_TYPE = INT32
- };
-
- enum LayoutSection {
- SMALL_SECTION = 0,
- EXTENDED_SECTION,
- NUMBER_OF_LAYOUT_SECTIONS
- };
-
- class NumberOfEntries BASE_EMBEDDED {
- public:
- inline NumberOfEntries() {
- for (int i = 0; i < NUMBER_OF_TYPES; i++) {
- element_counts_[i] = 0;
- }
- }
-
- inline NumberOfEntries(int int64_count, int code_ptr_count,
- int heap_ptr_count, int int32_count) {
- element_counts_[INT64] = int64_count;
- element_counts_[CODE_PTR] = code_ptr_count;
- element_counts_[HEAP_PTR] = heap_ptr_count;
- element_counts_[INT32] = int32_count;
- }
-
- inline NumberOfEntries(ConstantPoolArray* array, LayoutSection section) {
- element_counts_[INT64] = array->number_of_entries(INT64, section);
- element_counts_[CODE_PTR] = array->number_of_entries(CODE_PTR, section);
- element_counts_[HEAP_PTR] = array->number_of_entries(HEAP_PTR, section);
- element_counts_[INT32] = array->number_of_entries(INT32, section);
- }
-
- inline void increment(Type type);
- inline int equals(const NumberOfEntries& other) const;
- inline bool is_empty() const;
- inline int count_of(Type type) const;
- inline int base_of(Type type) const;
- inline int total_count() const;
- inline int are_in_range(int min, int max) const;
-
- private:
- int element_counts_[NUMBER_OF_TYPES];
- };
-
- class Iterator BASE_EMBEDDED {
- public:
- inline Iterator(ConstantPoolArray* array, Type type)
- : array_(array),
- type_(type),
- final_section_(array->final_section()),
- current_section_(SMALL_SECTION),
- next_index_(array->first_index(type, SMALL_SECTION)) {
- update_section();
- }
-
- inline Iterator(ConstantPoolArray* array, Type type, LayoutSection section)
- : array_(array),
- type_(type),
- final_section_(section),
- current_section_(section),
- next_index_(array->first_index(type, section)) {
- update_section();
- }
-
- inline int next_index();
- inline bool is_finished();
-
- private:
- inline void update_section();
- ConstantPoolArray* array_;
- const Type type_;
- const LayoutSection final_section_;
-
- LayoutSection current_section_;
- int next_index_;
- };
-
- // Getters for the first index, the last index and the count of entries of
- // a given type for a given layout section.
- inline int first_index(Type type, LayoutSection layout_section);
- inline int last_index(Type type, LayoutSection layout_section);
- inline int number_of_entries(Type type, LayoutSection layout_section);
-
- // Returns the type of the entry at the given index.
- inline Type get_type(int index);
- inline bool offset_is_type(int offset, Type type);
-
- // Setter and getter for pool elements.
- inline Address get_code_ptr_entry(int index);
- inline Object* get_heap_ptr_entry(int index);
- inline int64_t get_int64_entry(int index);
- inline int32_t get_int32_entry(int index);
- inline double get_int64_entry_as_double(int index);
-
- inline void set(int index, Address value);
- inline void set(int index, Object* value);
- inline void set(int index, int64_t value);
- inline void set(int index, double value);
- inline void set(int index, int32_t value);
-
- // Setters which take a raw offset rather than an index (for code generation).
- inline void set_at_offset(int offset, int32_t value);
- inline void set_at_offset(int offset, int64_t value);
- inline void set_at_offset(int offset, double value);
- inline void set_at_offset(int offset, Address value);
- inline void set_at_offset(int offset, Object* value);
-
- // Setter and getter for weak objects state
- inline void set_weak_object_state(WeakObjectState state);
- inline WeakObjectState get_weak_object_state();
-
- // Returns true if the constant pool has an extended layout, false if it has
- // only the small layout.
- inline bool is_extended_layout();
-
- // Returns the last LayoutSection in this constant pool array.
- inline LayoutSection final_section();
-
- // Set up initial state for a small layout constant pool array.
- inline void Init(const NumberOfEntries& small);
-
- // Set up initial state for an extended layout constant pool array.
- inline void InitExtended(const NumberOfEntries& small,
- const NumberOfEntries& extended);
-
- // Clears the pointer entries with GC safe values.
- void ClearPtrEntries(Isolate* isolate);
-
- // returns the total number of entries in the constant pool array.
- inline int length();
-
- // Garbage collection support.
- inline int size();
-
-
- inline static int MaxInt64Offset(int number_of_int64) {
- return kFirstEntryOffset + (number_of_int64 * kInt64Size);
- }
-
- inline static int SizeFor(const NumberOfEntries& small) {
- int size = kFirstEntryOffset +
- (small.count_of(INT64) * kInt64Size) +
- (small.count_of(CODE_PTR) * kPointerSize) +
- (small.count_of(HEAP_PTR) * kPointerSize) +
- (small.count_of(INT32) * kInt32Size);
- return RoundUp(size, kPointerSize);
- }
-
- inline static int SizeForExtended(const NumberOfEntries& small,
- const NumberOfEntries& extended) {
- int size = SizeFor(small);
- size = RoundUp(size, kInt64Size); // Align extended header to 64 bits.
- size += kExtendedFirstOffset +
- (extended.count_of(INT64) * kInt64Size) +
- (extended.count_of(CODE_PTR) * kPointerSize) +
- (extended.count_of(HEAP_PTR) * kPointerSize) +
- (extended.count_of(INT32) * kInt32Size);
- return RoundUp(size, kPointerSize);
- }
-
- inline static int entry_size(Type type) {
- switch (type) {
- case INT32:
- return kInt32Size;
- case INT64:
- return kInt64Size;
- case CODE_PTR:
- case HEAP_PTR:
- return kPointerSize;
- default:
- UNREACHABLE();
- return 0;
- }
- }
-
- // Code Generation support.
- inline int OffsetOfElementAt(int index) {
- int offset;
- LayoutSection section;
- if (is_extended_layout() && index >= first_extended_section_index()) {
- section = EXTENDED_SECTION;
- offset = get_extended_section_header_offset() + kExtendedFirstOffset;
- } else {
- section = SMALL_SECTION;
- offset = kFirstEntryOffset;
- }
-
- // Add offsets for the preceding type sections.
- DCHECK(index <= last_index(LAST_TYPE, section));
- for (Type type = FIRST_TYPE; index > last_index(type, section);
- type = next_type(type)) {
- offset += entry_size(type) * number_of_entries(type, section);
- }
-
- // Add offset for the index in it's type.
- Type type = get_type(index);
- offset += entry_size(type) * (index - first_index(type, section));
- return offset;
- }
-
- DECLARE_CAST(ConstantPoolArray)
-
- // Garbage collection support.
- Object** RawFieldOfElementAt(int index) {
- return HeapObject::RawField(this, OffsetOfElementAt(index));
- }
-
- // Small Layout description.
- static const int kSmallLayout1Offset = HeapObject::kHeaderSize;
- static const int kSmallLayout2Offset = kSmallLayout1Offset + kInt32Size;
- static const int kHeaderSize = kSmallLayout2Offset + kInt32Size;
- static const int kFirstEntryOffset = ROUND_UP(kHeaderSize, kInt64Size);
-
- static const int kSmallLayoutCountBits = 10;
- static const int kMaxSmallEntriesPerType = (1 << kSmallLayoutCountBits) - 1;
-
- // Fields in kSmallLayout1Offset.
- class Int64CountField: public BitField<int, 1, kSmallLayoutCountBits> {};
- class CodePtrCountField: public BitField<int, 11, kSmallLayoutCountBits> {};
- class HeapPtrCountField: public BitField<int, 21, kSmallLayoutCountBits> {};
- class IsExtendedField: public BitField<bool, 31, 1> {};
-
- // Fields in kSmallLayout2Offset.
- class Int32CountField: public BitField<int, 1, kSmallLayoutCountBits> {};
- class TotalCountField: public BitField<int, 11, 12> {};
- class WeakObjectStateField: public BitField<WeakObjectState, 23, 2> {};
-
- // Extended layout description, which starts at
- // get_extended_section_header_offset().
- static const int kExtendedInt64CountOffset = 0;
- static const int kExtendedCodePtrCountOffset =
- kExtendedInt64CountOffset + kInt32Size;
- static const int kExtendedHeapPtrCountOffset =
- kExtendedCodePtrCountOffset + kInt32Size;
- static const int kExtendedInt32CountOffset =
- kExtendedHeapPtrCountOffset + kInt32Size;
- static const int kExtendedFirstOffset =
- kExtendedInt32CountOffset + kInt32Size;
-
- // Dispatched behavior.
- void ConstantPoolIterateBody(ObjectVisitor* v);
-
- DECLARE_PRINTER(ConstantPoolArray)
- DECLARE_VERIFIER(ConstantPoolArray)
-
- private:
- inline int first_extended_section_index();
- inline int get_extended_section_header_offset();
-
- inline static Type next_type(Type type) {
- DCHECK(type >= FIRST_TYPE && type < NUMBER_OF_TYPES);
- int type_int = static_cast<int>(type);
- return static_cast<Type>(++type_int);
- }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray);
-};
-
-
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of the these objects is:
// [0]: Number of descriptors
@@ -3437,6 +3076,7 @@ class HashTable : public HashTableBase {
// Find entry for key otherwise return kNotFound.
inline int FindEntry(Key key);
+ inline int FindEntry(Isolate* isolate, Key key, int32_t hash);
int FindEntry(Isolate* isolate, Key key);
// Rehashes the table in-place.
@@ -3452,6 +3092,11 @@ class HashTable : public HashTableBase {
static const int kCapacityOffset =
kHeaderSize + kCapacityIndex * kPointerSize;
+ // Returns the index for an entry (of the key)
+ static inline int EntryToIndex(int entry) {
+ return (entry * kEntrySize) + kElementsStartIndex;
+ }
+
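
Moving EntryToIndex into the public section lets shapes and other callers compute slot positions directly: entry n occupies kEntrySize consecutive slots starting at kElementsStartIndex. A worked sketch for a 3-slots-per-entry table (the constant values here are invented for illustration):

    #include <cstdio>

    // Flat layout: [header][e0.key e0.value e0.details][e1.key ...] ...
    const int kElementsStartIndex = 3;  // header slots before entry 0
    const int kEntrySize = 3;           // key + value + details

    int EntryToIndex(int entry) {
      return (entry * kEntrySize) + kElementsStartIndex;
    }

    int main() {
      int base = EntryToIndex(2);  // entry 2 begins at slot 9
      std::printf("key=%d value=%d details=%d\n", base, base + 1, base + 2);
      return 0;
    }

This is exactly the layout the Dictionary accessors later in this diff rely on, reading EntryToIndex(entry) + 1 for the value and + 2 for the details.
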
protected:
friend class ObjectHashTable;
@@ -3469,11 +3114,6 @@ class HashTable : public HashTableBase {
Key key,
PretenureFlag pretenure = NOT_TENURED);
- // Returns the index for an entry (of the key)
- static inline int EntryToIndex(int entry) {
- return (entry * kEntrySize) + kElementsStartIndex;
- }
-
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
@@ -3583,35 +3223,34 @@ class StringTable: public HashTable<StringTable,
};
-enum class DictionaryEntryType { kObjects, kCells };
-
-
template <typename Derived, typename Shape, typename Key>
class Dictionary: public HashTable<Derived, Shape, Key> {
- protected:
typedef HashTable<Derived, Shape, Key> DerivedHashTable;
public:
// Returns the value at entry.
Object* ValueAt(int entry) {
- return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
+ return this->get(Derived::EntryToIndex(entry) + 1);
}
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
- this->set(DerivedHashTable::EntryToIndex(entry) + 1, value);
+ this->set(Derived::EntryToIndex(entry) + 1, value);
}
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
- DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(
- Smi::cast(this->get(DerivedHashTable::EntryToIndex(entry) + 2)));
+ return Shape::DetailsAt(static_cast<Derived*>(this), entry);
}
// Set the details for entry.
void DetailsAtPut(int entry, PropertyDetails value) {
- this->set(DerivedHashTable::EntryToIndex(entry) + 2, value.AsSmi());
+ Shape::DetailsAtPut(static_cast<Derived*>(this), entry, value);
+ }
+
+ // Returns true if property at given entry is deleted.
+ bool IsDeleted(int entry) {
+ return Shape::IsDeleted(static_cast<Derived*>(this), entry);
}
// Delete a property from the dictionary.
@@ -3630,76 +3269,30 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
- template <DictionaryEntryType type>
int NumberOfElementsFilterAttributes(PropertyAttributes filter);
- int NumberOfElementsFilterAttributes(Object* holder,
- PropertyAttributes filter) {
- if (holder->IsGlobalObject()) {
- return NumberOfElementsFilterAttributes<DictionaryEntryType::kCells>(
- filter);
- } else {
- return NumberOfElementsFilterAttributes<DictionaryEntryType::kObjects>(
- filter);
- }
- }
// Returns the number of enumerable elements in the dictionary.
- template <DictionaryEntryType type>
int NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes<type>(
+ return NumberOfElementsFilterAttributes(
static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
}
- int NumberOfEnumElements(Object* holder) {
- if (holder->IsGlobalObject()) {
- return NumberOfEnumElements<DictionaryEntryType::kCells>();
- } else {
- return NumberOfEnumElements<DictionaryEntryType::kObjects>();
- }
- }
// Returns true if the dictionary contains any elements that are non-writable,
// non-configurable, non-enumerable, or have getters/setters.
- template <DictionaryEntryType type>
bool HasComplexElements();
- bool HasComplexElements(Object* holder) {
- if (holder->IsGlobalObject()) {
- return HasComplexElements<DictionaryEntryType::kCells>();
- } else {
- return HasComplexElements<DictionaryEntryType::kObjects>();
- }
- }
enum SortMode { UNSORTED, SORTED };
// Copies keys to preallocated fixed array.
- template <DictionaryEntryType type>
void CopyKeysTo(FixedArray* storage, PropertyAttributes filter,
SortMode sort_mode);
- void CopyKeysTo(Object* holder, FixedArray* storage,
- PropertyAttributes filter, SortMode sort_mode) {
- if (holder->IsGlobalObject()) {
- return CopyKeysTo<DictionaryEntryType::kCells>(storage, filter,
- sort_mode);
- } else {
- return CopyKeysTo<DictionaryEntryType::kObjects>(storage, filter,
- sort_mode);
- }
- }
// Fill in details for properties into storage.
- template <DictionaryEntryType type>
void CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
SortMode sort_mode);
- void CopyKeysTo(Object* holder, FixedArray* storage, int index,
- PropertyAttributes filter, SortMode sort_mode) {
- if (holder->IsGlobalObject()) {
- return CopyKeysTo<DictionaryEntryType::kCells>(storage, index, filter,
- sort_mode);
- } else {
- return CopyKeysTo<DictionaryEntryType::kObjects>(storage, index, filter,
- sort_mode);
- }
- }
+
+ // Copies enumerable keys to preallocated fixed array.
+ void CopyEnumKeysTo(FixedArray* storage);
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
@@ -3770,7 +3363,47 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
};
-class NameDictionaryShape : public BaseShape<Handle<Name> > {
+template <typename Derived, typename Shape>
+class NameDictionaryBase : public Dictionary<Derived, Shape, Handle<Name> > {
+ typedef Dictionary<Derived, Shape, Handle<Name> > DerivedDictionary;
+
+ public:
+ // Find entry for key, otherwise return kNotFound. Optimized version of
+ // HashTable::FindEntry.
+ int FindEntry(Handle<Name> key);
+};
+
+
+template <typename Key>
+class BaseDictionaryShape : public BaseShape<Key> {
+ public:
+ template <typename Dictionary>
+ static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
+ STATIC_ASSERT(Dictionary::kEntrySize == 3);
+ DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
+ return PropertyDetails(
+ Smi::cast(dict->get(Dictionary::EntryToIndex(entry) + 2)));
+ }
+
+ template <typename Dictionary>
+ static inline void DetailsAtPut(Dictionary* dict, int entry,
+ PropertyDetails value) {
+ STATIC_ASSERT(Dictionary::kEntrySize == 3);
+ dict->set(Dictionary::EntryToIndex(entry) + 2, value.AsSmi());
+ }
+
+ template <typename Dictionary>
+ static bool IsDeleted(Dictionary* dict, int entry) {
+ return false;
+ }
+
+ template <typename Dictionary>
+ static inline void SetEntry(Dictionary* dict, int entry, Handle<Object> key,
+ Handle<Object> value, PropertyDetails details);
+};
+
+
+class NameDictionaryShape : public BaseDictionaryShape<Handle<Name> > {
public:
static inline bool IsMatch(Handle<Name> key, Object* other);
static inline uint32_t Hash(Handle<Name> key);
@@ -3782,36 +3415,47 @@ class NameDictionaryShape : public BaseShape<Handle<Name> > {
};
-class NameDictionary: public Dictionary<NameDictionary,
- NameDictionaryShape,
- Handle<Name> > {
- typedef Dictionary<
- NameDictionary, NameDictionaryShape, Handle<Name> > DerivedDictionary;
+class NameDictionary
+ : public NameDictionaryBase<NameDictionary, NameDictionaryShape> {
+ typedef NameDictionaryBase<NameDictionary, NameDictionaryShape>
+ DerivedDictionary;
public:
DECLARE_CAST(NameDictionary)
- // Copies enumerable keys to preallocated fixed array.
- template <DictionaryEntryType type>
- void CopyEnumKeysTo(FixedArray* storage);
- void CopyEnumKeysTo(Object* holder, FixedArray* storage) {
- if (holder->IsGlobalObject()) {
- return CopyEnumKeysTo<DictionaryEntryType::kCells>(storage);
- } else {
- return CopyEnumKeysTo<DictionaryEntryType::kObjects>(storage);
- }
- }
-
inline static Handle<FixedArray> DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary);
+};
- // Find entry for key, otherwise return kNotFound. Optimized version of
- // HashTable::FindEntry.
- int FindEntry(Handle<Name> key);
+
+class GlobalDictionaryShape : public NameDictionaryShape {
+ public:
+ static const int kEntrySize = 2; // Overrides NameDictionaryShape::kEntrySize
+
+ template <typename Dictionary>
+ static inline PropertyDetails DetailsAt(Dictionary* dict, int entry);
+
+ template <typename Dictionary>
+ static inline void DetailsAtPut(Dictionary* dict, int entry,
+ PropertyDetails value);
+
+ template <typename Dictionary>
+ static bool IsDeleted(Dictionary* dict, int entry);
+
+ template <typename Dictionary>
+ static inline void SetEntry(Dictionary* dict, int entry, Handle<Object> key,
+ Handle<Object> value, PropertyDetails details);
+};
+
+
+class GlobalDictionary
+ : public NameDictionaryBase<GlobalDictionary, GlobalDictionaryShape> {
+ public:
+ DECLARE_CAST(GlobalDictionary)
};
-class NumberDictionaryShape : public BaseShape<uint32_t> {
+class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object* other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
@@ -3944,18 +3588,26 @@ class ObjectHashTable: public HashTable<ObjectHashTable,
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
Object* Lookup(Handle<Object> key);
+ Object* Lookup(Handle<Object> key, int32_t hash);
+ Object* Lookup(Isolate* isolate, Handle<Object> key, int32_t hash);
// Adds (or overwrites) the value associated with the given key.
static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value);
+ static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
+ Handle<Object> key, Handle<Object> value,
+ int32_t hash);
// Returns an ObjectHashTable (possibly |table|) where |key| has been removed.
static Handle<ObjectHashTable> Remove(Handle<ObjectHashTable> table,
Handle<Object> key,
bool* was_present);
+ static Handle<ObjectHashTable> Remove(Handle<ObjectHashTable> table,
+ Handle<Object> key, bool* was_present,
+ int32_t hash);
- private:
+ protected:
friend class MarkCompactCollector;
void AddEntry(int entry, Object* key, Object* value);
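The new overloads taking an explicit int32_t hash let a caller compute the key's hash once and reuse it across a Lookup/Put/Remove sequence. A hedged sketch of the pattern; the commented calls mirror the declarations above, and HashOnce is a toy stand-in for the key's identity hash:

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Toy hash standing in for the key's identity hash: compute it a single
    // time, then pass it to every table call instead of rehashing.
    static int32_t HashOnce(const std::string& key) {
      int32_t h = 0;
      for (char c : key) h = h * 31 + c;  // not V8's hash, illustrative only
      return h;
    }

    int main() {
      std::string key = "k";
      int32_t hash = HashOnce(key);  // computed once
      // table->Lookup(key_handle, hash);                 (sketched V8 calls,
      // ObjectHashTable::Put(table, key_handle,           reusing the same
      //                      value_handle, hash);         hash value)
      assert(hash == HashOnce(key));
      return 0;
    }
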
@@ -4182,6 +3834,8 @@ class WeakHashTable: public HashTable<WeakHashTable,
Handle<HeapObject> key,
Handle<HeapObject> value);
+ static Handle<FixedArray> GetValues(Handle<WeakHashTable> table);
+
private:
friend class MarkCompactCollector;
@@ -4194,6 +3848,26 @@ class WeakHashTable: public HashTable<WeakHashTable,
};
+class WeakValueHashTable : public ObjectHashTable {
+ public:
+ DECLARE_CAST(WeakValueHashTable)
+
+#ifdef DEBUG
+ // Looks up the value associated with the given key. The hole value is
+ // returned in case the key is not present.
+ Object* LookupWeak(Handle<Object> key);
+#endif // DEBUG
+
+ // Adds (or overwrites) the value associated with the given key. Mapping a
+ // key to the hole value causes removal of the whole entry.
+ MUST_USE_RESULT static Handle<WeakValueHashTable> PutWeak(
+ Handle<WeakValueHashTable> table, Handle<Object> key,
+ Handle<HeapObject> value);
+
+ static Handle<FixedArray> GetWeakValues(Handle<WeakValueHashTable> table);
+};
+
+
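Because PutWeak stores its values weakly and treats a hole value as a deletion, a reader must cope with entries whose value the GC has already cleared. The semantics, sketched by analogy with std::weak_ptr (V8 uses WeakCells internally; this is only an illustration):

    #include <cassert>
    #include <memory>
    #include <unordered_map>

    // Weak-value table semantics via std::weak_ptr: the table does not keep
    // its values alive, so lookups must tolerate already-collected values.
    int main() {
      std::unordered_map<int, std::weak_ptr<int>> table;
      auto value = std::make_shared<int>(7);
      table[1] = value;             // PutWeak: store without owning
      assert(!table[1].expired());  // value still alive
      value.reset();                // last strong reference dropped
      assert(table[1].expired());   // lookup now sees a cleared entry
      return 0;
    }
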
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
@@ -4201,7 +3875,7 @@ class WeakHashTable: public HashTable<WeakHashTable,
// [2]: current cache size
// [3]: dummy field.
// The rest of array are key/value pairs.
-class JSFunctionResultCache: public FixedArray {
+class JSFunctionResultCache : public FixedArray {
public:
static const int kFactoryIndex = 0;
static const int kFingerIndex = kFactoryIndex + 1;
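Given the fixed layout documented above, the first key/value pair begins right after the four header slots, so pair i occupies indices 4 + 2 * i and 5 + 2 * i. A small check of that arithmetic:

    #include <cassert>

    // Index arithmetic implied by the documented layout: slots 0..3 are the
    // header (factory, finger, size, dummy); the rest are key/value pairs.
    static const int kEntriesIndex = 4;  // first key slot

    static int KeyIndex(int pair) { return kEntriesIndex + 2 * pair; }
    static int ValueIndex(int pair) { return KeyIndex(pair) + 1; }

    int main() {
      assert(KeyIndex(0) == 4 && ValueIndex(0) == 5);
      assert(KeyIndex(3) == 10 && ValueIndex(3) == 11);
      return 0;
    }
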
@@ -4271,6 +3945,13 @@ class ScopeInfo : public FixedArray {
// no contexts are allocated for this scope ContextLength returns 0.
int ContextLength();
+ // Does this scope declare a "this" binding?
+ bool HasReceiver();
+
+ // Does this scope declare a "this" binding, and is that binding stack- or
+ // context-allocated?
+ bool HasAllocatedReceiver();
+
// Is this scope the scope of a named function expression?
bool HasFunctionName();
@@ -4337,7 +4018,8 @@ class ScopeInfo : public FixedArray {
// If the slot is present and mode != NULL, sets *mode to the corresponding
// mode for that variable.
static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
- VariableMode* mode, InitializationFlag* init_flag,
+ VariableMode* mode, VariableLocation* location,
+ InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
// Lookup support for serialized scope info. Returns the
@@ -4351,16 +4033,21 @@ class ScopeInfo : public FixedArray {
// must be an internalized string.
int FunctionContextSlotIndex(String* name, VariableMode* mode);
- bool block_scope_is_class_scope();
+ // Lookup support for serialized scope info. Returns the receiver context
+ // slot index if the scope has a "this" binding and the binding is
+ // context-allocated. Otherwise returns a value < 0.
+ int ReceiverContextSlotIndex();
+
FunctionKind function_kind();
// Copies all the context locals into an object used to materialize a scope.
- static bool CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ static void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object);
static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope);
+ static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
// Serializes empty scope info.
static ScopeInfo* Empty(Isolate* isolate);
@@ -4382,6 +4069,7 @@ class ScopeInfo : public FixedArray {
V(ParameterCount) \
V(StackLocalCount) \
V(ContextLocalCount) \
+ V(ContextGlobalCount) \
V(StrongModeFreeVariableCount)
#define FIELD_ACCESSORS(name) \
@@ -4438,7 +4126,10 @@ class ScopeInfo : public FixedArray {
// 7. StrongModeFreeVariablePositionEntries:
// Stores the locations (start and end position) of strong mode free
// variables.
- // 8. FunctionNameEntryIndex:
+ // 8. ReceiverEntryIndex:
+ // If the scope binds a "this" value, one slot is reserved to hold the
+ // context or stack slot index for the variable.
+ // 9. FunctionNameEntryIndex:
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
@@ -4447,34 +4138,42 @@ class ScopeInfo : public FixedArray {
int StackLocalFirstSlotIndex();
int StackLocalEntriesIndex();
int ContextLocalNameEntriesIndex();
+ int ContextGlobalNameEntriesIndex();
int ContextLocalInfoEntriesIndex();
+ int ContextGlobalInfoEntriesIndex();
int StrongModeFreeVariableNameEntriesIndex();
int StrongModeFreeVariablePositionEntriesIndex();
+ int ReceiverEntryIndex();
int FunctionNameEntryIndex();
- // Location of the function variable for named function expressions.
- enum FunctionVariableInfo {
- NONE, // No function name present.
- STACK, // Function
- CONTEXT,
- UNUSED
- };
+ int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
+ VariableLocation* location, InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
+
+ // Used for the function name variable for named function expressions, and for
+ // the receiver.
+ enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
// Properties of scopes.
class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
- class CallsEvalField : public BitField<bool, 4, 1> {};
+ class CallsEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {};
STATIC_ASSERT(LANGUAGE_END == 3);
- class LanguageModeField : public BitField<LanguageMode, 5, 2> {};
- class FunctionVariableField : public BitField<FunctionVariableInfo, 7, 2> {};
- class FunctionVariableMode : public BitField<VariableMode, 9, 3> {};
- class AsmModuleField : public BitField<bool, 12, 1> {};
- class AsmFunctionField : public BitField<bool, 13, 1> {};
+ class LanguageModeField
+ : public BitField<LanguageMode, CallsEvalField::kNext, 2> {};
+ class ReceiverVariableField
+ : public BitField<VariableAllocationInfo, LanguageModeField::kNext, 2> {};
+ class FunctionVariableField
+ : public BitField<VariableAllocationInfo, ReceiverVariableField::kNext,
+ 2> {};
+ class FunctionVariableMode
+ : public BitField<VariableMode, FunctionVariableField::kNext, 3> {};
+ class AsmModuleField : public BitField<bool, FunctionVariableMode::kNext, 1> {
+ };
+ class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
class IsSimpleParameterListField
: public BitField<bool, AsmFunctionField::kNext, 1> {};
- class BlockScopeIsClassScopeField
- : public BitField<bool, IsSimpleParameterListField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, BlockScopeIsClassScopeField::kNext, 8> {};
+ : public BitField<FunctionKind, IsSimpleParameterListField::kNext, 8> {};
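The rewrite replaces hand-counted bit positions (4, 5, 7, ...) with kNext chaining, so inserting ReceiverVariableField automatically shifts every later field. A reduced sketch of the BitField idiom this relies on (only the pieces the chaining needs, not V8's full template):

    #include <cassert>
    #include <cstdint>

    // Reduced BitField: each field knows its shift and exposes kNext so the
    // next field starts right after it, with no hand-counted offsets.
    template <class T, int shift, int size>
    struct BitFieldLike {
      static const int kNext = shift + size;
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> shift);
      }
    };

    // Chained exactly like the ScopeInfo fields above.
    using ScopeTypeF = BitFieldLike<int, 0, 4>;
    using CallsEvalF = BitFieldLike<bool, ScopeTypeF::kNext, 1>;
    using LanguageModeF = BitFieldLike<int, CallsEvalF::kNext, 2>;

    int main() {
      uint32_t bits = ScopeTypeF::encode(3) | CallsEvalF::encode(true) |
                      LanguageModeF::encode(2);
      assert(ScopeTypeF::decode(bits) == 3);
      assert(CallsEvalF::decode(bits) == true);
      assert(LanguageModeF::decode(bits) == 2);
      return 0;
    }
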
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -4673,9 +4372,7 @@ class ExternalUint8ClampedArray: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined and clamps the converted value between 0 and 255.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalUint8ClampedArray> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalUint8ClampedArray)
@@ -4697,9 +4394,7 @@ class ExternalInt8Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalInt8Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalInt8Array)
@@ -4721,9 +4416,7 @@ class ExternalUint8Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalUint8Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalUint8Array)
@@ -4745,9 +4438,7 @@ class ExternalInt16Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalInt16Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalInt16Array)
@@ -4770,9 +4461,7 @@ class ExternalUint16Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalUint16Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalUint16Array)
@@ -4794,9 +4483,7 @@ class ExternalInt32Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalInt32Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalInt32Array)
@@ -4819,9 +4506,7 @@ class ExternalUint32Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalUint32Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalUint32Array)
@@ -4844,9 +4529,7 @@ class ExternalFloat32Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalFloat32Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalFloat32Array)
@@ -4869,9 +4552,7 @@ class ExternalFloat64Array: public ExternalArray {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<ExternalFloat64Array> array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_CAST(ExternalFloat64Array)
@@ -4886,8 +4567,21 @@ class ExternalFloat64Array: public ExternalArray {
class FixedTypedArrayBase: public FixedArrayBase {
public:
+ // [base_pointer]: For now, points to the FixedTypedArrayBase itself.
+ DECL_ACCESSORS(base_pointer, Object)
+
+ // Dispatched behavior.
+ inline void FixedTypedArrayBaseIterateBody(ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ inline void FixedTypedArrayBaseIterateBody();
+
DECLARE_CAST(FixedTypedArrayBase)
+ static const int kBasePointerOffset =
+ FixedArrayBase::kHeaderSize + kPointerSize;
+ static const int kHeaderSize = kBasePointerOffset + kPointerSize;
+
static const int kDataOffset = DOUBLE_POINTER_ALIGN(kHeaderSize);
inline int size();
@@ -4926,9 +4620,7 @@ class FixedTypedArray: public FixedTypedArrayBase {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- static Handle<Object> SetValue(Handle<JSObject> holder,
- Handle<FixedTypedArray<Traits> > array,
- uint32_t index, Handle<Object> value);
+ void SetValue(uint32_t index, Object* value);
DECLARE_PRINTER(FixedTypedArray)
DECLARE_VERIFIER(FixedTypedArray)
@@ -5099,6 +4791,11 @@ class DeoptimizationOutputData: public FixedArray {
// [ return-address-offset , handler-offset ]
class HandlerTable : public FixedArray {
public:
+ // Conservative prediction whether a given handler will locally catch an
+ // exception or cause a re-throw to outside the code boundary. Since this is
+ // undecidable in general, it is merely an approximation (e.g. useful for
+ // the debugger).
+ enum CatchPrediction { UNCAUGHT, CAUGHT };
+
// Accessors for handler table based on ranges.
void SetRangeStart(int index, int value) {
set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
@@ -5106,7 +4803,9 @@ class HandlerTable : public FixedArray {
void SetRangeEnd(int index, int value) {
set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
}
- void SetRangeHandler(int index, int value) {
+ void SetRangeHandler(int index, int offset, CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(offset) |
+ HandlerPredictionField::encode(prediction);
set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
}
void SetRangeDepth(int index, int value) {
@@ -5117,15 +4816,17 @@ class HandlerTable : public FixedArray {
void SetReturnOffset(int index, int value) {
set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
}
- void SetReturnHandler(int index, int value) {
+ void SetReturnHandler(int index, int offset, CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(offset) |
+ HandlerPredictionField::encode(prediction);
set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
}
// Lookup handler in a table based on ranges.
- int LookupRange(int pc_offset, int* stack_depth);
+ int LookupRange(int pc_offset, int* stack_depth, CatchPrediction* prediction);
// Lookup handler in a table based on return addresses.
- int LookupReturn(int pc_offset);
+ int LookupReturn(int pc_offset, CatchPrediction* prediction);
// Returns the required length of the underlying fixed array.
static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
@@ -5150,14 +4851,12 @@ class HandlerTable : public FixedArray {
static const int kReturnOffsetIndex = 0;
static const int kReturnHandlerIndex = 1;
static const int kReturnEntrySize = 2;
-};
+ // Encoding of the {handler} field.
+ class HandlerPredictionField : public BitField<CatchPrediction, 0, 1> {};
+ class HandlerOffsetField : public BitField<int, 1, 30> {};
+};
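The handler slot now packs two values into one Smi: the CatchPrediction in bit 0 and the handler offset in bits 1..30, matching HandlerPredictionField and HandlerOffsetField above. A worked round trip of that encoding:

    #include <cassert>
    #include <cstdint>

    enum CatchPrediction { UNCAUGHT = 0, CAUGHT = 1 };

    // Same split as HandlerPredictionField (bit 0) and HandlerOffsetField
    // (bits 1..30): value = offset << 1 | prediction.
    static int32_t Encode(int offset, CatchPrediction prediction) {
      return (offset << 1) | static_cast<int32_t>(prediction);
    }

    int main() {
      int32_t value = Encode(0x1234, CAUGHT);
      assert((value & 1) == CAUGHT);   // prediction in bit 0
      assert((value >> 1) == 0x1234);  // offset in bits 1..30
      return 0;
    }
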
-// Forward declaration.
-class Cell;
-class PropertyCell;
-class SafepointEntry;
-class TypeFeedbackInfo;
// Code describes objects with on-the-fly generated machine code.
class Code: public HeapObject {
@@ -5264,6 +4963,11 @@ class Code: public HeapObject {
inline int prologue_offset() const;
inline void set_prologue_offset(int offset);
+ // [constant_pool_offset]: Offset of the constant pool.
+ // Valid for FLAG_enable_embedded_constant_pool only.
+ inline int constant_pool_offset() const;
+ inline void set_constant_pool_offset(int offset);
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
@@ -5324,10 +5028,6 @@ class Code: public HeapObject {
inline bool can_have_weak_objects();
inline void set_can_have_weak_objects(bool value);
- // [optimizable]: For FUNCTION kind, tells if it is optimizable.
- inline bool optimizable();
- inline void set_optimizable(bool value);
-
// [has_deoptimization_support]: For FUNCTION kind, tells if it has
// deoptimization support.
inline bool has_deoptimization_support();
@@ -5387,7 +5087,7 @@ class Code: public HeapObject {
inline bool back_edges_patched_for_osr();
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
- inline byte to_boolean_state();
+ inline uint16_t to_boolean_state();
// [has_function_cache]: For kind STUB, tells whether a function cache is
// passed to the stub.
@@ -5401,8 +5101,7 @@ class Code: public HeapObject {
inline void set_marked_for_deoptimization(bool flag);
// [constant_pool]: The constant pool for this function.
- inline ConstantPoolArray* constant_pool();
- inline void set_constant_pool(Object* constant_pool);
+ inline Address constant_pool();
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -5603,6 +5302,9 @@ class Code: public HeapObject {
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
+ static const int kConstantPoolSize =
+ FLAG_enable_embedded_constant_pool ? kIntSize : 0;
+
// Layout description.
static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
@@ -5622,20 +5324,16 @@ class Code: public HeapObject {
// Note: We might be able to squeeze this into the flags above.
static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
static const int kConstantPoolOffset = kPrologueOffset + kIntSize;
-
- static const int kHeaderPaddingStart = kConstantPoolOffset + kPointerSize;
+ static const int kHeaderPaddingStart =
+ kConstantPoolOffset + kConstantPoolSize;
// Add padding to align the instruction start following right after
// the Code object header.
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
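The kHeaderSize expression is the standard align-up idiom: adding alignment - 1 and masking off the low bits rounds kHeaderPaddingStart up to the next code-alignment boundary. A quick check with an assumed 32-byte alignment:

    #include <cassert>

    // Align-up idiom used for kHeaderSize: (x + mask) & ~mask rounds x up to
    // the next multiple of the alignment (32 is assumed for illustration).
    static const int kAlignment = 32;
    static const int kMask = kAlignment - 1;

    static int AlignUp(int x) { return (x + kMask) & ~kMask; }

    int main() {
      assert(AlignUp(0) == 0);
      assert(AlignUp(1) == 32);
      assert(AlignUp(32) == 32);
      assert(AlignUp(33) == 64);
      return 0;
    }
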
- // Ensure that the slot for the constant pool pointer is aligned.
- STATIC_ASSERT((kConstantPoolOffset & kPointerAlignmentMask) == 0);
// Byte offsets within kKindSpecificFlags1Offset.
- static const int kOptimizableOffset = kKindSpecificFlags1Offset;
-
- static const int kFullCodeFlags = kOptimizableOffset + 1;
+ static const int kFullCodeFlags = kKindSpecificFlags1Offset;
class FullCodeFlagsHasDeoptimizationSupportField:
public BitField<bool, 0, 1> {}; // NOLINT
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
@@ -5903,7 +5601,8 @@ class Map: public HeapObject {
class Deprecated : public BitField<bool, 23, 1> {};
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
- // Bits 26 and 27 are free.
+ class IsStrong : public BitField<bool, 26, 1> {};
+ // Bit 27 is free.
// Keep this bit field at the very end for better code in
// Builtins::kJSConstructStubGeneric stub.
@@ -5981,6 +5680,8 @@ class Map: public HeapObject {
return ((1 << kIsObserved) & bit_field()) != 0;
}
+ inline void set_is_strong();
+ inline bool is_strong();
inline void set_is_extensible(bool value);
inline bool is_extensible();
inline void set_is_prototype_map(bool value);
@@ -6020,7 +5721,7 @@ class Map: public HeapObject {
}
inline bool has_sloppy_arguments_elements() {
- return elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
+ return IsSloppyArgumentsElements(elements_kind());
}
inline bool has_external_array_elements() {
@@ -6035,11 +5736,6 @@ class Map: public HeapObject {
return IsDictionaryElementsKind(elements_kind());
}
- inline bool has_slow_elements_kind() {
- return elements_kind() == DICTIONARY_ELEMENTS
- || elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
- }
-
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
@@ -6062,6 +5758,8 @@ class Map: public HeapObject {
// the given prototype's map).
static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
Handle<JSObject> prototype, Isolate* isolate);
+ static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
+ Handle<Map> prototype_map, Isolate* isolate);
// [prototype chain validity cell]: Associated with a prototype object,
// stored in that object's map's PrototypeInfo, indicates that prototype
@@ -6105,6 +5803,8 @@ class Map: public HeapObject {
static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
int descriptor_number,
Handle<Object> value);
+ static Handle<Map> PrepareForDataElement(Handle<Map> old_map,
+ Handle<Object> value);
static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
const char* reason);
@@ -6271,6 +5971,10 @@ class Map: public HeapObject {
PropertyAttributes attrs_to_add,
Handle<Symbol> transition_marker,
const char* reason);
+
+ static Handle<Map> FixProxy(Handle<Map> map, InstanceType type, int size);
+
+
// Maximal number of fast properties. Used to restrict the number of map
// transitions to avoid an explosion in the number of maps for objects used as
// dictionaries.
@@ -6332,7 +6036,8 @@ class Map: public HeapObject {
static void AppendCallbackDescriptors(Handle<Map> map,
Handle<Object> descriptors);
- static inline int SlackForArraySize(int old_size, int size_limit);
+ static inline int SlackForArraySize(bool is_prototype_map, int old_size,
+ int size_limit);
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
@@ -6358,7 +6063,8 @@ class Map: public HeapObject {
// Returns the transitioned map for this map with the most generic
// elements_kind that's found in |candidates|, or null handle if no match is
// found at all.
- Handle<Map> FindTransitionedMap(MapHandleList* candidates);
+ static Handle<Map> FindTransitionedMap(Handle<Map> map,
+ MapHandleList* candidates);
bool CanTransition() {
// Only JSObject and subtypes have map transitions and back pointers.
@@ -6526,7 +6232,6 @@ class Map: public HeapObject {
Handle<LayoutDescriptor> layout_descriptor);
private:
- static void ConnectElementsTransition(Handle<Map> parent, Handle<Map> child);
static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag);
@@ -6564,9 +6269,6 @@ class Map: public HeapObject {
// the descriptor array.
inline void NotifyLeafMapLayoutChange();
- static Handle<Map> TransitionElementsToSlow(Handle<Map> object,
- ElementsKind to_kind);
-
void DeprecateTransitionTree();
bool DeprecateTarget(PropertyKind kind, Name* key,
PropertyAttributes attributes,
@@ -6635,9 +6337,15 @@ class Box : public Struct {
// Container for metadata stored on each prototype map.
class PrototypeInfo : public Struct {
public:
+ static const int UNREGISTERED = -1;
+
// [prototype_users]: WeakFixedArray containing maps using this prototype,
// or Smi(0) if uninitialized.
DECL_ACCESSORS(prototype_users, Object)
+ // [registry_slot]: Slot in prototype's user registry where this user
+ // is stored. Returns UNREGISTERED if this prototype has not been registered.
+ inline int registry_slot() const;
+ inline void set_registry_slot(int slot);
// [validity_cell]: Cell containing the validity bit for prototype chains
// going through this object, or Smi(0) if uninitialized.
DECL_ACCESSORS(validity_cell, Object)
@@ -6651,7 +6359,8 @@ class PrototypeInfo : public Struct {
DECLARE_VERIFIER(PrototypeInfo)
static const int kPrototypeUsersOffset = HeapObject::kHeaderSize;
- static const int kValidityCellOffset = kPrototypeUsersOffset + kPointerSize;
+ static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
+ static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
static const int kConstructorNameOffset = kValidityCellOffset + kPointerSize;
static const int kSize = kConstructorNameOffset + kPointerSize;
@@ -6718,6 +6427,10 @@ class Script: public Struct {
// function from which eval was called where eval was called.
DECL_ACCESSORS(eval_from_instructions_offset, Smi)
+ // [shared_function_infos]: weak fixed array containing all shared
+ // function infos created from this script.
+ DECL_ACCESSORS(shared_function_infos, Object)
+
// [flags]: Holds an exciting bitfield.
DECL_ACCESSORS(flags, Smi)
@@ -6737,17 +6450,11 @@ class Script: public Struct {
inline CompilationState compilation_state();
inline void set_compilation_state(CompilationState state);
- // [is_embedder_debug_script]: An opaque boolean set by the embedder via
- // ScriptOrigin, and used by the embedder to make decisions about the
- // script's origin. V8 just passes this through. Encoded in
- // the 'flags' field.
- DECL_BOOLEAN_ACCESSORS(is_embedder_debug_script)
-
- // [is_shared_cross_origin]: An opaque boolean set by the embedder via
- // ScriptOrigin, and used by the embedder to make decisions about the
- // script's level of privilege. V8 just passes this through. Encoded in
- // the 'flags' field.
- DECL_BOOLEAN_ACCESSORS(is_shared_cross_origin)
+ // [origin_options]: optional attributes set by the embedder via ScriptOrigin,
+ // and used by the embedder to make decisions about the script. V8 just passes
+ // this through. Encoded in the 'flags' field.
+ inline v8::ScriptOriginOptions origin_options();
+ inline void set_origin_options(ScriptOriginOptions origin_options);
DECLARE_CAST(Script)
@@ -6771,6 +6478,10 @@ class Script: public Struct {
// Get the JS object wrapping the given script; create it if none exists.
static Handle<JSObject> GetWrapper(Handle<Script> script);
+ // Look through the list of existing shared function infos to find one
+ // that matches the function literal. Return empty handle if not found.
+ MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(FunctionLiteral* fun);
+
// Dispatched behavior.
DECLARE_PRINTER(Script)
DECLARE_VERIFIER(Script)
@@ -6787,8 +6498,9 @@ class Script: public Struct {
static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
kEvalFromSharedOffset + kPointerSize;
- static const int kFlagsOffset =
+ static const int kSharedFunctionInfosOffset =
kEvalFrominstructionsOffsetOffset + kPointerSize;
+ static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
static const int kSize = kSourceMappingUrlOffset + kPointerSize;
@@ -6799,8 +6511,10 @@ class Script: public Struct {
// Bit positions in the flags field.
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
- static const int kIsEmbedderDebugScriptBit = 2;
- static const int kIsSharedCrossOriginBit = 3;
+ static const int kOriginOptionsShift = 2;
+ static const int kOriginOptionsSize = 3;
+ static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
+ << kOriginOptionsShift;
DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
};
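The two retired flag bits are replaced by a 3-bit origin-options field at shift 2, so kOriginOptionsMask works out to 0x1C. A short sketch of packing a value into the flags word with those constants:

    #include <cassert>

    // Same constants as the Script flags above.
    static const int kOriginOptionsShift = 2;
    static const int kOriginOptionsSize = 3;
    static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
                                          << kOriginOptionsShift;

    int main() {
      assert(kOriginOptionsMask == 0x1C);  // bits 2, 3 and 4
      int flags = 0x3;                     // compilation type/state, bits 0..1
      int options = 0x5;                   // some 3-bit origin-options value
      flags = (flags & ~kOriginOptionsMask) | (options << kOriginOptionsShift);
      assert(((flags & kOriginOptionsMask) >> kOriginOptionsShift) == options);
      assert((flags & 0x3) == 0x3);        // low bits survive untouched
      return 0;
    }
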
@@ -6848,11 +6562,16 @@ class Script: public Struct {
V(Math, clz32, MathClz32) \
V(Math, fround, MathFround)
+#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
+ V(Atomics, load, AtomicsLoad) \
+ V(Atomics, store, AtomicsStore)
+
enum BuiltinFunctionId {
kArrayCode,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
+ ATOMIC_FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note that it continues the
// list of math functions.
@@ -6860,6 +6579,14 @@ enum BuiltinFunctionId {
};
+// Result of searching in an optimized code map of a SharedFunctionInfo. Note
+// that both {code} and {literals} can be NULL to pass search result status.
+struct CodeAndLiterals {
+ Code* code; // Cached optimized code.
+ FixedArray* literals; // Cached literals array.
+};
+
+
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo: public HeapObject {
@@ -6875,16 +6602,11 @@ class SharedFunctionInfo: public HeapObject {
// and a shared literals array or Smi(0) if none.
DECL_ACCESSORS(optimized_code_map, Object)
- // Returns index i of the entry with the specified context and OSR entry.
- // At position i - 1 is the context, position i the code, and i + 1 the
- // literals array. Returns -1 when no matching entry is found.
- int SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
-
- // Installs optimized code from the code map on the given closure. The
- // index has to be consistent with a search result as defined above.
- FixedArray* GetLiteralsFromOptimizedCodeMap(int index);
-
- Code* GetCodeFromOptimizedCodeMap(int index);
+ // Returns entry from optimized code map for specified context and OSR entry.
+ // Note that {code == nullptr} indicates no matching entry has been found,
+ // whereas {literals == nullptr} indicates the code is context-independent.
+ CodeAndLiterals SearchOptimizedCodeMap(Context* native_context,
+ BailoutId osr_ast_id);
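Since the struct carries both results, a caller can distinguish three outcomes: no entry, a context-independent hit, and a full context-dependent hit. A sketch of that branching with stand-in types (the real Code and FixedArray are declared elsewhere in this header):

    #include <cassert>

    // Stand-ins that only exercise the three states described above.
    struct CodeLike {};
    struct LiteralsLike {};

    struct CodeAndLiteralsLike {
      CodeLike* code;          // nullptr => no matching entry
      LiteralsLike* literals;  // nullptr => code is context-independent
    };

    static int Classify(CodeAndLiteralsLike r) {
      if (r.code == nullptr) return 0;      // miss: compile from scratch
      if (r.literals == nullptr) return 1;  // hit, context-independent code
      return 2;                             // hit, context-dependent code
    }

    int main() {
      CodeLike code;
      LiteralsLike lits;
      assert(Classify({nullptr, nullptr}) == 0);
      assert(Classify({&code, nullptr}) == 1);
      assert(Classify({&code, &lits}) == 2);
      return 0;
    }
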
// Clear optimized code map.
void ClearOptimizedCodeMap();
@@ -6892,29 +6614,29 @@ class SharedFunctionInfo: public HeapObject {
// Removes a specific optimized code object from the optimized code map.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
- // Unconditionally clear the type feedback vector (including vector ICs).
- void ClearTypeFeedbackInfo();
-
- // Clear the type feedback vector with a more subtle policy at GC time.
- void ClearTypeFeedbackInfoAtGCTime();
-
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
- // Initialize a SharedFunctionInfo from a parsed function literal.
- static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
- FunctionLiteral* lit);
+ // Add a new entry to the optimized code map for context-independent code.
+ static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+ Handle<Code> code);
- // Add a new entry to the optimized code map.
+ // Add a new entry to the optimized code map for context-dependent code.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
Handle<FixedArray> literals,
BailoutId osr_ast_id);
+ // Set up the link between shared function info and the script. The shared
+ // function info is added to the list on the script.
+ static void SetScript(Handle<SharedFunctionInfo> shared,
+ Handle<Object> script_object);
+
// Layout description of the optimized code map.
static const int kNextMapIndex = 0;
- static const int kEntriesStart = 1;
+ static const int kSharedCodeIndex = 1;
+ static const int kEntriesStart = 2;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
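With kSharedCodeIndex occupying slot 1, context-dependent entries now start at slot 2; within an entry, context, code and literals sit at relative offsets 0, 1 and 2. The stride of one entry is not shown in this hunk, so kEntryLength below is an assumption. The indexing arithmetic:

    #include <cassert>

    // Layout constants from the diff; kEntryLength is an assumed stride
    // covering at least the context/code/literals slots.
    static const int kEntriesStart = 2;
    static const int kEntryLength = 4;  // assumption, not shown above
    static const int kContextOffset = 0;
    static const int kCachedCodeOffset = 1;

    static int ContextSlot(int entry) {
      return kEntriesStart + entry * kEntryLength + kContextOffset;
    }
    static int CodeSlot(int entry) {
      return kEntriesStart + entry * kEntryLength + kCachedCodeOffset;
    }

    int main() {
      assert(ContextSlot(0) == 2 && CodeSlot(0) == 3);
      assert(ContextSlot(1) == 6);  // entries are kEntryLength apart
      return 0;
    }
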
@@ -6955,6 +6677,12 @@ class SharedFunctionInfo: public HeapObject {
// available.
DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
+ // Unconditionally clear the type feedback vector (including vector ICs).
+ void ClearTypeFeedbackInfo();
+
+ // Clear the type feedback vector with a more subtle policy at GC time.
+ void ClearTypeFeedbackInfoAtGCTime();
+
#if TRACE_MAPS
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
@@ -7064,9 +6792,10 @@ class SharedFunctionInfo: public HeapObject {
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
- // Indicates that this function uses a super property.
+ // Indicates that this function uses a super property (or an eval that may
+ // use a super property).
// This is needed to set up the [[HomeObject]] on the function instance.
- DECL_BOOLEAN_ACCESSORS(uses_super_property)
+ DECL_BOOLEAN_ACCESSORS(needs_home_object)
// True if the function has any duplicated parameter names.
DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
@@ -7077,8 +6806,8 @@ class SharedFunctionInfo: public HeapObject {
// global object.
DECL_BOOLEAN_ACCESSORS(native)
- // Indicate that this builtin needs to be inlined in crankshaft.
- DECL_BOOLEAN_ACCESSORS(inline_builtin)
+ // Indicate that this function should always be inlined in optimized code.
+ DECL_BOOLEAN_ACCESSORS(force_inline)
// Indicates that the function was created by the Function function.
// Though it's anonymous, toString should treat it as if it had the name
@@ -7097,8 +6826,8 @@ class SharedFunctionInfo: public HeapObject {
// Is this a function or top-level/eval code.
DECL_BOOLEAN_ACCESSORS(is_function)
- // Indicates that code for this function cannot be cached.
- DECL_BOOLEAN_ACCESSORS(dont_cache)
+ // Indicates that code for this function cannot be compiled with Crankshaft.
+ DECL_BOOLEAN_ACCESSORS(dont_crankshaft)
// Indicates that code for this function cannot be flushed.
DECL_BOOLEAN_ACCESSORS(dont_flush)
@@ -7124,6 +6853,9 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that the shared function info is deserialized from cache.
DECL_BOOLEAN_ACCESSORS(deserialized)
+ // Indicates that the shared function info has never been compiled before.
+ DECL_BOOLEAN_ACCESSORS(never_compiled)
+
inline FunctionKind kind();
inline void set_kind(FunctionKind kind);
@@ -7193,6 +6925,10 @@ class SharedFunctionInfo: public HeapObject {
inline bool is_simple_parameter_list();
+ // Initialize a SharedFunctionInfo from a parsed function literal.
+ static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
+ FunctionLiteral* lit);
+
// Dispatched behavior.
DECLARE_PRINTER(SharedFunctionInfo)
DECLARE_VERIFIER(SharedFunctionInfo)
@@ -7353,15 +7089,15 @@ class SharedFunctionInfo: public HeapObject {
kStrictModeFunction,
kStrongModeFunction,
kUsesArguments,
- kUsesSuperProperty,
+ kNeedsHomeObject,
kHasDuplicateParameters,
kNative,
- kInlineBuiltin,
+ kForceInline,
kBoundFunction,
kIsAnonymous,
kNameShouldPrintAsAnonymous,
kIsFunction,
- kDontCache,
+ kDontCrankshaft,
kDontFlush,
kIsArrow,
kIsGenerator,
@@ -7373,6 +7109,7 @@ class SharedFunctionInfo: public HeapObject {
kInClassLiteral,
kIsAsmFunction,
kDeserialized,
+ kNeverCompiled,
kCompilerHintsCount // Pseudo entry
};
// Add hints for other modes when they're added.
@@ -7407,6 +7144,8 @@ class SharedFunctionInfo: public HeapObject {
// Allows to use byte-width instructions.
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kStrongModeBitWithinByte =
+ (kStrongModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
static const int kNativeBitWithinByte =
(kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
@@ -7414,12 +7153,18 @@ class SharedFunctionInfo: public HeapObject {
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+ static const int kStrongModeByteOffset =
+ kCompilerHintsOffset +
+ (kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
static const int kNativeByteOffset = kCompilerHintsOffset +
(kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+ static const int kStrongModeByteOffset =
+ kCompilerHintsOffset + (kCompilerHintsSize - 1) -
+ ((kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
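Each of these byte offsets is the same computation: the hint's bit index plus the Smi tag adjustment, divided by kBitsPerByte, selects a byte within the compiler-hints word, mirrored from the far end on big-endian targets. A worked check with assumed sizes:

    #include <cassert>

    // Assumed values for illustration: a 4-byte hints field, 1-bit Smi tag.
    static const int kBitsPerByte = 8;
    static const int kSmiTagSize = 1;
    static const int kHintsSize = 4;  // bytes in the compiler-hints field

    static int LittleEndianByte(int bit) {
      return (bit + kSmiTagSize) / kBitsPerByte;
    }
    static int BigEndianByte(int bit) {
      return (kHintsSize - 1) - LittleEndianByte(bit);
    }

    int main() {
      // A hint at bit 12 lands in byte 1 on little-endian, byte 2 on big-endian.
      assert(LittleEndianByte(12) == 1);
      assert(BigEndianByte(12) == 2);
      return 0;
    }
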
@@ -7564,15 +7309,15 @@ class JSFunction: public JSObject {
// Tells whether this function is defined in an extension script.
inline bool IsFromExtensionScript();
+ // Tells whether this function should be subject to debugging.
+ inline bool IsSubjectToDebugging();
+
// Tells whether or not the function needs arguments adaption.
inline bool NeedsArgumentsAdaption();
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
- // Tells whether or not this function can be optimized.
- inline bool IsOptimizable();
-
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForOptimization();
@@ -7785,9 +7530,6 @@ class JSGlobalProxy : public JSObject {
};
-// Forward declaration.
-class JSBuiltinsObject;
-
// Common super class for JavaScript global objects and the special
// builtins global objects.
class GlobalObject: public JSObject {
@@ -7993,10 +7735,11 @@ class JSDate: public JSObject {
class JSMessageObject: public JSObject {
public:
// [type]: the type of error message.
- DECL_ACCESSORS(type, String)
+ inline int type() const;
+ inline void set_type(int value);
// [argument]: the argument for formatting the error message.
- DECL_ACCESSORS(arguments, JSArray)
+ DECL_ACCESSORS(argument, Object)
// [script]: the script from which the error message originated.
DECL_ACCESSORS(script, Object)
@@ -8828,8 +8571,13 @@ class Name: public HeapObject {
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
- // Whether name can only name own properties.
- inline bool IsOwn();
+ // If the name is private, it can only name own properties.
+ inline bool IsPrivate();
+
+ // If the name is a non-flat string, this method returns a flat version of the
+ // string. Otherwise it'll just return the input.
+ static inline Handle<Name> Flatten(Handle<Name> name,
+ PretenureFlag pretenure = NOT_TENURED);
DECLARE_CAST(Name)
@@ -8907,18 +8655,15 @@ class Name: public HeapObject {
// ES6 symbols.
class Symbol: public Name {
public:
- // [name]: the print name of a symbol, or undefined if none.
+ // [name]: The print name of a symbol, or undefined if none.
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flags, Smi)
- // [is_private]: whether this is a private symbol.
+ // [is_private]: Whether this is a private symbol. Private symbols can only
+ // be used to designate own properties of objects.
DECL_BOOLEAN_ACCESSORS(is_private)
- // [is_own]: whether this is an own symbol, that is, only used to designate
- // own properties of objects.
- DECL_BOOLEAN_ACCESSORS(is_own)
-
DECLARE_CAST(Symbol)
// Dispatched behavior.
@@ -8936,7 +8681,6 @@ class Symbol: public Name {
private:
static const int kPrivateBit = 0;
- static const int kOwnBit = 1;
const char* PrivateSymbolToName() const;
@@ -9834,12 +9578,22 @@ class Cell: public HeapObject {
class PropertyCell : public HeapObject {
public:
+ // [property_details]: details of the global property.
+ DECL_ACCESSORS(property_details_raw, Object)
// [value]: value of the global property.
DECL_ACCESSORS(value, Object)
// [dependent_code]: dependent code that depends on the type of the global
// property.
DECL_ACCESSORS(dependent_code, DependentCode)
+ PropertyDetails property_details() {
+ return PropertyDetails(Smi::cast(property_details_raw()));
+ }
+
+ void set_property_details(PropertyDetails details) {
+ set_property_details_raw(details.AsSmi());
+ }
+
PropertyCellConstantType GetConstantType();
// Computes the new type of the cell's contents for the given value, but
@@ -9847,11 +9601,11 @@ class PropertyCell : public HeapObject {
static PropertyCellType UpdatedType(Handle<PropertyCell> cell,
Handle<Object> value,
PropertyDetails details);
- static void UpdateCell(Handle<NameDictionary> dictionary, int entry,
+ static void UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
Handle<Object> value, PropertyDetails details);
- static Handle<PropertyCell> InvalidateEntry(Handle<NameDictionary> dictionary,
- int entry);
+ static Handle<PropertyCell> InvalidateEntry(
+ Handle<GlobalDictionary> dictionary, int entry);
static void SetValueWithInvalidation(Handle<PropertyCell> cell,
Handle<Object> new_value);
@@ -9863,7 +9617,8 @@ class PropertyCell : public HeapObject {
DECLARE_VERIFIER(PropertyCell)
// Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kDetailsOffset = HeapObject::kHeaderSize;
+ static const int kValueOffset = kDetailsOffset + kPointerSize;
static const int kDependentCodeOffset = kValueOffset + kPointerSize;
static const int kSize = kDependentCodeOffset + kPointerSize;
@@ -9893,6 +9648,10 @@ class WeakCell : public HeapObject {
DECL_ACCESSORS(next, Object)
+ inline void clear_next(Heap* heap);
+
+ inline bool next_cleared();
+
DECLARE_CAST(WeakCell)
DECLARE_PRINTER(WeakCell)
@@ -9925,10 +9684,6 @@ class JSProxy: public JSReceiver {
Handle<JSProxy> proxy,
Handle<Object> receiver,
Handle<Name> name);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetElementWithHandler(
- Handle<JSProxy> proxy,
- Handle<Object> receiver,
- uint32_t index);
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
@@ -9943,10 +9698,6 @@ class JSProxy: public JSReceiver {
GetPropertyAttributesWithHandler(Handle<JSProxy> proxy,
Handle<Object> receiver,
Handle<Name> name);
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetElementAttributeWithHandler(Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- uint32_t index);
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
Handle<Object> value, LanguageMode language_mode);
@@ -9989,19 +9740,11 @@ class JSProxy: public JSReceiver {
private:
friend class JSReceiver;
- MUST_USE_RESULT static inline MaybeHandle<Object> SetElementWithHandler(
- Handle<JSProxy> proxy, Handle<JSReceiver> receiver, uint32_t index,
- Handle<Object> value, LanguageMode language_mode);
-
MUST_USE_RESULT static Maybe<bool> HasPropertyWithHandler(
Handle<JSProxy> proxy, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<bool> HasElementWithHandler(
- Handle<JSProxy> proxy, uint32_t index);
MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler(
Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithHandler(
- Handle<JSProxy> proxy, uint32_t index, LanguageMode language_mode);
MUST_USE_RESULT Object* GetIdentityHash();
@@ -10236,6 +9979,10 @@ class JSWeakSet: public JSWeakCollection {
};
+// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
+enum class SharedFlag { kNotShared, kShared };
+
+
class JSArrayBuffer: public JSObject {
public:
// [backing_store]: backing memory for this array
@@ -10256,6 +10003,9 @@ class JSArrayBuffer: public JSObject {
inline bool was_neutered();
inline void set_was_neutered(bool value);
+ inline bool is_shared();
+ inline void set_is_shared(bool value);
+
DECLARE_CAST(JSArrayBuffer)
void Neuter();
@@ -10280,6 +10030,7 @@ class JSArrayBuffer: public JSObject {
class IsExternal : public BitField<bool, 1, 1> {};
class IsNeuterable : public BitField<bool, 2, 1> {};
class WasNeutered : public BitField<bool, 3, 1> {};
+ class IsShared : public BitField<bool, 4, 1> {};
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
@@ -10322,6 +10073,7 @@ class JSTypedArray: public JSArrayBufferView {
public:
// [length]: length of typed array in elements.
DECL_ACCESSORS(length, Object)
+ inline uint32_t length_value() const;
DECLARE_CAST(JSTypedArray)
@@ -10414,10 +10166,6 @@ class JSArray: public JSObject {
// is set to a smi. This matches the set function on FixedArray.
inline void set_length(Smi* length);
- static void JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
- uint32_t index,
- Handle<Object> value);
-
static bool HasReadOnlyLength(Handle<JSArray> array);
static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array);
@@ -10429,15 +10177,16 @@ class JSArray: public JSObject {
// If the JSArray has fast elements, and new_length would result in
// normalization, returns true.
- static inline bool SetElementsLengthWouldNormalize(
- Heap* heap, Handle<Object> new_length_handle);
+ bool SetLengthWouldNormalize(uint32_t new_length);
+ static inline bool SetLengthWouldNormalize(Heap* heap, uint32_t new_length);
// Initializes the array to a certain length.
- inline bool AllowsSetElementsLength();
- // Can cause GC.
- MUST_USE_RESULT static MaybeHandle<Object> SetElementsLength(
- Handle<JSArray> array,
- Handle<Object> length);
+ inline bool AllowsSetLength();
+
+ static void SetLength(Handle<JSArray> array, uint32_t length);
+ // Same as above but will also queue splice records if |array| is observed.
+ static MaybeHandle<Object> ObservableSetLength(Handle<JSArray> array,
+ uint32_t length);
// Set the content of the array to the content of storage.
static inline void SetContent(Handle<JSArray> array,
@@ -10445,16 +10194,6 @@ class JSArray: public JSObject {
DECLARE_CAST(JSArray)
- // Ensures that the fixed array backing the JSArray has at
- // least the stated size.
- static inline void EnsureSize(Handle<JSArray> array,
- int minimum_size_of_backing_fixed_array);
-
- // Expand the fixed array backing of a fast-case JSArray to at least
- // the requested size.
- static void Expand(Handle<JSArray> array,
- int minimum_size_of_backing_fixed_array);
-
// Dispatched behavior.
DECLARE_PRINTER(JSArray)
DECLARE_VERIFIER(JSArray)
@@ -10507,6 +10246,9 @@ class AccessorInfo: public Struct {
inline bool all_can_write();
inline void set_all_can_write(bool value);
+ inline bool is_special_data_property();
+ inline void set_is_special_data_property(bool value);
+
inline PropertyAttributes property_attributes();
inline void set_property_attributes(PropertyAttributes attributes);
@@ -10539,7 +10281,8 @@ class AccessorInfo: public Struct {
// Bit positions in flag.
static const int kAllCanReadBit = 0;
static const int kAllCanWriteBit = 1;
- class AttributesField: public BitField<PropertyAttributes, 2, 3> {};
+ static const int kSpecialDataProperty = 2;
+ class AttributesField : public BitField<PropertyAttributes, 3, 3> {};
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
};
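Inserting kSpecialDataProperty at bit 2 pushes AttributesField from bits 2..4 up to bits 3..5. A quick mask check of the resulting flag layout:

    #include <cassert>

    // Flag layout after the change: three boolean bits, then a 3-bit
    // PropertyAttributes field starting at bit 3.
    static const int kAllCanReadBit = 0;
    static const int kAllCanWriteBit = 1;
    static const int kSpecialDataProperty = 2;
    static const int kAttributesShift = 3;
    static const int kAttributesMask = 0x7 << kAttributesShift;

    int main() {
      int flag = (1 << kAllCanReadBit) | (1 << kSpecialDataProperty) |
                 (0x5 << kAttributesShift);
      assert((flag >> kAllCanReadBit) & 1);
      assert(!((flag >> kAllCanWriteBit) & 1));
      assert(((flag & kAttributesMask) >> kAttributesShift) == 0x5);
      return 0;
    }
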
@@ -10571,7 +10314,7 @@ class ExecutableAccessorInfo: public AccessorInfo {
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kSize = kDataOffset + kPointerSize;
- static inline void ClearSetter(Handle<ExecutableAccessorInfo> info);
+ static void ClearSetter(Handle<ExecutableAccessorInfo> info);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
@@ -10732,13 +10475,16 @@ class CallHandlerInfo: public Struct {
class TemplateInfo: public Struct {
public:
DECL_ACCESSORS(tag, Object)
+ inline int number_of_properties() const;
+ inline void set_number_of_properties(int value);
DECL_ACCESSORS(property_list, Object)
DECL_ACCESSORS(property_accessors, Object)
DECLARE_VERIFIER(TemplateInfo)
static const int kTagOffset = HeapObject::kHeaderSize;
- static const int kPropertyListOffset = kTagOffset + kPointerSize;
+ static const int kNumberOfProperties = kTagOffset + kPointerSize;
+ static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
static const int kPropertyAccessorsOffset =
kPropertyListOffset + kPointerSize;
static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/optimizing-compile-dispatcher.cc
index a2f2970fc8..f5c57cd1cf 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/optimizing-compile-dispatcher.cc
@@ -356,5 +356,5 @@ void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
osr_buffer_[osr_buffer_cursor_] = job;
osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 3b537d2375..aa0ec104a7 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/ast.h"
+#include "src/ast-literal-reindexer.h"
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -333,10 +334,10 @@ void Parser::SetCachedData(ParseInfo* info) {
FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
- int pos, int end_pos) {
+ int pos, int end_pos,
+ LanguageMode language_mode) {
int materialized_literal_count = -1;
int expected_property_count = -1;
- int handler_count = 0;
int parameter_count = 0;
const AstRawString* name = ast_value_factory()->empty_string();
@@ -345,7 +346,7 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
: FunctionKind::kDefaultBaseConstructor;
Scope* function_scope = NewScope(scope, FUNCTION_SCOPE, kind);
function_scope->SetLanguageMode(
- static_cast<LanguageMode>(scope->language_mode() | STRICT_BIT));
+ static_cast<LanguageMode>(language_mode | STRICT_BIT));
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
@@ -359,8 +360,17 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
AddAssertIsConstruct(body, pos);
if (call_super) {
+ // %_DefaultConstructorCallSuper(new.target, .this_function)
ZoneList<Expression*>* args =
- new (zone()) ZoneList<Expression*>(0, zone());
+ new (zone()) ZoneList<Expression*>(2, zone());
+ VariableProxy* new_target_proxy = scope_->NewUnresolved(
+ factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
+ pos);
+ args->Add(new_target_proxy, zone());
+ VariableProxy* this_function_proxy = scope_->NewUnresolved(
+ factory(), ast_value_factory()->this_function_string(),
+ Variable::NORMAL, pos);
+ args->Add(this_function_proxy, zone());
CallRuntime* call = factory()->NewCallRuntime(
ast_value_factory()->empty_string(),
Runtime::FunctionForId(Runtime::kInlineDefaultConstructorCallSuper),
@@ -370,13 +380,12 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
- handler_count = function_state.handler_count();
}
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
name, ast_value_factory(), function_scope, body,
- materialized_literal_count, expected_property_count, handler_count,
- parameter_count, FunctionLiteral::kNoDuplicateParameters,
+ materialized_literal_count, expected_property_count, parameter_count,
+ FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
FunctionLiteral::kShouldLazyCompile, kind, pos);
@@ -636,45 +645,43 @@ Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
}
-Expression* ParserTraits::NewThrowReferenceError(const char* message, int pos) {
- return NewThrowError(
- parser_->ast_value_factory()->make_reference_error_string(), message,
- parser_->ast_value_factory()->empty_string(), pos);
+Expression* ParserTraits::NewThrowReferenceError(
+ MessageTemplate::Template message, int pos) {
+ return NewThrowError(Runtime::kNewReferenceError, message,
+ parser_->ast_value_factory()->empty_string(), pos);
}
-Expression* ParserTraits::NewThrowSyntaxError(
- const char* message, const AstRawString* arg, int pos) {
- return NewThrowError(parser_->ast_value_factory()->make_syntax_error_string(),
- message, arg, pos);
+Expression* ParserTraits::NewThrowSyntaxError(MessageTemplate::Template message,
+ const AstRawString* arg,
+ int pos) {
+ return NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
}
-Expression* ParserTraits::NewThrowTypeError(
- const char* message, const AstRawString* arg, int pos) {
- return NewThrowError(parser_->ast_value_factory()->make_type_error_string(),
- message, arg, pos);
+Expression* ParserTraits::NewThrowTypeError(MessageTemplate::Template message,
+ const AstRawString* arg, int pos) {
+ return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
}
-Expression* ParserTraits::NewThrowError(
- const AstRawString* constructor, const char* message,
- const AstRawString* arg, int pos) {
+Expression* ParserTraits::NewThrowError(Runtime::FunctionId id,
+ MessageTemplate::Template message,
+ const AstRawString* arg, int pos) {
Zone* zone = parser_->zone();
- const AstRawString* type =
- parser_->ast_value_factory()->GetOneByteString(message);
ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
- args->Add(parser_->factory()->NewStringLiteral(type, pos), zone);
+ args->Add(parser_->factory()->NewSmiLiteral(message, pos), zone);
args->Add(parser_->factory()->NewStringLiteral(arg, pos), zone);
- CallRuntime* call_constructor =
- parser_->factory()->NewCallRuntime(constructor, NULL, args, pos);
+ CallRuntime* call_constructor = parser_->factory()->NewCallRuntime(
+ parser_->ast_value_factory()->empty_string(), Runtime::FunctionForId(id),
+ args, pos);
return parser_->factory()->NewThrow(call_constructor, pos);
}
void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- const char* message, const char* arg,
- ParseErrorType error_type) {
+ MessageTemplate::Template message,
+ const char* arg, ParseErrorType error_type) {
if (parser_->stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at a time
@@ -687,14 +694,15 @@ void ParserTraits::ReportMessageAt(Scanner::Location source_location,
}
-void ParserTraits::ReportMessage(const char* message, const char* arg,
- ParseErrorType error_type) {
+void ParserTraits::ReportMessage(MessageTemplate::Template message,
+ const char* arg, ParseErrorType error_type) {
Scanner::Location source_location = parser_->scanner()->location();
ReportMessageAt(source_location, message, arg, error_type);
}
-void ParserTraits::ReportMessage(const char* message, const AstRawString* arg,
+void ParserTraits::ReportMessage(MessageTemplate::Template message,
+ const AstRawString* arg,
ParseErrorType error_type) {
Scanner::Location source_location = parser_->scanner()->location();
ReportMessageAt(source_location, message, arg, error_type);
@@ -702,7 +710,8 @@ void ParserTraits::ReportMessage(const char* message, const AstRawString* arg,
void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- const char* message, const AstRawString* arg,
+ MessageTemplate::Template message,
+ const AstRawString* arg,
ParseErrorType error_type) {
if (parser_->stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
@@ -740,20 +749,56 @@ const AstRawString* ParserTraits::GetNextSymbol(Scanner* scanner) {
Expression* ParserTraits::ThisExpression(Scope* scope, AstNodeFactory* factory,
int pos) {
- return factory->NewVariableProxy(scope->receiver(), pos);
+ return scope->NewUnresolved(factory,
+ parser_->ast_value_factory()->this_string(),
+ Variable::THIS, pos, pos + 4);
}
-Expression* ParserTraits::SuperReference(Scope* scope, AstNodeFactory* factory,
- int pos) {
- return factory->NewSuperReference(
- ThisExpression(scope, factory, pos)->AsVariableProxy(),
- pos);
+
+Expression* ParserTraits::SuperPropertyReference(Scope* scope,
+ AstNodeFactory* factory,
+ int pos) {
+ // this_function[home_object_symbol]
+ VariableProxy* this_function_proxy = scope->NewUnresolved(
+ factory, parser_->ast_value_factory()->this_function_string(),
+ Variable::NORMAL, pos);
+ Expression* home_object_symbol_literal =
+ factory->NewSymbolLiteral("home_object_symbol", RelocInfo::kNoPosition);
+ Expression* home_object = factory->NewProperty(
+ this_function_proxy, home_object_symbol_literal, pos);
+ return factory->NewSuperPropertyReference(
+ ThisExpression(scope, factory, pos)->AsVariableProxy(), home_object, pos);
+}
+
+
+Expression* ParserTraits::SuperCallReference(Scope* scope,
+ AstNodeFactory* factory, int pos) {
+ VariableProxy* new_target_proxy = scope->NewUnresolved(
+ factory, parser_->ast_value_factory()->new_target_string(),
+ Variable::NORMAL, pos);
+ VariableProxy* this_function_proxy = scope->NewUnresolved(
+ factory, parser_->ast_value_factory()->this_function_string(),
+ Variable::NORMAL, pos);
+ return factory->NewSuperCallReference(
+ ThisExpression(scope, factory, pos)->AsVariableProxy(), new_target_proxy,
+ this_function_proxy, pos);
+}
+
+
+Expression* ParserTraits::NewTargetExpression(Scope* scope,
+ AstNodeFactory* factory,
+ int pos) {
+ static const int kNewTargetStringLength = 10;
+ return scope->NewUnresolved(
+ factory, parser_->ast_value_factory()->new_target_string(),
+ Variable::NORMAL, pos, pos + kNewTargetStringLength);
}
Expression* ParserTraits::DefaultConstructor(bool call_super, Scope* scope,
- int pos, int end_pos) {
- return parser_->DefaultConstructor(call_super, scope, pos, end_pos);
+ int pos, int end_pos,
+ LanguageMode mode) {
+ return parser_->DefaultConstructor(call_super, scope, pos, end_pos, mode);
}
@@ -772,8 +817,9 @@ Literal* ParserTraits::ExpressionFromLiteral(Token::Value token, int pos,
return factory->NewSmiLiteral(value, pos);
}
case Token::NUMBER: {
+ bool has_dot = scanner->ContainsDot();
double value = scanner->DoubleValue();
- return factory->NewNumberLiteral(value, pos);
+ return factory->NewNumberLiteral(value, pos, has_dot);
}
default:
DCHECK(false);
@@ -788,7 +834,8 @@ Expression* ParserTraits::ExpressionFromIdentifier(const AstRawString* name,
Scope* scope,
AstNodeFactory* factory) {
if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
- return scope->NewUnresolved(factory, name, start_position, end_position);
+ return scope->NewUnresolved(factory, name, Variable::NORMAL, start_position,
+ end_position);
}
@@ -828,10 +875,11 @@ FunctionLiteral* ParserTraits::ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok) {
return parser_->ParseFunctionLiteral(
name, function_name_location, name_is_strict_reserved, kind,
- function_token_position, type, arity_restriction, ok);
+ function_token_position, type, arity_restriction, language_mode, ok);
}
@@ -864,8 +912,6 @@ Parser::Parser(ParseInfo* info)
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_harmony_modules(!info->is_native() && FLAG_harmony_modules);
set_allow_harmony_arrow_functions(FLAG_harmony_arrow_functions);
- set_allow_harmony_classes(FLAG_harmony_classes);
- set_allow_harmony_object_literals(FLAG_harmony_object_literals);
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_unicode(FLAG_harmony_unicode);
set_allow_harmony_computed_property_names(
@@ -873,7 +919,10 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_rest_params(FLAG_harmony_rest_parameters);
set_allow_harmony_spreadcalls(FLAG_harmony_spreadcalls);
set_allow_harmony_destructuring(FLAG_harmony_destructuring);
+ set_allow_harmony_spread_arrays(FLAG_harmony_spread_arrays);
+ set_allow_harmony_new_target(FLAG_harmony_new_target);
set_allow_strong_mode(FLAG_strong_mode);
+ set_allow_legacy_const(FLAG_legacy_const);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -967,6 +1016,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
FunctionLiteral* result = NULL;
{
+ // TODO(wingo): Add an outer GLOBAL_SCOPE corresponding to the native
+ // context, which will have the "this" binding for script scopes.
Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
info->set_script_scope(scope);
if (!info->context().is_null() && !info->context()->IsNativeContext()) {
@@ -1022,7 +1073,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
!body->at(0)->IsExpressionStatement() ||
!body->at(0)->AsExpressionStatement()->
expression()->IsFunctionLiteral()) {
- ReportMessage("single_function_literal");
+ ReportMessage(MessageTemplate::kSingleFunctionLiteral);
ok = false;
}
}
@@ -1031,8 +1082,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
result = factory()->NewFunctionLiteral(
ast_value_factory()->empty_string(), ast_value_factory(), scope_,
body, function_state.materialized_literal_count(),
- function_state.expected_property_count(),
- function_state.handler_count(), 0,
+ function_state.expected_property_count(), 0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval,
FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction,
@@ -1121,7 +1171,6 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
DCHECK(is_sloppy(scope->language_mode()) ||
is_strict(info->language_mode()));
DCHECK(info->language_mode() == shared_info->language_mode());
- scope->SetLanguageMode(shared_info->language_mode());
FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -1130,24 +1179,34 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
bool ok = true;
if (shared_info->is_arrow()) {
- Scope* scope = NewScope(scope_, ARROW_SCOPE);
+ Scope* scope =
+ NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
+ scope->SetLanguageMode(shared_info->language_mode());
scope->set_start_position(shared_info->start_position());
- FormalParameterErrorLocations error_locs;
- bool has_rest = false;
- if (Check(Token::LPAREN)) {
- // '(' StrictFormalParameters ')'
- ParseFormalParameterList(scope, &error_locs, &has_rest, &ok);
- if (ok) ok = Check(Token::RPAREN);
- } else {
- // BindingIdentifier
- ParseFormalParameter(scope, &error_locs, has_rest, &ok);
+ ExpressionClassifier formals_classifier;
+ ParserFormalParameterParsingState parsing_state(scope);
+ Checkpoint checkpoint(this);
+ {
+        // Parsing patterns as variable reference expressions creates
+        // NewUnresolved references in the current scope. Enter the arrow
+        // function scope for formal parameter parsing.
+ BlockState block_state(&scope_, scope);
+ if (Check(Token::LPAREN)) {
+ // '(' StrictFormalParameters ')'
+ ParseFormalParameterList(&parsing_state, &formals_classifier, &ok);
+ if (ok) ok = Check(Token::RPAREN);
+ } else {
+ // BindingIdentifier
+ const bool is_rest = false;
+ ParseFormalParameter(is_rest, &parsing_state, &formals_classifier,
+ &ok);
+ }
}
if (ok) {
- ExpressionClassifier classifier;
- Expression* expression = ParseArrowFunctionLiteral(
- scope, error_locs, has_rest, &classifier, &ok);
- ValidateExpression(&classifier, &ok);
+ checkpoint.Restore(&parsing_state.materialized_literals_count);
+ Expression* expression =
+ ParseArrowFunctionLiteral(parsing_state, formals_classifier, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
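Arrow functions in the lazy path are now parsed the way the eager parser sees them: the parameter list is read as an expression first (errors collected in the ExpressionClassifier) and only reinterpreted as formal parameters once the arrow is committed to. A sketch of the shared prefix that forces this design:

    var a = 1, b = 2;
    var x = (a, b);          // comma expression: x === 2
    var f = (a, b) => a + b; // same prefix, reinterpreted as a parameter list
    console.log(x, f(3, 4)); // 2 7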
@@ -1168,13 +1227,13 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
} else if (shared_info->is_default_constructor()) {
result = DefaultConstructor(IsSubclassConstructor(shared_info->kind()),
scope, shared_info->start_position(),
- shared_info->end_position());
+ shared_info->end_position(),
+ shared_info->language_mode());
} else {
- result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
- false, // Strict mode name already checked.
- shared_info->kind(), RelocInfo::kNoPosition,
- function_type,
- FunctionLiteral::NORMAL_ARITY, &ok);
+ result = ParseFunctionLiteral(
+ raw_name, Scanner::Location::invalid(), false, shared_info->kind(),
+ RelocInfo::kNoPosition, function_type, FunctionLiteral::NORMAL_ARITY,
+ shared_info->language_mode(), &ok);
}
// Make sure the results agree.
DCHECK(ok == (result != NULL));
@@ -1222,13 +1281,13 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
Scanner::Location super_loc = function_state_->super_location();
if (this_loc.beg_pos != old_this_loc.beg_pos &&
this_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(this_loc, "strong_constructor_this");
+ ReportMessageAt(this_loc, MessageTemplate::kStrongConstructorThis);
*ok = false;
return nullptr;
}
if (super_loc.beg_pos != old_super_loc.beg_pos &&
super_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(super_loc, "strong_constructor_super");
+ ReportMessageAt(super_loc, MessageTemplate::kStrongConstructorSuper);
*ok = false;
return nullptr;
}
@@ -1324,16 +1383,21 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
}
return ParseClassDeclaration(NULL, ok);
case Token::CONST:
+ if (allow_const()) {
+ return ParseVariableStatement(kStatementListItem, NULL, ok);
+ }
+ break;
case Token::VAR:
return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::LET:
if (is_strict(language_mode())) {
return ParseVariableStatement(kStatementListItem, NULL, ok);
}
- // Fall through.
+ break;
default:
- return ParseStatement(NULL, ok);
+ break;
}
+ return ParseStatement(NULL, ok);
}
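After the restructuring, `const` begins a declaration only when allow_const() is set and `let` only in strict mode; everything else falls through to ParseStatement. In sloppy mode `let` therefore still parses as an ordinary identifier, e.g. (sloppy-mode sketch):

    let = 1;              // expression statement assigning to a variable named `let`
    console.log(let + 1); // 2
    // In strict mode the same token starts a lexical declaration:
    //   "use strict"; let x = 1;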
@@ -1382,7 +1446,8 @@ void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
// TODO(adamk): Pass both local_name and export_name once ParserTraits
// supports multiple arg error messages.
// Also try to report this at a better location.
- ParserTraits::ReportMessage("module_export_undefined", it.local_name());
+ ParserTraits::ReportMessage(MessageTemplate::kModuleExportUndefined,
+ it.local_name());
*ok = false;
return NULL;
}
@@ -1479,21 +1544,21 @@ ZoneList<ImportDeclaration*>* Parser::ParseNamedImports(int pos, bool* ok) {
}
if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false)) {
*ok = false;
- ReportMessage("unexpected_reserved");
+ ReportMessage(MessageTemplate::kUnexpectedReserved);
return NULL;
} else if (IsEvalOrArguments(local_name)) {
*ok = false;
- ReportMessage("strict_eval_arguments");
+ ReportMessage(MessageTemplate::kStrictEvalArguments);
return NULL;
} else if (is_strong(language_mode()) && IsUndefined(local_name)) {
*ok = false;
- ReportMessage("strong_undefined");
+ ReportMessage(MessageTemplate::kStrongUndefined);
return NULL;
}
VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
ImportDeclaration* declaration =
factory()->NewImportDeclaration(proxy, import_name, NULL, scope_, pos);
- Declare(declaration, true, CHECK_OK);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
result->Add(declaration, zone());
if (peek() == Token::RBRACE) break;
Expect(Token::COMMA, CHECK_OK);
@@ -1541,7 +1606,8 @@ Statement* Parser::ParseImportDeclaration(bool* ok) {
VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
import_default_declaration = factory()->NewImportDeclaration(
proxy, ast_value_factory()->default_string(), NULL, scope_, pos);
- Declare(import_default_declaration, true, CHECK_OK);
+ Declare(import_default_declaration, DeclarationDescriptor::NORMAL, true,
+ CHECK_OK);
}
const AstRawString* module_instance_binding = NULL;
@@ -1631,8 +1697,8 @@ Statement* Parser::ParseExportDefault(bool* ok) {
if (names.length() == 1) {
scope_->module()->AddLocalExport(default_string, names.first(), zone(), ok);
if (!*ok) {
- ParserTraits::ReportMessageAt(default_loc, "duplicate_export",
- default_string);
+ ParserTraits::ReportMessageAt(
+ default_loc, MessageTemplate::kDuplicateExport, default_string);
return NULL;
}
} else {
@@ -1695,7 +1761,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
} else if (reserved_loc.IsValid()) {
// No FromClause, so reserved words are invalid in ExportClause.
*ok = false;
- ReportMessageAt(reserved_loc, "unexpected_reserved");
+ ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
return NULL;
}
ExpectSemicolon(CHECK_OK);
@@ -1708,7 +1774,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
zone(), ok);
if (!*ok) {
ParserTraits::ReportMessageAt(export_locations[i],
- "duplicate_export", export_names[i]);
+ MessageTemplate::kDuplicateExport,
+ export_names[i]);
return NULL;
}
}
@@ -1748,7 +1815,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
descriptor->AddLocalExport(names[i], names[i], zone(), ok);
if (!*ok) {
// TODO(adamk): Possibly report this error at the right place.
- ParserTraits::ReportMessage("duplicate_export", names[i]);
+ ParserTraits::ReportMessage(MessageTemplate::kDuplicateExport, names[i]);
return NULL;
}
}
@@ -1803,7 +1870,8 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
case Token::SEMICOLON:
if (is_strong(language_mode())) {
- ReportMessageAt(scanner()->peek_location(), "strong_empty");
+ ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kStrongEmpty);
*ok = false;
return NULL;
}
@@ -1860,7 +1928,8 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
// Statement:
// GeneratorDeclaration
if (is_strict(language_mode())) {
- ReportMessageAt(scanner()->peek_location(), "strict_function");
+ ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kStrictFunction);
*ok = false;
return NULL;
}
@@ -1877,7 +1946,7 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
// In ES6 CONST is not allowed as a Statement, only as a
// LexicalDeclaration, however we continue to allow it in sloppy mode for
// backwards compatibility.
- if (is_sloppy(language_mode())) {
+ if (is_sloppy(language_mode()) && allow_legacy_const()) {
return ParseVariableStatement(kStatement, NULL, ok);
}
@@ -1919,13 +1988,15 @@ VariableProxy* Parser::NewUnresolved(const AstRawString* name,
// scope.
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
- return DeclarationScope(mode)->NewUnresolved(factory(), name,
- scanner()->location().beg_pos,
- scanner()->location().end_pos);
+ return DeclarationScope(mode)->NewUnresolved(
+ factory(), name, Variable::NORMAL, scanner()->location().beg_pos,
+ scanner()->location().end_pos);
}
-Variable* Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
+Variable* Parser::Declare(Declaration* declaration,
+ DeclarationDescriptor::Kind declaration_kind,
+ bool resolve, bool* ok) {
VariableProxy* proxy = declaration->proxy();
DCHECK(proxy->raw_name() != NULL);
const AstRawString* name = proxy->raw_name();
@@ -1984,12 +2055,16 @@ Variable* Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
if (is_strict(language_mode())) {
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
- ParserTraits::ReportMessage("var_redeclaration", name);
+ if (declaration_kind == DeclarationDescriptor::NORMAL) {
+ ParserTraits::ReportMessage(MessageTemplate::kVarRedeclaration, name);
+ } else {
+ ParserTraits::ReportMessage(MessageTemplate::kStrictParamDupe);
+ }
*ok = false;
return nullptr;
}
- Expression* expression = NewThrowTypeError(
- "var_redeclaration", name, declaration->position());
+ Expression* expression = NewThrowSyntaxError(
+ MessageTemplate::kVarRedeclaration, name, declaration->position());
declaration_scope->SetIllegalRedeclaration(expression);
} else if (mode == VAR) {
var->set_maybe_assigned();
@@ -2029,7 +2104,7 @@ Variable* Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// TODO(sigurds) figure out if kNotAssigned is OK here
var = new (zone()) Variable(declaration_scope, name, mode, kind,
declaration->initialization(), kNotAssigned);
- var->AllocateTo(Variable::LOOKUP, -1);
+ var->AllocateTo(VariableLocation::LOOKUP, -1);
resolve = true;
}
@@ -2098,7 +2173,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
VariableProxy* proxy = NewUnresolved(name, VAR);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
- Declare(declaration, true, CHECK_OK);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
return factory()->NewExpressionStatement(
@@ -2121,12 +2196,12 @@ Statement* Parser::ParseFunctionDeclaration(
bool is_strict_reserved = false;
const AstRawString* name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
- FunctionLiteral* fun =
- ParseFunctionLiteral(name, scanner()->location(), is_strict_reserved,
- is_generator ? FunctionKind::kGeneratorFunction
- : FunctionKind::kNormalFunction,
- pos, FunctionLiteral::DECLARATION,
- FunctionLiteral::NORMAL_ARITY, CHECK_OK);
+ FunctionLiteral* fun = ParseFunctionLiteral(
+ name, scanner()->location(), is_strict_reserved,
+ is_generator ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction,
+ pos, FunctionLiteral::DECLARATION, FunctionLiteral::NORMAL_ARITY,
+ language_mode(), CHECK_OK);
// Even if we're not at the top-level of the global or a function
// scope, we treat it as such and introduce the function with its
// initial value upon entering the corresponding scope.
@@ -2143,7 +2218,7 @@ Statement* Parser::ParseFunctionDeclaration(
VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
- Declare(declaration, true, CHECK_OK);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
if (names) names->Add(name, zone());
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
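As the comment above notes, the declared function is introduced with its value when the scope is entered, which is what makes pre-declaration calls work:

    console.log(typeof f); // "function": bound on scope entry, not at the statement
    f();                   // "hoisted"
    function f() { console.log("hoisted"); }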
@@ -2166,7 +2241,7 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
Expect(Token::CLASS, CHECK_OK);
if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
- ReportMessage("sloppy_lexical");
+ ReportMessage(MessageTemplate::kSloppyLexical);
*ok = false;
return NULL;
}
@@ -2184,7 +2259,8 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, mode, scope_, pos, is_class_declaration,
scope_->class_declaration_group_start());
- Variable* outer_class_variable = Declare(declaration, true, CHECK_OK);
+ Variable* outer_class_variable =
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
proxy->var()->set_initializer_position(position());
// This is needed because a class ("class Name { }") creates two bindings (one
// in the outer scope, and one in the class scope). The method is a function
@@ -2273,30 +2349,58 @@ Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
}
+const AstRawString* Parser::DeclarationParsingResult::SingleName() const {
+ if (declarations.length() != 1) return nullptr;
+ const Declaration& declaration = declarations.at(0);
+ if (declaration.pattern->IsVariableProxy()) {
+ return declaration.pattern->AsVariableProxy()->raw_name();
+ }
+ return nullptr;
+}
+
+
+Block* Parser::DeclarationParsingResult::BuildInitializationBlock(
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ Block* result = descriptor.parser->factory()->NewBlock(
+ NULL, 1, true, descriptor.declaration_pos);
+ for (auto declaration : declarations) {
+ PatternRewriter::DeclareAndInitializeVariables(
+ result, &descriptor, &declaration, names, CHECK_OK);
+ }
+ return result;
+}
+
+
Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
bool* ok) {
// VariableStatement ::
// VariableDeclarations ';'
- const AstRawString* ignore;
- Block* result = ParseVariableDeclarations(
- var_context, nullptr, names, &ignore, nullptr, nullptr, CHECK_OK);
+ // The scope of a var/const declared variable anywhere inside a function
+ // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
+ // transform a source-level var/const declaration into a (Function)
+ // Scope declaration, and rewrite the source-level initialization into an
+ // assignment statement. We use a block to collect multiple assignments.
+ //
+ // We mark the block as initializer block because we don't want the
+ // rewriter to add a '.result' assignment to such a block (to get compliant
+ // behavior for code such as print(eval('var x = 7')), and for cosmetic
+ // reasons when pretty-printing. Also, unless an assignment (initialization)
+ // is inside an initializer block, it is ignored.
+
+ DeclarationParsingResult parsing_result;
+ ParseVariableDeclarations(var_context, &parsing_result, CHECK_OK);
ExpectSemicolon(CHECK_OK);
+
+ Block* result = parsing_result.BuildInitializationBlock(names, CHECK_OK);
return result;
}
-// If the variable declaration declares exactly one non-const
-// variable, then *out is set to that variable. In all other cases,
-// *out is untouched; in particular, it is the caller's responsibility
-// to initialize it properly. This mechanism is used for the parsing
-// of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(
- VariableDeclarationContext var_context, int* num_decl,
- ZoneList<const AstRawString*>* names, const AstRawString** out,
- Scanner::Location* first_initializer_loc, Scanner::Location* bindings_loc,
- bool* ok) {
+void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ bool* ok) {
// VariableDeclarations ::
// ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
//
@@ -2311,313 +2415,142 @@ Block* Parser::ParseVariableDeclarations(
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
- int pos = peek_position();
- VariableMode mode = VAR;
+ parsing_result->descriptor.parser = this;
+ parsing_result->descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+ parsing_result->descriptor.declaration_pos = peek_position();
+ parsing_result->descriptor.initialization_pos = peek_position();
+ parsing_result->descriptor.mode = VAR;
// True if the binding needs initialization. 'let' and 'const' declared
// bindings are created uninitialized by their declaration nodes and
// need initialization. 'var' declared bindings are always initialized
// immediately by their declaration nodes.
- bool needs_init = false;
- bool is_const = false;
- Token::Value init_op = Token::INIT_VAR;
+ parsing_result->descriptor.needs_init = false;
+ parsing_result->descriptor.is_const = false;
+ parsing_result->descriptor.init_op = Token::INIT_VAR;
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strong_var");
+ ReportMessageAt(location, MessageTemplate::kStrongVar);
*ok = false;
- return NULL;
+ return;
}
Consume(Token::VAR);
- } else if (peek() == Token::CONST) {
+ } else if (peek() == Token::CONST && allow_const()) {
Consume(Token::CONST);
- if (is_sloppy(language_mode())) {
- mode = CONST_LEGACY;
- init_op = Token::INIT_CONST_LEGACY;
+ if (is_sloppy(language_mode()) && allow_legacy_const()) {
+ parsing_result->descriptor.mode = CONST_LEGACY;
+ parsing_result->descriptor.init_op = Token::INIT_CONST_LEGACY;
++use_counts_[v8::Isolate::kLegacyConst];
} else {
+ DCHECK(is_strict(language_mode()));
DCHECK(var_context != kStatement);
- mode = CONST;
- init_op = Token::INIT_CONST;
+ parsing_result->descriptor.mode = CONST;
+ parsing_result->descriptor.init_op = Token::INIT_CONST;
}
- is_const = true;
- needs_init = true;
+ parsing_result->descriptor.is_const = true;
+ parsing_result->descriptor.needs_init = true;
} else if (peek() == Token::LET && is_strict(language_mode())) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
- mode = LET;
- needs_init = true;
- init_op = Token::INIT_LET;
+ parsing_result->descriptor.mode = LET;
+ parsing_result->descriptor.needs_init = true;
+ parsing_result->descriptor.init_op = Token::INIT_LET;
} else {
UNREACHABLE(); // by current callers
}
- Scope* declaration_scope = DeclarationScope(mode);
+ parsing_result->descriptor.declaration_scope =
+ DeclarationScope(parsing_result->descriptor.mode);
+ parsing_result->descriptor.scope = scope_;
- // The scope of a var/const declared variable anywhere inside a function
- // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
- // transform a source-level var/const declaration into a (Function)
- // Scope declaration, and rewrite the source-level initialization into an
- // assignment statement. We use a block to collect multiple assignments.
- //
- // We mark the block as initializer block because we don't want the
- // rewriter to add a '.result' assignment to such a block (to get compliant
- // behavior for code such as print(eval('var x = 7')), and for cosmetic
- // reasons when pretty-printing. Also, unless an assignment (initialization)
- // is inside an initializer block, it is ignored.
- //
- // Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true, pos);
- int nvars = 0; // the number of variables declared
+
+ bool first_declaration = true;
int bindings_start = peek_position();
- const AstRawString* name = NULL;
- const AstRawString* first_name = NULL;
bool is_for_iteration_variable;
do {
if (fni_ != NULL) fni_->Enter();
- // Parse variable name.
- if (nvars > 0) Consume(Token::COMMA);
+ // Parse name.
+ if (!first_declaration) Consume(Token::COMMA);
+ Expression* pattern;
{
ExpressionClassifier pattern_classifier;
Token::Value next = peek();
- Expression* pattern =
- ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
- ValidateBindingPattern(&pattern_classifier, CHECK_OK);
- if (pattern->IsVariableProxy() &&
- pattern->AsVariableProxy()->IsValidReferenceExpression()) {
- scope_->RemoveUnresolved(pattern->AsVariableProxy());
- name = pattern->AsVariableProxy()->raw_name();
- } else if (allow_harmony_destructuring()) {
- // TODO(dslomov): really destructure.
- name = ast_value_factory()->GetOneByteString(".temp.variable");
- } else {
+ pattern = ParsePrimaryExpression(&pattern_classifier, ok);
+ if (!*ok) return;
+ ValidateBindingPattern(&pattern_classifier, ok);
+ if (!*ok) return;
+ if (!allow_harmony_destructuring() && !pattern->IsVariableProxy()) {
ReportUnexpectedToken(next);
*ok = false;
- return nullptr;
+ return;
}
}
- if (!first_name) first_name = name;
Scanner::Location variable_loc = scanner()->location();
- if (fni_ != NULL) fni_->PushVariableName(name);
-
- // Declare variable.
- // Note that we *always* must treat the initial value via a separate init
- // assignment for variables and constants because the value must be assigned
- // when the variable is encountered in the source. But the variable/constant
- // is declared (and set to 'undefined') upon entering the function within
- // which the variable or constant is declared. Only function variables have
- // an initial value in the declaration (because they are initialized upon
- // entering the function).
- //
- // If we have a const declaration, in an inner scope, the proxy is always
- // bound to the declared variable (independent of possibly surrounding with
- // statements).
- // For let/const declarations in harmony mode, we can also immediately
- // pre-resolve the proxy because it resides in the same scope as the
- // declaration.
+ const AstRawString* single_name =
+ pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
+ : nullptr;
+ if (single_name != nullptr) {
+ if (fni_ != NULL) fni_->PushVariableName(single_name);
+ }
+
is_for_iteration_variable =
var_context == kForStatement &&
(peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
- if (is_for_iteration_variable && mode == CONST) {
- needs_init = false;
- }
-
- VariableProxy* proxy = NewUnresolved(name, mode);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
- Variable* var = Declare(declaration, mode != VAR, CHECK_OK);
- DCHECK_NOT_NULL(var);
- DCHECK(!proxy->is_resolved() || proxy->var() == var);
- nvars++;
- if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessage("too_many_variables");
- *ok = false;
- return NULL;
+ if (is_for_iteration_variable && parsing_result->descriptor.mode == CONST) {
+ parsing_result->descriptor.needs_init = false;
}
- if (names) names->Add(name, zone());
- // Parse initialization expression if present and/or needed. A
- // declaration of the form:
- //
- // var v = x;
- //
- // is syntactic sugar for:
- //
- // var v; v = x;
- //
- // In particular, we need to re-lookup 'v' (in scope_, not
- // declaration_scope) as it may be a different 'v' than the 'v' in the
- // declaration (e.g., if we are inside a 'with' statement or 'catch'
- // block).
- //
- // However, note that const declarations are different! A const
- // declaration of the form:
- //
- // const c = x;
- //
- // is *not* syntactic sugar for:
- //
- // const c; c = x;
- //
- // The "variable" c initialized to x is the same as the declared
- // one - there is no re-lookup (see the last parameter of the
- // Declare() call above).
-
- Scope* initialization_scope = is_const ? declaration_scope : scope_;
Expression* value = NULL;
- int pos = -1;
// Harmony consts have non-optional initializers.
- if (peek() == Token::ASSIGN ||
- (mode == CONST && !is_for_iteration_variable)) {
- Expect(Token::ASSIGN, CHECK_OK);
- pos = position();
+ int initializer_position = RelocInfo::kNoPosition;
+ if (peek() == Token::ASSIGN || (parsing_result->descriptor.mode == CONST &&
+ !is_for_iteration_variable)) {
+ Expect(Token::ASSIGN, ok);
+ if (!*ok) return;
ExpressionClassifier classifier;
value = ParseAssignmentExpression(var_context != kForStatement,
- &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ &classifier, ok);
+ if (!*ok) return;
+ ValidateExpression(&classifier, ok);
+ if (!*ok) return;
variable_loc.end_pos = scanner()->location().end_pos;
- if (first_initializer_loc && !first_initializer_loc->IsValid()) {
- *first_initializer_loc = variable_loc;
+ if (!parsing_result->first_initializer_loc.IsValid()) {
+ parsing_result->first_initializer_loc = variable_loc;
}
// Don't infer if it is "a = function(){...}();"-like expression.
- if (fni_ != NULL &&
- value->AsCall() == NULL &&
- value->AsCallNew() == NULL) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
+ if (single_name) {
+ if (fni_ != NULL && value->AsCall() == NULL &&
+ value->AsCallNew() == NULL) {
+ fni_->Infer();
+ } else {
+ fni_->RemoveLastFunction();
+ }
}
// End position of the initializer is after the assignment expression.
- var->set_initializer_position(scanner()->location().end_pos);
+ initializer_position = scanner()->location().end_pos;
} else {
// End position of the initializer is after the variable.
- var->set_initializer_position(position());
+ initializer_position = position();
}
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
- if (value == NULL && needs_init) {
+ if (value == NULL && parsing_result->descriptor.needs_init) {
value = GetLiteralUndefined(position());
}
- // Global variable declarations must be compiled in a specific
- // way. When the script containing the global variable declaration
- // is entered, the global variable must be declared, so that if it
- // doesn't exist (on the global object itself, see ES5 errata) it
- // gets created with an initial undefined value. This is handled
- // by the declarations part of the function representing the
- // top-level global code; see Runtime::DeclareGlobalVariable. If
- // it already exists (in the object or in a prototype), it is
- // *not* touched until the variable declaration statement is
- // executed.
- //
- // Executing the variable declaration statement will always
- // guarantee to give the global object an own property.
- // This way, global variable declarations can shadow
- // properties in the prototype chain, but only after the variable
- // declaration statement has been executed. This is important in
- // browsers where the global object (window) has lots of
- // properties defined in prototype objects.
- if (initialization_scope->is_script_scope() &&
- !IsLexicalVariableMode(mode)) {
- // Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments =
- new(zone()) ZoneList<Expression*>(3, zone());
- // We have at least 1 parameter.
- arguments->Add(factory()->NewStringLiteral(name, pos), zone());
- CallRuntime* initialize;
-
- if (is_const) {
- arguments->Add(value, zone());
- value = NULL; // zap the value to avoid the unnecessary assignment
-
- // Construct the call to Runtime_InitializeConstGlobal
- // and add it to the initialization statement block.
- // Note that the function does different things depending on
- // the number of arguments (1 or 2).
- initialize = factory()->NewCallRuntime(
- ast_value_factory()->initialize_const_global_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments,
- pos);
- } else {
- // Add language mode.
- // We may want to pass singleton to avoid Literal allocations.
- LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
-
- // Be careful not to assign a value to the global variable if
- // we're in a with. The initialization value should not
- // necessarily be stored in the global object in that case,
- // which is why we need to generate a separate assignment node.
- if (value != NULL && !inside_with()) {
- arguments->Add(value, zone());
- value = NULL; // zap the value to avoid the unnecessary assignment
- // Construct the call to Runtime_InitializeVarGlobal
- // and add it to the initialization statement block.
- initialize = factory()->NewCallRuntime(
- ast_value_factory()->initialize_var_global_string(),
- Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments,
- pos);
- } else {
- initialize = NULL;
- }
- }
-
- if (initialize != NULL) {
- block->AddStatement(factory()->NewExpressionStatement(
- initialize, RelocInfo::kNoPosition),
- zone());
- }
- } else if (needs_init) {
- // Constant initializations always assign to the declared constant which
- // is always at the function scope level. This is only relevant for
- // dynamically looked-up variables and constants (the start context for
- // constant lookups is always the function context, while it is the top
- // context for var declared variables). Sigh...
- // For 'let' and 'const' declared variables in harmony mode the
- // initialization also always assigns to the declared variable.
- DCHECK(proxy != NULL);
- DCHECK(proxy->var() != NULL);
- DCHECK(value != NULL);
- Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, pos);
- block->AddStatement(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
- value = NULL;
- }
-
- // Add an assignment node to the initialization statement block if we still
- // have a pending initialization value.
- if (value != NULL) {
- DCHECK(mode == VAR);
- // 'var' initializations are simply assignments (with all the consequences
- // if they are inside a 'with' statement - they may change a 'with' object
- // property).
- VariableProxy* proxy =
- initialization_scope->NewUnresolved(factory(), name);
- Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, pos);
- block->AddStatement(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
- }
-
- if (fni_ != NULL) fni_->Leave();
+ if (single_name && fni_ != NULL) fni_->Leave();
+ parsing_result->declarations.Add(DeclarationParsingResult::Declaration(
+ pattern, initializer_position, value));
+ first_declaration = false;
} while (peek() == Token::COMMA);
- if (bindings_loc) {
- *bindings_loc =
- Scanner::Location(bindings_start, scanner()->location().end_pos);
- }
-
- if (num_decl) *num_decl = nvars;
- *out = first_name;
-
- return block;
+ parsing_result->bindings_loc =
+ Scanner::Location(bindings_start, scanner()->location().end_pos);
}
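ParseVariableDeclarations no longer builds the initialization block itself; it records each comma-separated declaration as a (pattern, initializer) pair and leaves expansion to PatternRewriter::DeclareAndInitializeVariables. A sketch of the inputs that pipeline has to cover once destructuring is enabled (plain JS, modern syntax for illustration):

    var { x, y = 2 } = { x: 1 };  // one declaration, two bindings, one default
    let [a, , b] = [1, 2, 3];     // array pattern with a hole
    console.log(x, y, a, b);      // 1 2 1 3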
@@ -2656,6 +2589,8 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
return nullptr;
case Token::THIS:
+ if (!FLAG_strong_this) break;
+ // Fall through.
case Token::SUPER:
if (is_strong(language_mode()) &&
i::IsConstructor(function_state_->kind())) {
@@ -2678,8 +2613,9 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
default:
if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
ReportMessageAt(function_state_->this_location(),
- is_this ? "strong_constructor_this"
- : "strong_constructor_super");
+ is_this
+ ? MessageTemplate::kStrongConstructorThis
+ : MessageTemplate::kStrongConstructorSuper);
*ok = false;
return nullptr;
}
@@ -2710,7 +2646,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// structured. However, these are probably changes we want to
// make later anyway so we should go back and fix this then.
if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
- ParserTraits::ReportMessage("label_redeclaration", label);
+ ParserTraits::ReportMessage(MessageTemplate::kLabelRedeclaration, label);
*ok = false;
return NULL;
}
@@ -2743,7 +2679,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
if (peek() == Token::IDENTIFIER && expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->raw_name() ==
ast_value_factory()->let_string()) {
- ReportMessage("sloppy_lexical", NULL);
+ ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return NULL;
}
@@ -2791,9 +2727,9 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
IterationStatement* target = LookupContinueTarget(label, CHECK_OK);
if (target == NULL) {
// Illegal continue statement.
- const char* message = "illegal_continue";
+ MessageTemplate::Template message = MessageTemplate::kIllegalContinue;
if (label != NULL) {
- message = "unknown_label";
+ message = MessageTemplate::kUnknownLabel;
}
ParserTraits::ReportMessage(message, label);
*ok = false;
@@ -2828,9 +2764,9 @@ Statement* Parser::ParseBreakStatement(ZoneList<const AstRawString*>* labels,
target = LookupBreakTarget(label, CHECK_OK);
if (target == NULL) {
// Illegal break statement.
- const char* message = "illegal_break";
+ MessageTemplate::Template message = MessageTemplate::kIllegalBreak;
if (label != NULL) {
- message = "unknown_label";
+ message = MessageTemplate::kUnknownLabel;
}
ParserTraits::ReportMessage(message, label);
*ok = false;
@@ -2869,7 +2805,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
i::IsConstructor(function_state_->kind())) {
int pos = peek_position();
ReportMessageAt(Scanner::Location(pos, pos + 1),
- "strong_constructor_return_value");
+ MessageTemplate::kStrongConstructorReturnValue);
*ok = false;
return NULL;
}
@@ -2893,7 +2829,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
Expression* throw_expression =
- NewThrowTypeError("derived_constructor_return",
+ NewThrowTypeError(MessageTemplate::kDerivedConstructorReturn,
ast_value_factory()->empty_string(), pos);
// %_IsSpecObject(temp)
@@ -2935,7 +2871,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Scope* decl_scope = scope_->DeclarationScope();
if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
- ReportMessageAt(loc, "illegal_return");
+ ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
*ok = false;
return NULL;
}
@@ -2952,7 +2888,7 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
int pos = position();
if (is_strict(language_mode())) {
- ReportMessage("strict_mode_with");
+ ReportMessage(MessageTemplate::kStrictWith);
*ok = false;
return NULL;
}
@@ -2985,7 +2921,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
} else {
Expect(Token::DEFAULT, CHECK_OK);
if (*default_seen_ptr) {
- ReportMessage("multiple_defaults_in_switch");
+ ReportMessage(MessageTemplate::kMultipleDefaultsInSwitch);
*ok = false;
return NULL;
}
@@ -3004,7 +2940,8 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
}
if (is_strong(language_mode()) && stat != NULL && !stat->IsJump() &&
peek() != Token::RBRACE) {
- ReportMessageAt(scanner()->location(), "strong_switch_fallthrough");
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrongSwitchFallthrough);
*ok = false;
return NULL;
}
@@ -3047,7 +2984,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = position();
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessage("newline_after_throw");
+ ReportMessage(MessageTemplate::kNewlineAfterThrow);
*ok = false;
return NULL;
}
@@ -3078,7 +3015,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessage("no_catch_or_finally");
+ ReportMessage(MessageTemplate::kNoCatchOrFinally);
*ok = false;
return NULL;
}
@@ -3121,10 +3058,9 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
DCHECK(catch_scope != NULL && catch_variable != NULL);
- int index = function_state_->NextHandlerIndex();
- TryCatchStatement* statement = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block,
- RelocInfo::kNoPosition);
+ TryCatchStatement* statement =
+ factory()->NewTryCatchStatement(try_block, catch_scope, catch_variable,
+ catch_block, RelocInfo::kNoPosition);
try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
@@ -3134,14 +3070,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
DCHECK(finally_block == NULL);
DCHECK(catch_scope != NULL && catch_variable != NULL);
- int index = function_state_->NextHandlerIndex();
- result = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block, pos);
+ result = factory()->NewTryCatchStatement(try_block, catch_scope,
+ catch_variable, catch_block, pos);
} else {
DCHECK(finally_block != NULL);
- int index = function_state_->NextHandlerIndex();
- result = factory()->NewTryFinallyStatement(
- index, try_block, finally_block, pos);
+ result = factory()->NewTryFinallyStatement(try_block, finally_block, pos);
}
return result;
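With the handler-index bookkeeping gone, the shape of the lowering is easier to see: a full try/catch/finally is represented as a try/catch nested inside a try/finally. In source terms,

    try { risky(); } catch (e) { handle(e); } finally { cleanup(); }

is treated as

    try {
      try { risky(); } catch (e) { handle(e); }
    } finally {
      cleanup();
    }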
@@ -3195,6 +3128,48 @@ WhileStatement* Parser::ParseWhileStatement(
}
+// !%_IsSpecObject(result = iterator.next()) &&
+// %ThrowIteratorResultNotAnObject(result)
+Expression* Parser::BuildIteratorNextResult(Expression* iterator,
+ Variable* result, int pos) {
+ Expression* next_literal = factory()->NewStringLiteral(
+ ast_value_factory()->next_string(), RelocInfo::kNoPosition);
+ Expression* next_property =
+ factory()->NewProperty(iterator, next_literal, RelocInfo::kNoPosition);
+ ZoneList<Expression*>* next_arguments =
+ new (zone()) ZoneList<Expression*>(0, zone());
+ Expression* next_call =
+ factory()->NewCall(next_property, next_arguments, pos);
+ Expression* result_proxy = factory()->NewVariableProxy(result);
+ Expression* left =
+ factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
+
+ // %_IsSpecObject(...)
+ ZoneList<Expression*>* is_spec_object_args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ is_spec_object_args->Add(left, zone());
+ Expression* is_spec_object_call = factory()->NewCallRuntime(
+ ast_value_factory()->is_spec_object_string(),
+ Runtime::FunctionForId(Runtime::kInlineIsSpecObject), is_spec_object_args,
+ pos);
+
+ // %ThrowIteratorResultNotAnObject(result)
+ Expression* result_proxy_again = factory()->NewVariableProxy(result);
+ ZoneList<Expression*>* throw_arguments =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ throw_arguments->Add(result_proxy_again, zone());
+ Expression* throw_call = factory()->NewCallRuntime(
+ ast_value_factory()->throw_iterator_result_not_an_object_string(),
+ Runtime::FunctionForId(Runtime::kThrowIteratorResultNotAnObject),
+ throw_arguments, pos);
+
+ return factory()->NewBinaryOperation(
+ Token::AND,
+ factory()->NewUnaryOperation(Token::NOT, is_spec_object_call, pos),
+ throw_call, pos);
+}
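A plain-JS stand-in for the check BuildIteratorNextResult emits at each iteration step (the %-intrinsics are internal; iteratorNextResult is an illustrative name, not parser code):

    function iteratorNextResult(iterator) {
      var result = iterator.next();
      // Approximates the %_IsSpecObject test (spec objects also include
      // callables, ignored here): primitive results are rejected.
      if (result === null || typeof result !== "object") {
        throw new TypeError("Iterator result " + result + " is not an object");
      }
      return result;
    }
    try {
      iteratorNextResult({ next: function() { return 42; } });
    } catch (e) {
      console.log(e.message); // Iterator result 42 is not an object
    }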
+
+
void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
Expression* subject,
@@ -3222,41 +3197,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
{
// result = iterator.next()
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- Expression* next_literal = factory()->NewStringLiteral(
- ast_value_factory()->next_string(), RelocInfo::kNoPosition);
- Expression* next_property = factory()->NewProperty(
- iterator_proxy, next_literal, RelocInfo::kNoPosition);
- ZoneList<Expression*>* next_arguments =
- new (zone()) ZoneList<Expression*>(0, zone());
- Expression* next_call = factory()->NewCall(next_property, next_arguments,
- subject->position());
- Expression* result_proxy = factory()->NewVariableProxy(result);
- next_result = factory()->NewAssignment(Token::ASSIGN, result_proxy,
- next_call, subject->position());
-
- // %_IsSpecObject(...)
- ZoneList<Expression*>* is_spec_object_args =
- new (zone()) ZoneList<Expression*>(1, zone());
- is_spec_object_args->Add(next_result, zone());
- Expression* is_spec_object_call = factory()->NewCallRuntime(
- ast_value_factory()->is_spec_object_string(),
- Runtime::FunctionForId(Runtime::kInlineIsSpecObject),
- is_spec_object_args, subject->position());
-
- // %ThrowIteratorResultNotAnObject(result)
- Expression* result_proxy_again = factory()->NewVariableProxy(result);
- ZoneList<Expression*>* throw_arguments =
- new (zone()) ZoneList<Expression*>(1, zone());
- throw_arguments->Add(result_proxy_again, zone());
- Expression* throw_call = factory()->NewCallRuntime(
- ast_value_factory()->throw_iterator_result_not_an_object_string(),
- Runtime::FunctionForId(Runtime::kThrowIteratorResultNotAnObject),
- throw_arguments, subject->position());
-
- next_result = factory()->NewBinaryOperation(
- Token::AND, factory()->NewUnaryOperation(
- Token::NOT, is_spec_object_call, subject->position()),
- throw_call, subject->position());
+ next_result =
+ BuildIteratorNextResult(iterator_proxy, result, subject->position());
}
// result.done
@@ -3276,7 +3218,7 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* result_value = factory()->NewProperty(
result_proxy, value_literal, RelocInfo::kNoPosition);
assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
- each->position());
+ RelocInfo::kNoPosition);
}
for_of->Initialize(each, subject, body,
@@ -3309,14 +3251,21 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// let/const x = i;
// temp_x = x;
// first = 1;
+ // undefined;
// outer: for (;;) {
- // let/const x = temp_x;
- // if (first == 1) {
- // first = 0;
- // } else {
- // next;
+ // { // This block's only function is to ensure that the statements it
+ // // contains do not affect the normal completion value. This is
+ // // accomplished by setting its ignore_completion_value bit.
+ // // No new lexical scope is introduced, so lexically scoped variables
+ // // declared here will be scoped to the outer for loop.
+ // let/const x = temp_x;
+ // if (first == 1) {
+ // first = 0;
+ // } else {
+ // next;
+ // }
+ // flag = 1;
// }
- // flag = 1;
// labels: for (; flag == 1; flag = 0, temp_x = x) {
// if (cond) {
// body
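The copy-in/copy-out scheme sketched in this comment is what gives `let`/`const` loop variables a fresh binding per iteration; the difference is observable through closures:

    var fns = [];
    for (let i = 0; i < 3; i++) fns.push(function() { return i; });
    console.log(fns.map(function(f) { return f(); })); // [ 0, 1, 2 ]
    // With `var i`, every closure would see the final value: [ 3, 3, 3 ]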
@@ -3369,6 +3318,13 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
outer_block->AddStatement(assignment_statement, zone());
}
+ // make statement: undefined;
+ outer_block->AddStatement(
+ factory()->NewExpressionStatement(
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+
// Make statement: outer: for (;;)
// Note that we don't actually create the label, or set this loop up as an
// explicit break target, instead handing it directly to those nodes that
@@ -3381,10 +3337,11 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
outer_block->set_scope(for_scope);
scope_ = inner_scope;
- Block* inner_block = factory()->NewBlock(NULL, names->length() + 4, false,
- RelocInfo::kNoPosition);
+ Block* inner_block =
+ factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
+ Block* ignore_completion_block = factory()->NewBlock(
+ NULL, names->length() + 2, true, RelocInfo::kNoPosition);
ZoneList<Variable*> inner_vars(names->length(), zone());
-
// For each let variable x:
// make statement: let/const x = temp_x.
VariableMode mode = is_const ? CONST : LET;
@@ -3392,7 +3349,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
VariableProxy* proxy = NewUnresolved(names->at(i), mode);
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, mode, scope_, RelocInfo::kNoPosition);
- Declare(declaration, true, CHECK_OK);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
inner_vars.Add(declaration->proxy()->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
Assignment* assignment =
@@ -3400,8 +3357,9 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
proxy, temp_proxy, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+ DCHECK(init->position() != RelocInfo::kNoPosition);
proxy->var()->set_initializer_position(init->position());
- inner_block->AddStatement(assignment_statement, zone());
+ ignore_completion_block->AddStatement(assignment_statement, zone());
}
// Make statement: if (first == 1) { first = 0; } else { next; }
@@ -3427,7 +3385,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
Statement* clear_first_or_next = factory()->NewIfStatement(
compare, clear_first, next, RelocInfo::kNoPosition);
- inner_block->AddStatement(clear_first_or_next, zone());
+ ignore_completion_block->AddStatement(clear_first_or_next, zone());
}
Variable* flag = scope_->DeclarationScope()->NewTemporary(temp_name);
@@ -3439,9 +3397,9 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- inner_block->AddStatement(assignment_statement, zone());
+ ignore_completion_block->AddStatement(assignment_statement, zone());
}
-
+ inner_block->AddStatement(ignore_completion_block, zone());
// Make cond expression for main loop: flag == 1.
Expression* flag_cond = NULL;
{
@@ -3489,7 +3447,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
// Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
- // Note that we re-use the original loop node, which retains it labels
+ // Note that we re-use the original loop node, which retains its labels
// and ensures that any break or continue statements in body point to
// the right place.
loop->Initialize(NULL, flag_cond, compound_next_statement, body_or_stop);
@@ -3536,21 +3494,18 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Scope* saved_scope = scope_;
Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
scope_ = for_scope;
-
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
for_scope->set_start_position(scanner()->location().beg_pos);
bool is_let_identifier_expression = false;
+ DeclarationParsingResult parsing_result;
if (peek() != Token::SEMICOLON) {
- if (peek() == Token::VAR ||
- (peek() == Token::CONST && is_sloppy(language_mode()))) {
- const AstRawString* name = NULL;
- Scanner::Location first_initializer_loc = Scanner::Location::invalid();
- Scanner::Location bindings_loc = Scanner::Location::invalid();
- int num_decl;
- Block* variable_statement = ParseVariableDeclarations(
- kForStatement, &num_decl, nullptr, &name, &first_initializer_loc,
- &bindings_loc, CHECK_OK);
+ if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
+ (peek() == Token::LET && is_strict(language_mode()))) {
+ ParseVariableDeclarations(kForStatement, &parsing_result, CHECK_OK);
+ is_const = parsing_result.descriptor.mode == CONST;
+
+ int num_decl = parsing_result.declarations.length();
bool accept_IN = num_decl >= 1;
bool accept_OF = true;
ForEachStatement::VisitMode mode;
@@ -3563,133 +3518,151 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
const char* loop_type =
mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
ParserTraits::ReportMessageAt(
- bindings_loc, "for_inof_loop_multi_bindings", loop_type);
+ parsing_result.bindings_loc,
+ MessageTemplate::kForInOfLoopMultiBindings, loop_type);
*ok = false;
return nullptr;
}
- if (first_initializer_loc.IsValid() &&
+ if (parsing_result.first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE)) {
if (mode == ForEachStatement::ITERATE) {
- ReportMessageAt(first_initializer_loc, "for_of_loop_initializer");
+ ReportMessageAt(parsing_result.first_initializer_loc,
+ MessageTemplate::kForOfLoopInitializer);
} else {
// TODO(caitp): This should be an error in sloppy mode too.
- ReportMessageAt(first_initializer_loc, "for_in_loop_initializer");
+ ReportMessageAt(parsing_result.first_initializer_loc,
+ MessageTemplate::kForInLoopInitializer);
}
*ok = false;
return nullptr;
}
- ForEachStatement* loop =
- factory()->NewForEachStatement(mode, labels, stmt_pos);
- Target target(&this->target_stack_, loop);
-
- Expression* enumerable = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- VariableProxy* each =
- scope_->NewUnresolved(factory(), name, each_beg_pos, each_end_pos);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
- InitializeForEachStatement(loop, each, enumerable, body);
- Block* result =
- factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
- result->AddStatement(variable_statement, zone());
- result->AddStatement(loop, zone());
- scope_ = saved_scope;
- for_scope->set_end_position(scanner()->location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- DCHECK(for_scope == NULL);
- // Parsed for-in loop w/ variable/const declaration.
- return result;
- } else {
- init = variable_statement;
- }
- } else if ((peek() == Token::LET || peek() == Token::CONST) &&
- is_strict(language_mode())) {
- is_const = peek() == Token::CONST;
- const AstRawString* name = NULL;
- Scanner::Location first_initializer_loc = Scanner::Location::invalid();
- Scanner::Location bindings_loc = Scanner::Location::invalid();
- int num_decl;
- Block* variable_statement = ParseVariableDeclarations(
- kForStatement, &num_decl, &lexical_bindings, &name,
- &first_initializer_loc, &bindings_loc, CHECK_OK);
- bool accept_IN = num_decl >= 1;
- bool accept_OF = true;
- ForEachStatement::VisitMode mode;
- int each_beg_pos = scanner()->location().beg_pos;
- int each_end_pos = scanner()->location().end_pos;
-
- if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
- if (!*ok) return nullptr;
- if (num_decl != 1) {
- const char* loop_type =
- mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
- ParserTraits::ReportMessageAt(
- bindings_loc, "for_inof_loop_multi_bindings", loop_type);
- *ok = false;
- return nullptr;
- }
- if (first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) || mode == ForEachStatement::ITERATE)) {
- if (mode == ForEachStatement::ITERATE) {
- ReportMessageAt(first_initializer_loc, "for_of_loop_initializer");
- } else {
- ReportMessageAt(first_initializer_loc, "for_in_loop_initializer");
- }
- *ok = false;
- return nullptr;
+ DCHECK(parsing_result.declarations.length() == 1);
+ Block* init_block = nullptr;
+
+      // Special case for legacy for (var/const x = ... in).
+ if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
+ parsing_result.declarations[0].initializer != nullptr) {
+ VariableProxy* single_var = scope_->NewUnresolved(
+ factory(), parsing_result.SingleName(), Variable::NORMAL,
+ each_beg_pos, each_end_pos);
+ init_block = factory()->NewBlock(
+ nullptr, 2, true, parsing_result.descriptor.declaration_pos);
+ init_block->AddStatement(
+ factory()->NewExpressionStatement(
+ factory()->NewAssignment(
+ Token::ASSIGN, single_var,
+ parsing_result.declarations[0].initializer,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
}
- // Rewrite a for-in statement of the form
+
+ // Rewrite a for-in/of statement of the form
//
- // for (let/const x in e) b
+ // for (let/const/var x in/of e) b
//
// into
//
- // <let x' be a temporary variable>
- // for (x' in e) {
- // let/const x;
- // x = x';
- // b;
+ // {
+ // <let x' be a temporary variable>
+ // for (x' in/of e) {
+ // let/const/var x;
+ // x = x';
+ // b;
+ // }
+ // let x; // for TDZ
// }
- // TODO(keuchel): Move the temporary variable to the block scope, after
- // implementing stack allocated block scoped variables.
Variable* temp = scope_->DeclarationScope()->NewTemporary(
ast_value_factory()->dot_for_string());
- VariableProxy* temp_proxy =
- factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
- // The expression does not see the loop variable.
- scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
- scope_ = for_scope;
+
Expect(Token::RPAREN, CHECK_OK);
- VariableProxy* each =
- scope_->NewUnresolved(factory(), name, each_beg_pos, each_end_pos);
+ Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
+ body_scope->set_start_position(scanner()->location().beg_pos);
+ scope_ = body_scope;
+
Statement* body = ParseSubStatement(NULL, CHECK_OK);
+
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
- Token::Value init_op = is_const ? Token::INIT_CONST : Token::ASSIGN;
- Assignment* assignment = factory()->NewAssignment(
- init_op, each, temp_proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement = factory()->NewExpressionStatement(
- assignment, RelocInfo::kNoPosition);
- body_block->AddStatement(variable_statement, zone());
- body_block->AddStatement(assignment_statement, zone());
+
+ auto each_initialization_block =
+ factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
+ {
+ DCHECK(parsing_result.declarations.length() == 1);
+ DeclarationParsingResult::Declaration decl =
+ parsing_result.declarations[0];
+ auto descriptor = parsing_result.descriptor;
+ descriptor.declaration_pos = RelocInfo::kNoPosition;
+ descriptor.initialization_pos = RelocInfo::kNoPosition;
+ decl.initializer = factory()->NewVariableProxy(temp);
+
+ PatternRewriter::DeclareAndInitializeVariables(
+ each_initialization_block, &descriptor, &decl,
+ IsLexicalVariableMode(descriptor.mode) ? &lexical_bindings
+ : nullptr,
+ CHECK_OK);
+ }
+
+ body_block->AddStatement(each_initialization_block, zone());
body_block->AddStatement(body, zone());
+ VariableProxy* temp_proxy =
+ factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
+ scope_ = for_scope;
+ body_scope->set_end_position(scanner()->location().end_pos);
+ body_scope = body_scope->FinalizeBlockScope();
+ if (body_scope != nullptr) {
+ body_block->set_scope(body_scope);
+ }
+
+ // Create a TDZ for any lexically-bound names.
+ if (IsLexicalVariableMode(parsing_result.descriptor.mode)) {
+ DCHECK_NULL(init_block);
+
+ init_block =
+ factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
+
+ for (int i = 0; i < lexical_bindings.length(); ++i) {
+ // TODO(adamk): This needs to be some sort of special
+ // INTERNAL variable that's invisible to the debugger
+ // but visible to everything else.
+ VariableProxy* tdz_proxy = NewUnresolved(lexical_bindings[i], LET);
+ Declaration* tdz_decl = factory()->NewVariableDeclaration(
+ tdz_proxy, LET, scope_, RelocInfo::kNoPosition);
+ Variable* tdz_var = Declare(tdz_decl, DeclarationDescriptor::NORMAL,
+ true, CHECK_OK);
+ tdz_var->set_initializer_position(position());
+ }
+ }
+
scope_ = saved_scope;
for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
- body_block->set_scope(for_scope);
- // Parsed for-in loop w/ let declaration.
- return loop;
-
+ // Parsed for-in loop w/ variable declarations.
+ if (init_block != nullptr) {
+ init_block->AddStatement(loop, zone());
+ if (for_scope != nullptr) {
+ init_block->set_scope(for_scope);
+ }
+ return init_block;
+ } else {
+ DCHECK_NULL(for_scope);
+ return loop;
+ }
} else {
- init = variable_statement;
+ init = parsing_result.BuildInitializationBlock(
+ IsLexicalVariableMode(parsing_result.descriptor.mode)
+ ? &lexical_bindings
+ : nullptr,
+ CHECK_OK);
}
} else {
Scanner::Location lhs_location = scanner()->peek_location();
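
In source terms, the declaration branch above corresponds roughly to this pseudo-JS sketch (".for" is the internal temporary from dot_for_string, shown only for illustration):

    // for (let x of e) b   becomes approximately:
    {
      for (.for of e) {      // .for: internal temporary
        let x = .for;        // fresh per-iteration binding
        b;
      }
      let x;                 // TDZ-only binding for the lexical name
    }
    // The legacy sloppy-mode initializer is split off into init_block first:
    // for (var x = 1 in e) b   =>   { x = 1; for (x in e) b }
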
@@ -3697,14 +3670,15 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
ForEachStatement::VisitMode mode;
bool accept_OF = expression->IsVariableProxy();
is_let_identifier_expression =
- expression->IsVariableProxy() &&
- expression->AsVariableProxy()->raw_name() ==
- ast_value_factory()->let_string();
+ expression->IsVariableProxy() &&
+ expression->AsVariableProxy()->raw_name() ==
+ ast_value_factory()->let_string();
if (CheckInOrOf(accept_OF, &mode, ok)) {
if (!*ok) return nullptr;
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, "invalid_lhs_in_for", CHECK_OK);
+ expression, lhs_location, MessageTemplate::kInvalidLhsInFor,
+ CHECK_OK);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
@@ -3723,7 +3697,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
return loop;
} else {
- init = factory()->NewExpressionStatement(expression, position());
+ init =
+ factory()->NewExpressionStatement(expression, lhs_location.beg_pos);
}
}
}
@@ -3736,7 +3711,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// Detect attempts at 'let' declarations in sloppy mode.
if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
is_let_identifier_expression) {
- ReportMessage("sloppy_lexical", NULL);
+ ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return NULL;
}
@@ -3863,103 +3838,80 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
}
-void ParserTraits::DeclareArrowFunctionParameters(
- Scope* scope, Expression* expr, const Scanner::Location& params_loc,
- FormalParameterErrorLocations* error_locs, bool* ok) {
- if (scope->num_parameters() >= Code::kMaxArguments) {
- ReportMessageAt(params_loc, "malformed_arrow_function_parameter_list");
+void ParserTraits::ParseArrowFunctionFormalParameters(
+ ParserFormalParameterParsingState* parsing_state, Expression* expr,
+ const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+ bool* ok) {
+ if (parsing_state->scope->num_parameters() >= Code::kMaxArguments) {
+ ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
*ok = false;
return;
}
// ArrowFunctionFormals ::
- // Binary(Token::COMMA, ArrowFunctionFormals, VariableProxy)
+ // Binary(Token::COMMA, NonTailArrowFunctionFormals, Tail)
+ // Tail
+ // NonTailArrowFunctionFormals ::
+ // Binary(Token::COMMA, NonTailArrowFunctionFormals, VariableProxy)
// VariableProxy
+ // Tail ::
+ // VariableProxy
+ // Spread(VariableProxy)
//
// As we need to visit the parameters in left-to-right order, we recurse on
// the left-hand side of comma expressions.
//
- // Sadly, for the various malformed_arrow_function_parameter_list errors, we
- // can't be more specific on the error message or on the location because we
- // need to match the pre-parser's behavior.
if (expr->IsBinaryOperation()) {
BinaryOperation* binop = expr->AsBinaryOperation();
- if (binop->op() != Token::COMMA) {
- ReportMessageAt(params_loc, "malformed_arrow_function_parameter_list");
- *ok = false;
- return;
- }
+ // The classifier has already run, so we know that the expression is a valid
+ // arrow function formals production.
+ DCHECK_EQ(binop->op(), Token::COMMA);
Expression* left = binop->left();
Expression* right = binop->right();
- if (left->is_single_parenthesized() || right->is_single_parenthesized()) {
- ReportMessageAt(params_loc, "malformed_arrow_function_parameter_list");
- *ok = false;
- return;
- }
- DeclareArrowFunctionParameters(scope, left, params_loc, error_locs, ok);
+ ParseArrowFunctionFormalParameters(parsing_state, left, params_loc,
+ duplicate_loc, ok);
if (!*ok) return;
// LHS of comma expression should be unparenthesized.
expr = right;
}
- // TODO(wingo): Support rest parameters.
- if (!expr->IsVariableProxy()) {
- ReportMessageAt(params_loc, "malformed_arrow_function_parameter_list");
- *ok = false;
- return;
- }
-
- const AstRawString* raw_name = expr->AsVariableProxy()->raw_name();
- Scanner::Location param_location(expr->position(),
- expr->position() + raw_name->length());
+ // Only the right-most expression may be a rest parameter.
+ DCHECK(!parsing_state->has_rest);
- if (expr->AsVariableProxy()->is_this()) {
- ReportMessageAt(param_location, "this_formal_parameter");
- *ok = false;
- return;
+ bool is_rest = false;
+ if (expr->IsSpread()) {
+ is_rest = true;
+ expr = expr->AsSpread()->expression();
}
- if (!error_locs->eval_or_arguments.IsValid() && IsEvalOrArguments(raw_name))
- error_locs->eval_or_arguments = param_location;
- if (!error_locs->reserved.IsValid() && IsFutureStrictReserved(raw_name))
- error_locs->reserved = param_location;
- if (!error_locs->undefined.IsValid() && IsUndefined(raw_name))
- error_locs->undefined = param_location;
-
- // When the formal parameter was originally seen, it was parsed as a
- // VariableProxy and recorded as unresolved in the scope. Here we undo that
- // parse-time side-effect.
- parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
+ if (expr->IsVariableProxy()) {
+ // When the formal parameter was originally seen, it was parsed as a
+ // VariableProxy and recorded as unresolved in the scope. Here we undo that
+ // parse-time side-effect for parameters that are single-names (not
+ // patterns; for patterns that happens uniformly in
+ // PatternRewriter::VisitVariableProxy).
+ parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
+ }
- bool is_rest = false;
- bool is_duplicate = DeclareFormalParameter(scope, raw_name, is_rest);
-
- if (is_duplicate) {
- // Arrow function parameter lists are parsed as StrictFormalParameters,
- // which means that they cannot have duplicates. Note that this is a subset
- // of the restrictions placed on parameters to functions whose body is
- // strict.
- ReportMessageAt(param_location,
- "duplicate_arrow_function_formal_parameter");
- *ok = false;
- return;
+ ExpressionClassifier classifier;
+ DeclareFormalParameter(parsing_state, expr, &classifier, is_rest);
+ if (!duplicate_loc->IsValid()) {
+ *duplicate_loc = classifier.duplicate_formal_parameter_error().location;
}
}
-void ParserTraits::ParseArrowFunctionFormalParameters(
- Scope* scope, Expression* params, const Scanner::Location& params_loc,
- FormalParameterErrorLocations* error_locs, bool* is_rest, bool* ok) {
- // Too many parentheses around expression:
- // (( ... )) => ...
- if (params->is_multi_parenthesized()) {
- // TODO(wingo): Make a better message.
- ReportMessageAt(params_loc, "malformed_arrow_function_parameter_list");
- *ok = false;
- return;
- }
+void ParserTraits::ReindexLiterals(
+ const ParserFormalParameterParsingState& parsing_state) {
+ if (parser_->function_state_->materialized_literal_count() > 0) {
+ AstLiteralReindexer reindexer;
- DeclareArrowFunctionParameters(scope, params, params_loc, error_locs, ok);
+ for (const auto p : parsing_state.params) {
+ if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
+ }
+ DCHECK(reindexer.count() <=
+ parser_->function_state_->materialized_literal_count());
+ }
}
@@ -3967,7 +3919,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const AstRawString* function_name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
//
@@ -4023,16 +3976,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope = function_type == FunctionLiteral::DECLARATION &&
- is_sloppy(language_mode()) &&
+ is_sloppy(language_mode) &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
: NewScope(scope_, FUNCTION_SCOPE, kind);
+ scope->SetLanguageMode(language_mode);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
- int handler_count = 0;
- FormalParameterErrorLocations error_locs;
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ ExpressionClassifier formals_classifier(&duplicate_finder);
FunctionLiteral::EagerCompileHint eager_compile_hint =
parenthesized_function_ ? FunctionLiteral::kShouldEagerCompile
: FunctionLiteral::kShouldLazyCompile;
@@ -4058,18 +4012,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_state.set_generator_object_variable(temp);
}
- bool has_rest = false;
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
scope_->set_start_position(start_position);
+ ParserFormalParameterParsingState parsing_state(scope);
num_parameters =
- ParseFormalParameterList(scope, &error_locs, &has_rest, CHECK_OK);
+ ParseFormalParameterList(&parsing_state, &formals_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(num_parameters, arity_restriction, start_position,
+ CheckArityRestrictions(num_parameters, arity_restriction,
+ parsing_state.has_rest, start_position,
formals_end_position, CHECK_OK);
-
Expect(Token::LBRACE, CHECK_OK);
// If we have a named function expression, we add a local variable
@@ -4081,11 +4035,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Variable* fvar = NULL;
Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode)) {
fvar_init_op = Token::INIT_CONST;
}
- VariableMode fvar_mode =
- is_strict(language_mode()) ? CONST : CONST_LEGACY;
+ VariableMode fvar_mode = is_strict(language_mode) ? CONST : CONST_LEGACY;
DCHECK(function_name != NULL);
fvar = new (zone())
Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
@@ -4129,9 +4082,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// To make this additional case work, both Parser and PreParser implement a
// logic where only top-level functions will be parsed lazily.
- bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
- scope_->AllowsLazyCompilation() &&
- !parenthesized_function_);
+ bool is_lazily_parsed = mode() == PARSE_LAZILY &&
+ scope_->AllowsLazyParsing() &&
+ !parenthesized_function_;
parenthesized_function_ = false; // The bit was set for this function only.
// Eager or lazy parse?
@@ -4141,11 +4094,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// try to lazy parse in the first place, we'll have to parse eagerly.
Scanner::BookmarkScope bookmark(scanner());
if (is_lazily_parsed) {
- for (Scope* s = scope_->outer_scope();
- s != nullptr && (s != s->DeclarationScope()); s = s->outer_scope()) {
- s->ForceContextAllocation();
- }
-
Scanner::BookmarkScope* maybe_bookmark =
bookmark.Set() ? &bookmark : nullptr;
SkipLazyFunctionBody(&materialized_literal_count,
@@ -4164,47 +4112,55 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
}
if (!is_lazily_parsed) {
- body = ParseEagerFunctionBody(function_name, pos, fvar, fvar_init_op,
- kind, CHECK_OK);
+ body = ParseEagerFunctionBody(function_name, pos, parsing_state, fvar,
+ fvar_init_op, kind, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
- handler_count = function_state.handler_count();
+ }
- if (is_strong(language_mode()) && IsSubclassConstructor(kind)) {
- if (!function_state.super_location().IsValid()) {
- ReportMessageAt(function_name_location,
- "strong_super_call_missing", kReferenceError);
- *ok = false;
- return nullptr;
- }
+ // Parsing the body may change the language mode in our scope.
+ language_mode = scope->language_mode();
+
+ if (is_strong(language_mode) && IsSubclassConstructor(kind)) {
+ if (!function_state.super_location().IsValid()) {
+ ReportMessageAt(function_name_location,
+ MessageTemplate::kStrongSuperCallMissing,
+ kReferenceError);
+ *ok = false;
+ return nullptr;
}
}
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
- CheckFunctionName(language_mode(), kind, function_name,
+ CheckFunctionName(language_mode, kind, function_name,
name_is_strict_reserved, function_name_location,
CHECK_OK);
- const bool use_strict_params = has_rest || IsConciseMethod(kind);
- CheckFunctionParameterNames(language_mode(), use_strict_params, error_locs,
- CHECK_OK);
-
- if (is_strict(language_mode())) {
+ const bool use_strict_params =
+ !parsing_state.is_simple_parameter_list || IsConciseMethod(kind);
+ const bool allow_duplicate_parameters =
+ is_sloppy(language_mode) && !use_strict_params;
+ ValidateFormalParameters(&formals_classifier, language_mode,
+ allow_duplicate_parameters, CHECK_OK);
+
+ if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
}
+ bool has_duplicate_parameters =
+ !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
FunctionLiteral::ParameterFlag duplicate_parameters =
- error_locs.duplicate.IsValid() ? FunctionLiteral::kHasDuplicateParameters
- : FunctionLiteral::kNoDuplicateParameters;
+ has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
+ : FunctionLiteral::kNoDuplicateParameters;
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
function_name, ast_value_factory(), scope, body,
- materialized_literal_count, expected_property_count, handler_count,
- num_parameters, duplicate_parameters, function_type,
- FunctionLiteral::kIsFunction, eager_compile_hint, kind, pos);
+ materialized_literal_count, expected_property_count, num_parameters,
+ duplicate_parameters, function_type, FunctionLiteral::kIsFunction,
+ eager_compile_hint, kind, pos);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
@@ -4247,6 +4203,7 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
*expected_property_count = entry.property_count();
scope_->SetLanguageMode(entry.language_mode());
if (entry.uses_super_property()) scope_->RecordSuperPropertyUsage();
+ if (entry.calls_eval()) scope_->RecordEvalCall();
return;
}
cached_parse_data_->Reject();
@@ -4281,16 +4238,19 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
scope_->SetLanguageMode(logger.language_mode());
- if (logger.scope_uses_super_property()) {
+ if (logger.uses_super_property()) {
scope_->RecordSuperPropertyUsage();
}
+ if (logger.calls_eval()) {
+ scope_->RecordEvalCall();
+ }
if (produce_cached_parse_data()) {
DCHECK(log_);
// Position right after terminal '}'.
int body_end = scanner()->location().end_pos;
log_->LogFunction(function_block_pos, body_end, *materialized_literal_count,
*expected_property_count, scope_->language_mode(),
- scope_->uses_super_property());
+ scope_->uses_super_property(), scope_->calls_eval());
}
}
@@ -4313,8 +4273,66 @@ void Parser::AddAssertIsConstruct(ZoneList<Statement*>* body, int pos) {
}
+Statement* Parser::BuildAssertIsCoercible(Variable* var) {
+ // if (var === null || var === undefined)
+  //     throw /* TypeError kNonCoercible */;
+
+ Expression* condition = factory()->NewBinaryOperation(
+ Token::OR, factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(var),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(var),
+ factory()->NewNullLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ Expression* throw_type_error = this->NewThrowTypeError(
+ MessageTemplate::kNonCoercible, ast_value_factory()->empty_string(),
+ RelocInfo::kNoPosition);
+ IfStatement* if_statement = factory()->NewIfStatement(
+ condition, factory()->NewExpressionStatement(throw_type_error,
+ RelocInfo::kNoPosition),
+ factory()->NewEmptyStatement(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ return if_statement;
+}
+
+
+Block* Parser::BuildParameterInitializationBlock(
+ const ParserFormalParameterParsingState& formal_parameters, bool* ok) {
+ DCHECK(scope_->is_function_scope());
+ Block* init_block = nullptr;
+ for (auto parameter : formal_parameters.params) {
+ if (parameter.pattern == nullptr) continue;
+ if (init_block == nullptr) {
+ init_block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ }
+
+ DeclarationDescriptor descriptor;
+ descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
+ descriptor.parser = this;
+ descriptor.declaration_scope = scope_;
+ descriptor.scope = scope_;
+ descriptor.mode = LET;
+ descriptor.is_const = false;
+ descriptor.needs_init = true;
+ descriptor.declaration_pos = parameter.pattern->position();
+ descriptor.initialization_pos = parameter.pattern->position();
+ descriptor.init_op = Token::INIT_LET;
+ DeclarationParsingResult::Declaration decl(
+ parameter.pattern, parameter.pattern->position(),
+ factory()->NewVariableProxy(parameter.var));
+ PatternRewriter::DeclareAndInitializeVariables(init_block, &descriptor,
+ &decl, nullptr, CHECK_OK);
+ }
+ return init_block;
+}
+
+
ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
- const AstRawString* function_name, int pos, Variable* fvar,
+ const AstRawString* function_name, int pos,
+ const ParserFormalParameterParsingState& formal_parameters, Variable* fvar,
Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
// Everything inside an eagerly parsed function will be parsed eagerly
// (see comment above).
@@ -4338,6 +4356,12 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
AddAssertIsConstruct(body, pos);
}
+ auto init_block =
+ BuildParameterInitializationBlock(formal_parameters, CHECK_OK);
+ if (init_block != nullptr) {
+ body->Add(init_block, zone());
+ }
+
// For generators, allocate and yield an iterator on function entry.
if (IsGeneratorFunction(kind)) {
ZoneList<Expression*>* arguments =
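
At the source level, the init_block added above gives a destructuring parameter its bindings at the top of the body (pseudo-JS; %temp stands for the synthesized parameter variable):

    // function f({ a, b }) { body }
    // is treated roughly as:
    function f(%temp) {
      let { a, b } = %temp;   // prepended parameter initialization block
      body;
    }
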
@@ -4399,24 +4423,20 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
NULL, stack_limit_);
reusable_preparser_->set_allow_lazy(true);
- reusable_preparser_->set_allow_natives(allow_natives());
- reusable_preparser_->set_allow_harmony_modules(allow_harmony_modules());
- reusable_preparser_->set_allow_harmony_arrow_functions(
- allow_harmony_arrow_functions());
- reusable_preparser_->set_allow_harmony_classes(allow_harmony_classes());
- reusable_preparser_->set_allow_harmony_object_literals(
- allow_harmony_object_literals());
- reusable_preparser_->set_allow_harmony_sloppy(allow_harmony_sloppy());
- reusable_preparser_->set_allow_harmony_unicode(allow_harmony_unicode());
- reusable_preparser_->set_allow_harmony_computed_property_names(
- allow_harmony_computed_property_names());
- reusable_preparser_->set_allow_harmony_rest_params(
- allow_harmony_rest_params());
- reusable_preparser_->set_allow_harmony_spreadcalls(
- allow_harmony_spreadcalls());
- reusable_preparser_->set_allow_harmony_destructuring(
- allow_harmony_destructuring());
- reusable_preparser_->set_allow_strong_mode(allow_strong_mode());
+#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
+ SET_ALLOW(natives);
+ SET_ALLOW(harmony_modules);
+ SET_ALLOW(harmony_arrow_functions);
+ SET_ALLOW(harmony_sloppy);
+ SET_ALLOW(harmony_unicode);
+ SET_ALLOW(harmony_computed_property_names);
+ SET_ALLOW(harmony_rest_params);
+ SET_ALLOW(harmony_spreadcalls);
+ SET_ALLOW(harmony_destructuring);
+ SET_ALLOW(harmony_spread_arrays);
+ SET_ALLOW(harmony_new_target);
+ SET_ALLOW(strong_mode);
+#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
language_mode(), function_state_->kind(), logger, bookmark);
@@ -4433,26 +4453,23 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
bool* ok) {
// All parts of a ClassDeclaration and ClassExpression are strict code.
if (name_is_strict_reserved) {
- ReportMessageAt(class_name_location, "unexpected_strict_reserved");
+ ReportMessageAt(class_name_location,
+ MessageTemplate::kUnexpectedStrictReserved);
*ok = false;
return NULL;
}
if (IsEvalOrArguments(name)) {
- ReportMessageAt(class_name_location, "strict_eval_arguments");
+ ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
*ok = false;
return NULL;
}
if (is_strong(language_mode()) && IsUndefined(name)) {
- ReportMessageAt(class_name_location, "strong_undefined");
+ ReportMessageAt(class_name_location, MessageTemplate::kStrongUndefined);
*ok = false;
return NULL;
}
- // Create a block scope which is additionally tagged as class scope; this is
- // important for resolving variable references to the class name in the strong
- // mode.
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
- block_scope->tag_as_class_scope();
BlockState block_state(&scope_, block_scope);
scope_->SetLanguageMode(
static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
@@ -4465,7 +4482,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, CONST, block_scope, pos, is_class_declaration,
scope_->class_declaration_group_start());
- Declare(declaration, true, CHECK_OK);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
}
Expression* extends = NULL;
@@ -4517,8 +4534,8 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
int end_pos = scanner()->location().end_pos;
if (constructor == NULL) {
- constructor =
- DefaultConstructor(extends != NULL, block_scope, pos, end_pos);
+ constructor = DefaultConstructor(extends != NULL, block_scope, pos, end_pos,
+ block_scope->language_mode());
}
block_scope->set_end_position(end_pos);
@@ -4572,7 +4589,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
return args->at(0);
} else {
- ReportMessage("not_isvar");
+ ReportMessage(MessageTemplate::kNotIsvar);
*ok = false;
return NULL;
}
@@ -4582,14 +4599,14 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (function != NULL &&
function->nargs != -1 &&
function->nargs != args->length()) {
- ReportMessage("illegal_access");
+ ReportMessage(MessageTemplate::kIllegalAccess);
*ok = false;
return NULL;
}
// Check that the function is defined if it's an inline runtime call.
if (function == NULL && name->FirstCharacter() == '_') {
- ParserTraits::ReportMessage("not_defined", name);
+ ParserTraits::ReportMessage(MessageTemplate::kNotDefined, name);
*ok = false;
return NULL;
}
@@ -4614,7 +4631,8 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Scanner::Location location = position == RelocInfo::kNoPosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
- ParserTraits::ReportMessageAt(location, "var_redeclaration", name);
+ ParserTraits::ReportMessageAt(location, MessageTemplate::kVarRedeclaration,
+ name);
*ok = false;
}
}
@@ -5756,11 +5774,12 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
args->Add(factory()->NewArrayLiteral(
const_cast<ZoneList<Expression*>*>(cooked_strings),
- cooked_idx, pos),
+ cooked_idx, is_strong(language_mode()), pos),
zone());
args->Add(
factory()->NewArrayLiteral(
- const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
+ const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx,
+ is_strong(language_mode()), pos),
zone());
// Ensure hash is suitable as a Smi value
@@ -5851,6 +5870,7 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
}
int literal_index = function_state_->NextMaterializedLiteralIndex();
args->Add(factory()->NewArrayLiteral(unspread, literal_index,
+ is_strong(language_mode()),
RelocInfo::kNoPosition),
zone());
@@ -5881,13 +5901,14 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
Expression* Parser::SpreadCall(Expression* function,
ZoneList<v8::internal::Expression*>* args,
int pos) {
- if (function->IsSuperReference()) {
+ if (function->IsSuperCallReference()) {
// Super calls
+ // %_CallSuperWithSpread(%ReflectConstruct(<super>, args, new.target))
args->InsertAt(0, function, zone());
- args->Add(factory()->NewVariableProxy(scope_->new_target_var()), zone());
+ args->Add(function->AsSuperCallReference()->new_target_var(), zone());
Expression* result = factory()->NewCallRuntime(
ast_value_factory()->reflect_construct_string(), NULL, args, pos);
- args = new (zone()) ZoneList<Expression*>(0, zone());
+ args = new (zone()) ZoneList<Expression*>(1, zone());
args->Add(result, zone());
return factory()->NewCallRuntime(
ast_value_factory()->empty_string(),
@@ -5895,17 +5916,24 @@ Expression* Parser::SpreadCall(Expression* function,
} else {
if (function->IsProperty()) {
// Method calls
- Variable* temp =
- scope_->NewTemporary(ast_value_factory()->empty_string());
- VariableProxy* obj = factory()->NewVariableProxy(temp);
- Assignment* assign_obj = factory()->NewAssignment(
- Token::ASSIGN, obj, function->AsProperty()->obj(),
- RelocInfo::kNoPosition);
- function = factory()->NewProperty(
- assign_obj, function->AsProperty()->key(), RelocInfo::kNoPosition);
- args->InsertAt(0, function, zone());
- obj = factory()->NewVariableProxy(temp);
- args->InsertAt(1, obj, zone());
+ if (function->AsProperty()->IsSuperAccess()) {
+ Expression* home =
+ ThisExpression(scope_, factory(), RelocInfo::kNoPosition);
+ args->InsertAt(0, function, zone());
+ args->InsertAt(1, home, zone());
+ } else {
+ Variable* temp =
+ scope_->NewTemporary(ast_value_factory()->empty_string());
+ VariableProxy* obj = factory()->NewVariableProxy(temp);
+ Assignment* assign_obj = factory()->NewAssignment(
+ Token::ASSIGN, obj, function->AsProperty()->obj(),
+ RelocInfo::kNoPosition);
+ function = factory()->NewProperty(
+ assign_obj, function->AsProperty()->key(), RelocInfo::kNoPosition);
+ args->InsertAt(0, function, zone());
+ obj = factory()->NewVariableProxy(temp);
+ args->InsertAt(1, obj, zone());
+ }
} else {
// Non-method calls
args->InsertAt(0, function, zone());
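
Summarizing the lowerings in SpreadCall (runtime names as in the comments above; the exact calling sequence is an implementation detail):

    // super(...args)  ->  %_CallSuperWithSpread(
    //                         %ReflectConstruct(<super>, args, new.target))
    // o.m(...args)    ->  %temp = o; then the call proceeds with
    //                     (%temp.m, %temp, ...), so the receiver
    //                     expression o is evaluated exactly once.
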
@@ -5926,4 +5954,5 @@ Expression* Parser::SpreadCallNew(Expression* function,
return factory()->NewCallRuntime(
ast_value_factory()->reflect_construct_string(), NULL, args, pos);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 1a84ec4157..94aa28a1f9 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -216,6 +216,7 @@ class FunctionEntry BASE_EMBEDDED {
kPropertyCountIndex,
kLanguageModeIndex,
kUsesSuperPropertyIndex,
+ kCallsEvalIndex,
kSize
};
@@ -233,6 +234,7 @@ class FunctionEntry BASE_EMBEDDED {
return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
}
bool uses_super_property() { return backing_[kUsesSuperPropertyIndex]; }
+ bool calls_eval() { return backing_[kCallsEvalIndex]; }
bool is_valid() { return !backing_.is_empty(); }
@@ -536,6 +538,27 @@ class RegExpParser BASE_EMBEDDED {
class Parser;
class SingletonLogger;
+
+struct ParserFormalParameterParsingState
+ : public PreParserFormalParameterParsingState {
+ struct Parameter {
+ Parameter(Variable* var, Expression* pattern)
+ : var(var), pattern(pattern) {}
+ Variable* var;
+ Expression* pattern;
+ };
+
+ explicit ParserFormalParameterParsingState(Scope* scope)
+ : PreParserFormalParameterParsingState(scope), params(4, scope->zone()) {}
+
+ ZoneList<Parameter> params;
+
+ void AddParameter(Variable* var, Expression* pattern) {
+ params.Add(Parameter(var, pattern), scope->zone());
+ }
+};
+
+
class ParserTraits {
public:
struct Type {
@@ -558,7 +581,7 @@ class ParserTraits {
typedef ZoneList<v8::internal::Expression*>* ExpressionList;
typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
typedef const v8::internal::AstRawString* FormalParameter;
- typedef Scope FormalParameterScope;
+ typedef ParserFormalParameterParsingState FormalParameterParsingState;
typedef ZoneList<v8::internal::Statement*>* StatementList;
// For constructing objects returned by the traversing functions.
@@ -659,33 +682,36 @@ class ParserTraits {
int pos, AstNodeFactory* factory);
// Generate AST node that throws a ReferenceError with the given type.
- Expression* NewThrowReferenceError(const char* type, int pos);
+ Expression* NewThrowReferenceError(MessageTemplate::Template message,
+ int pos);
// Generate AST node that throws a SyntaxError with the given
// type. The first argument may be null (in the handle sense) in
// which case no arguments are passed to the constructor.
- Expression* NewThrowSyntaxError(
- const char* type, const AstRawString* arg, int pos);
+ Expression* NewThrowSyntaxError(MessageTemplate::Template message,
+ const AstRawString* arg, int pos);
// Generate AST node that throws a TypeError with the given
// type. Both arguments must be non-null (in the handle sense).
- Expression* NewThrowTypeError(const char* type, const AstRawString* arg,
- int pos);
+ Expression* NewThrowTypeError(MessageTemplate::Template message,
+ const AstRawString* arg, int pos);
// Generic AST generator for throwing errors from compiled code.
- Expression* NewThrowError(
- const AstRawString* constructor, const char* type,
- const AstRawString* arg, int pos);
+ Expression* NewThrowError(Runtime::FunctionId function_id,
+ MessageTemplate::Template message,
+ const AstRawString* arg, int pos);
// Reporting errors.
- void ReportMessageAt(Scanner::Location source_location, const char* message,
+ void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
const char* arg = NULL,
ParseErrorType error_type = kSyntaxError);
- void ReportMessage(const char* message, const char* arg = NULL,
+ void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
ParseErrorType error_type = kSyntaxError);
- void ReportMessage(const char* message, const AstRawString* arg,
+ void ReportMessage(MessageTemplate::Template message, const AstRawString* arg,
ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(Scanner::Location source_location, const char* message,
+ void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate::Template message,
const AstRawString* arg,
ParseErrorType error_type = kSyntaxError);
@@ -721,10 +747,14 @@ class ParserTraits {
Expression* ThisExpression(Scope* scope, AstNodeFactory* factory,
int pos = RelocInfo::kNoPosition);
- Expression* SuperReference(Scope* scope, AstNodeFactory* factory,
- int pos = RelocInfo::kNoPosition);
+ Expression* SuperPropertyReference(Scope* scope, AstNodeFactory* factory,
+ int pos);
+ Expression* SuperCallReference(Scope* scope, AstNodeFactory* factory,
+ int pos);
+ Expression* NewTargetExpression(Scope* scope, AstNodeFactory* factory,
+ int pos);
Expression* DefaultConstructor(bool call_super, Scope* scope, int pos,
- int end_pos);
+ int end_pos, LanguageMode language_mode);
Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
AstNodeFactory* factory);
Expression* ExpressionFromIdentifier(const AstRawString* name,
@@ -742,29 +772,23 @@ class ParserTraits {
ZoneList<v8::internal::Statement*>* NewStatementList(int size, Zone* zone) {
return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
}
+
+ V8_INLINE void AddParameterInitializationBlock(
+ const ParserFormalParameterParsingState& formal_parameters,
+ ZoneList<v8::internal::Statement*>* body, bool* ok);
+
V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
FunctionKind kind = kNormalFunction);
- bool DeclareFormalParameter(Scope* scope, const AstRawString* name,
- bool is_rest) {
- bool is_duplicate = false;
- Variable* var = scope->DeclareParameter(name, VAR, is_rest, &is_duplicate);
- if (is_sloppy(scope->language_mode())) {
- // TODO(sigurds) Mark every parameter as maybe assigned. This is a
- // conservative approximation necessary to account for parameters
- // that are assigned via the arguments array.
- var->set_maybe_assigned();
- }
- return is_duplicate;
- }
-
- void DeclareArrowFunctionParameters(Scope* scope, Expression* expr,
- const Scanner::Location& params_loc,
- FormalParameterErrorLocations* error_locs,
- bool* ok);
+ V8_INLINE void DeclareFormalParameter(
+ ParserFormalParameterParsingState* parsing_state, Expression* name,
+ ExpressionClassifier* classifier, bool is_rest);
void ParseArrowFunctionFormalParameters(
- Scope* scope, Expression* params, const Scanner::Location& params_loc,
- FormalParameterErrorLocations* error_locs, bool* is_rest, bool* ok);
+ ParserFormalParameterParsingState* scope, Expression* params,
+ const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+ bool* ok);
+
+ void ReindexLiterals(const ParserFormalParameterParsingState& parsing_state);
// Temporary glue; these functions will move to ParserBase.
Expression* ParseV8Intrinsic(bool* ok);
@@ -772,13 +796,15 @@ class ParserTraits {
const AstRawString* name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
V8_INLINE void SkipLazyFunctionBody(
int* materialized_literal_count, int* expected_property_count, bool* ok,
Scanner::BookmarkScope* bookmark = nullptr);
V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody(
- const AstRawString* name, int pos, Variable* fvar,
- Token::Value fvar_init_op, FunctionKind kind, bool* ok);
+ const AstRawString* name, int pos,
+ const ParserFormalParameterParsingState& formal_parameters,
+ Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok);
ClassLiteral* ParseClassLiteral(const AstRawString* name,
Scanner::Location class_name_location,
@@ -937,12 +963,96 @@ class Parser : public ParserBase<ParserTraits> {
Block* ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
bool* ok);
- Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
- int* num_decl,
- ZoneList<const AstRawString*>* names,
- const AstRawString** out,
- Scanner::Location* first_initializer_loc,
- Scanner::Location* bindings_loc, bool* ok);
+
+ struct DeclarationDescriptor {
+ enum Kind { NORMAL, PARAMETER };
+ Parser* parser;
+ Scope* declaration_scope;
+ Scope* scope;
+ VariableMode mode;
+ bool is_const;
+ bool needs_init;
+ int declaration_pos;
+ int initialization_pos;
+ Token::Value init_op;
+ Kind declaration_kind;
+ };
+
+ struct DeclarationParsingResult {
+ struct Declaration {
+ Declaration(Expression* pattern, int initializer_position,
+ Expression* initializer)
+ : pattern(pattern),
+ initializer_position(initializer_position),
+ initializer(initializer) {}
+
+ Expression* pattern;
+ int initializer_position;
+ Expression* initializer;
+ };
+
+ DeclarationParsingResult()
+ : declarations(4),
+ first_initializer_loc(Scanner::Location::invalid()),
+ bindings_loc(Scanner::Location::invalid()) {}
+
+ Block* BuildInitializationBlock(ZoneList<const AstRawString*>* names,
+ bool* ok);
+ const AstRawString* SingleName() const;
+
+ DeclarationDescriptor descriptor;
+ List<Declaration> declarations;
+ Scanner::Location first_initializer_loc;
+ Scanner::Location bindings_loc;
+ };
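
As an illustration, a multi-binding statement fills this structure with one Declaration per binding (a rough mapping, not a literal field dump):

    // let x = a, [y, z] = b;
    //   declarations[0]: pattern x,      initializer a
    //   declarations[1]: pattern [y, z], initializer b
    //   first_initializer_loc: roughly the position of "= a"
    //   bindings_loc: the span covering both bindings
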
+
+ class PatternRewriter : private AstVisitor {
+ public:
+ static void DeclareAndInitializeVariables(
+ Block* block, const DeclarationDescriptor* declaration_descriptor,
+ const DeclarationParsingResult::Declaration* declaration,
+ ZoneList<const AstRawString*>* names, bool* ok);
+
+ void set_initializer_position(int pos) { initializer_position_ = pos; }
+
+ private:
+ PatternRewriter() {}
+
+#define DECLARE_VISIT(type) void Visit##type(v8::internal::type* node) override;
+ // Visiting functions for AST nodes make this an AstVisitor.
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ virtual void Visit(AstNode* node) override;
+
+ void RecurseIntoSubpattern(AstNode* pattern, Expression* value) {
+ Expression* old_value = current_value_;
+ current_value_ = value;
+ pattern->Accept(this);
+ current_value_ = old_value;
+ }
+
+ Variable* CreateTempVar(Expression* value = nullptr);
+
+ AstNodeFactory* factory() const { return descriptor_->parser->factory(); }
+ AstValueFactory* ast_value_factory() const {
+ return descriptor_->parser->ast_value_factory();
+ }
+ bool inside_with() const { return descriptor_->parser->inside_with(); }
+ Zone* zone() const { return descriptor_->parser->zone(); }
+
+ Expression* pattern_;
+ int initializer_position_;
+ Block* block_;
+ const DeclarationDescriptor* descriptor_;
+ ZoneList<const AstRawString*>* names_;
+ Expression* current_value_;
+ bool* ok_;
+ };
+
+
+ void ParseVariableDeclarations(VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ bool* ok);
Statement* ParseExpressionOrLabelledStatement(
ZoneList<const AstRawString*>* labels, bool* ok);
IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
@@ -969,6 +1079,12 @@ class Parser : public ParserBase<ParserTraits> {
   // Support for harmony block-scoped bindings.
Block* ParseScopedBlock(ZoneList<const AstRawString*>* labels, bool* ok);
+ // !%_IsSpecObject(result = iterator.next()) &&
+ // %ThrowIteratorResultNotAnObject(result)
+ Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
+ int pos);
+
+
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
@@ -983,7 +1099,8 @@ class Parser : public ParserBase<ParserTraits> {
const AstRawString* name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
ClassLiteral* ParseClassLiteral(const AstRawString* name,
@@ -1010,17 +1127,20 @@ class Parser : public ParserBase<ParserTraits> {
// Parser support
VariableProxy* NewUnresolved(const AstRawString* name, VariableMode mode);
- Variable* Declare(Declaration* declaration, bool resolve, bool* ok);
+ Variable* Declare(Declaration* declaration,
+ DeclarationDescriptor::Kind declaration_kind, bool resolve,
+ bool* ok);
bool TargetStackContainsLabel(const AstRawString* label);
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
void AddAssertIsConstruct(ZoneList<Statement*>* body, int pos);
+ Statement* BuildAssertIsCoercible(Variable* var);
// Factory methods.
FunctionLiteral* DefaultConstructor(bool call_super, Scope* scope, int pos,
- int end_pos);
+ int end_pos, LanguageMode language_mode);
// Skip over a lazy function, either using cached data if we have it, or
// by parsing the function with PreParser. Consumes the ending }.
@@ -1035,10 +1155,14 @@ class Parser : public ParserBase<ParserTraits> {
PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser(
SingletonLogger* logger, Scanner::BookmarkScope* bookmark = nullptr);
+ Block* BuildParameterInitializationBlock(
+ const ParserFormalParameterParsingState& formal_parameters, bool* ok);
+
// Consumes the ending }.
ZoneList<Statement*>* ParseEagerFunctionBody(
- const AstRawString* function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, FunctionKind kind, bool* ok);
+ const AstRawString* function_name, int pos,
+ const ParserFormalParameterParsingState& formal_parameters,
+ Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -1102,10 +1226,11 @@ void ParserTraits::SkipLazyFunctionBody(int* materialized_literal_count,
ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
- const AstRawString* name, int pos, Variable* fvar,
+ const AstRawString* name, int pos,
+ const ParserFormalParameterParsingState& formal_parameters, Variable* fvar,
Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
- return parser_->ParseEagerFunctionBody(name, pos, fvar, fvar_init_op, kind,
- ok);
+ return parser_->ParseEagerFunctionBody(name, pos, formal_parameters, fvar,
+ fvar_init_op, kind, ok);
}
void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
@@ -1182,6 +1307,44 @@ Expression* ParserTraits::SpreadCallNew(
Expression* function, ZoneList<v8::internal::Expression*>* args, int pos) {
return parser_->SpreadCallNew(function, args, pos);
}
+
+
+void ParserTraits::DeclareFormalParameter(
+ ParserFormalParameterParsingState* parsing_state, Expression* pattern,
+ ExpressionClassifier* classifier, bool is_rest) {
+ bool is_duplicate = false;
+ bool is_simple_name = pattern->IsVariableProxy();
+ DCHECK(parser_->allow_harmony_destructuring() || is_simple_name);
+
+ const AstRawString* name = is_simple_name
+ ? pattern->AsVariableProxy()->raw_name()
+ : parser_->ast_value_factory()->empty_string();
+ Variable* var =
+ parsing_state->scope->DeclareParameter(name, VAR, is_rest, &is_duplicate);
+ parsing_state->AddParameter(var, is_simple_name ? nullptr : pattern);
+ if (is_sloppy(parsing_state->scope->language_mode())) {
+ // TODO(sigurds) Mark every parameter as maybe assigned. This is a
+ // conservative approximation necessary to account for parameters
+ // that are assigned via the arguments array.
+ var->set_maybe_assigned();
+ }
+ if (is_duplicate) {
+ classifier->RecordDuplicateFormalParameterError(
+ parser_->scanner()->location());
+ }
+}
+
+
+void ParserTraits::AddParameterInitializationBlock(
+ const ParserFormalParameterParsingState& formal_parameters,
+ ZoneList<v8::internal::Statement*>* body, bool* ok) {
+ auto* init_block =
+ parser_->BuildParameterInitializationBlock(formal_parameters, ok);
+ if (!*ok) return;
+ if (init_block != nullptr) {
+ body->Add(init_block, parser_->zone());
+ }
+}
} } // namespace v8::internal
#endif // V8_PARSER_H_
diff --git a/deps/v8/src/pattern-rewriter.cc b/deps/v8/src/pattern-rewriter.cc
new file mode 100644
index 0000000000..6969cf214e
--- /dev/null
+++ b/deps/v8/src/pattern-rewriter.cc
@@ -0,0 +1,423 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast.h"
+#include "src/messages.h"
+#include "src/parser.h"
+
+namespace v8 {
+
+namespace internal {
+
+
+void Parser::PatternRewriter::DeclareAndInitializeVariables(
+ Block* block, const DeclarationDescriptor* declaration_descriptor,
+ const DeclarationParsingResult::Declaration* declaration,
+ ZoneList<const AstRawString*>* names, bool* ok) {
+ PatternRewriter rewriter;
+
+ rewriter.pattern_ = declaration->pattern;
+ rewriter.initializer_position_ = declaration->initializer_position;
+ rewriter.block_ = block;
+ rewriter.descriptor_ = declaration_descriptor;
+ rewriter.names_ = names;
+ rewriter.ok_ = ok;
+
+ rewriter.RecurseIntoSubpattern(rewriter.pattern_, declaration->initializer);
+}
+
+
+void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
+ Expression* value = current_value_;
+ descriptor_->scope->RemoveUnresolved(pattern->AsVariableProxy());
+
+ // Declare variable.
+ // Note that we *always* must treat the initial value via a separate init
+ // assignment for variables and constants because the value must be assigned
+ // when the variable is encountered in the source. But the variable/constant
+ // is declared (and set to 'undefined') upon entering the function within
+ // which the variable or constant is declared. Only function variables have
+ // an initial value in the declaration (because they are initialized upon
+ // entering the function).
+ //
+ // If we have a legacy const declaration, in an inner scope, the proxy
+ // is always bound to the declared variable (independent of possibly
+ // surrounding 'with' statements).
+ // For let/const declarations in harmony mode, we can also immediately
+ // pre-resolve the proxy because it resides in the same scope as the
+ // declaration.
+ Parser* parser = descriptor_->parser;
+ const AstRawString* name = pattern->raw_name();
+ VariableProxy* proxy = parser->NewUnresolved(name, descriptor_->mode);
+ Declaration* declaration = factory()->NewVariableDeclaration(
+ proxy, descriptor_->mode, descriptor_->scope,
+ descriptor_->declaration_pos);
+ Variable* var = parser->Declare(declaration, descriptor_->declaration_kind,
+ descriptor_->mode != VAR, ok_);
+ if (!*ok_) return;
+ DCHECK_NOT_NULL(var);
+ DCHECK(!proxy->is_resolved() || proxy->var() == var);
+ var->set_initializer_position(initializer_position_);
+
+ DCHECK(initializer_position_ != RelocInfo::kNoPosition);
+
+ if (descriptor_->declaration_scope->num_var_or_const() >
+ kMaxNumFunctionLocals) {
+ parser->ReportMessage(MessageTemplate::kTooManyVariables);
+ *ok_ = false;
+ return;
+ }
+ if (names_) {
+ names_->Add(name, zone());
+ }
+
+ // Initialize variables if needed. A
+ // declaration of the form:
+ //
+ // var v = x;
+ //
+ // is syntactic sugar for:
+ //
+ // var v; v = x;
+ //
+ // In particular, we need to re-lookup 'v' (in scope_, not
+ // declaration_scope) as it may be a different 'v' than the 'v' in the
+ // declaration (e.g., if we are inside a 'with' statement or 'catch'
+ // block).
+ //
+ // However, note that const declarations are different! A const
+ // declaration of the form:
+ //
+ // const c = x;
+ //
+ // is *not* syntactic sugar for:
+ //
+ // const c; c = x;
+ //
+ // The "variable" c initialized to x is the same as the declared
+ // one - there is no re-lookup (see the last parameter of the
+ // Declare() call above).
+ Scope* initialization_scope = descriptor_->is_const
+ ? descriptor_->declaration_scope
+ : descriptor_->scope;
+
+
+ // Global variable declarations must be compiled in a specific
+ // way. When the script containing the global variable declaration
+ // is entered, the global variable must be declared, so that if it
+ // doesn't exist (on the global object itself, see ES5 errata) it
+ // gets created with an initial undefined value. This is handled
+ // by the declarations part of the function representing the
+ // top-level global code; see Runtime::DeclareGlobalVariable. If
+ // it already exists (in the object or in a prototype), it is
+ // *not* touched until the variable declaration statement is
+ // executed.
+ //
+ // Executing the variable declaration statement will always
+ // guarantee to give the global object an own property.
+ // This way, global variable declarations can shadow
+ // properties in the prototype chain, but only after the variable
+ // declaration statement has been executed. This is important in
+ // browsers where the global object (window) has lots of
+ // properties defined in prototype objects.
+ if (initialization_scope->is_script_scope() &&
+ !IsLexicalVariableMode(descriptor_->mode)) {
+  // Compute the arguments for the runtime call.
+ ZoneList<Expression*>* arguments =
+ new (zone()) ZoneList<Expression*>(3, zone());
+ // We have at least 1 parameter.
+ arguments->Add(
+ factory()->NewStringLiteral(name, descriptor_->declaration_pos),
+ zone());
+ CallRuntime* initialize;
+
+ if (descriptor_->is_const) {
+ arguments->Add(value, zone());
+ value = NULL; // zap the value to avoid the unnecessary assignment
+
+ // Construct the call to Runtime_InitializeConstGlobal
+ // and add it to the initialization statement block.
+ // Note that the function does different things depending on
+ // the number of arguments (1 or 2).
+ initialize = factory()->NewCallRuntime(
+ ast_value_factory()->initialize_const_global_string(),
+ Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments,
+ descriptor_->initialization_pos);
+ } else {
+ // Add language mode.
+ // We may want to pass singleton to avoid Literal allocations.
+ LanguageMode language_mode = initialization_scope->language_mode();
+ arguments->Add(factory()->NewNumberLiteral(language_mode,
+ descriptor_->declaration_pos),
+ zone());
+
+ // Be careful not to assign a value to the global variable if
+ // we're in a with. The initialization value should not
+ // necessarily be stored in the global object in that case,
+ // which is why we need to generate a separate assignment node.
+ if (value != NULL && !inside_with()) {
+ arguments->Add(value, zone());
+ value = NULL; // zap the value to avoid the unnecessary assignment
+ // Construct the call to Runtime_InitializeVarGlobal
+ // and add it to the initialization statement block.
+ initialize = factory()->NewCallRuntime(
+ ast_value_factory()->initialize_var_global_string(),
+ Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments,
+ descriptor_->declaration_pos);
+ } else {
+ initialize = NULL;
+ }
+ }
+
+ if (initialize != NULL) {
+ block_->AddStatement(
+ factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
+ zone());
+ }
+ } else if (value != nullptr && (descriptor_->needs_init ||
+ IsLexicalVariableMode(descriptor_->mode))) {
+ // Constant initializations always assign to the declared constant which
+ // is always at the function scope level. This is only relevant for
+ // dynamically looked-up variables and constants (the
+ // start context for constant lookups is always the function context,
+ // while it is the top context for var declared variables). Sigh...
+ // For 'let' and 'const' declared variables in harmony mode the
+ // initialization also always assigns to the declared variable.
+ DCHECK_NOT_NULL(proxy);
+ DCHECK_NOT_NULL(proxy->var());
+ DCHECK_NOT_NULL(value);
+ Assignment* assignment = factory()->NewAssignment(
+ descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
+ block_->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
+ value = NULL;
+ }
+
+ // Add an assignment node to the initialization statement block if we still
+ // have a pending initialization value.
+ if (value != NULL) {
+ DCHECK(descriptor_->mode == VAR);
+ // 'var' initializations are simply assignments (with all the consequences
+ // if they are inside a 'with' statement - they may change a 'with' object
+ // property).
+ VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
+ Assignment* assignment = factory()->NewAssignment(
+ descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
+ block_->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
+ }
+}
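
The 'with' caveat above is observable in sloppy-mode code, which is why the pending 'var' initialization re-looks the name up instead of binding it eagerly:

    var v = 0;
    var o = { v: 1 };
    with (o) {
      var v = 2;   // the declaration hoists outward, but this assignment
    }              // resolves through the with object and writes o.v
    // afterwards: v === 0 and o.v === 2
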
+
+
+Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
+ auto temp_scope = descriptor_->parser->scope_->DeclarationScope();
+ auto temp = temp_scope->NewTemporary(ast_value_factory()->empty_string());
+ if (value != nullptr) {
+ auto assignment = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(temp), value,
+ RelocInfo::kNoPosition);
+
+ block_->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
+ }
+ return temp;
+}
+
+
+void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
+ auto temp = CreateTempVar(current_value_);
+
+ block_->AddStatement(descriptor_->parser->BuildAssertIsCoercible(temp),
+ zone());
+
+ for (ObjectLiteralProperty* property : *pattern->properties()) {
+ RecurseIntoSubpattern(
+ property->value(),
+ factory()->NewProperty(factory()->NewVariableProxy(temp),
+ property->key(), RelocInfo::kNoPosition));
+ }
+}
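
In effect, each property becomes a recursive sub-binding against a property load off the temp (pseudo-JS; %temp is the CreateTempVar result):

    // let { a, b: { c } } = obj;
    // becomes roughly:
    //   %temp = obj;
    //   if (%temp === null || %temp === undefined) throw TypeError(...);
    //   bind a     from %temp.a
    //   bind { c } from %temp.b   // recursion handles the nested pattern
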
+
+
+void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
+ auto iterator = CreateTempVar(
+ descriptor_->parser->GetIterator(current_value_, factory()));
+ auto done = CreateTempVar(
+ factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
+ auto result = CreateTempVar();
+ auto v = CreateTempVar();
+
+ Spread* spread = nullptr;
+ for (Expression* value : *node->values()) {
+ if (value->IsSpread()) {
+ spread = value->AsSpread();
+ break;
+ }
+
+ // if (!done) {
+ // result = IteratorNext(iterator);
+ // v = (done = result.done) ? undefined : result.value;
+ // }
+ auto next_block =
+ factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
+ next_block->AddStatement(factory()->NewExpressionStatement(
+ descriptor_->parser->BuildIteratorNextResult(
+ factory()->NewVariableProxy(iterator),
+ result, RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+
+ auto assign_to_done = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(done),
+ factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(ast_value_factory()->done_string(),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ auto next_value = factory()->NewConditional(
+ assign_to_done, factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(ast_value_factory()->value_string(),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ next_block->AddStatement(
+ factory()->NewExpressionStatement(
+ factory()->NewAssignment(Token::ASSIGN,
+ factory()->NewVariableProxy(v), next_value,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+
+ auto if_statement = factory()->NewIfStatement(
+ factory()->NewUnaryOperation(Token::NOT,
+ factory()->NewVariableProxy(done),
+ RelocInfo::kNoPosition),
+ next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ block_->AddStatement(if_statement, zone());
+
+ if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
+ RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
+ }
+ }
+
+ if (spread != nullptr) {
+ // array = [];
+ // if (!done) $concatIterableToArray(array, iterator);
+ auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
+ auto array = CreateTempVar(factory()->NewArrayLiteral(
+ empty_exprs,
+ // Reuse pattern's literal index - it is unused since there is no
+ // actual literal allocated.
+ node->literal_index(), is_strong(descriptor_->parser->language_mode()),
+ RelocInfo::kNoPosition));
+
+ auto arguments = new (zone()) ZoneList<Expression*>(2, zone());
+ arguments->Add(factory()->NewVariableProxy(array), zone());
+ arguments->Add(factory()->NewVariableProxy(iterator), zone());
+ auto spread_into_array_call = factory()->NewCallRuntime(
+ ast_value_factory()->concat_iterable_to_array_string(), nullptr,
+ arguments, RelocInfo::kNoPosition);
+
+ auto if_statement = factory()->NewIfStatement(
+ factory()->NewUnaryOperation(Token::NOT,
+ factory()->NewVariableProxy(done),
+ RelocInfo::kNoPosition),
+ factory()->NewExpressionStatement(spread_into_array_call,
+ RelocInfo::kNoPosition),
+ factory()->NewEmptyStatement(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ block_->AddStatement(if_statement, zone());
+
+
+ RecurseIntoSubpattern(spread->expression(),
+ factory()->NewVariableProxy(array));
+ }
+}
+
+
+void Parser::PatternRewriter::VisitAssignment(Assignment* node) {
+ // let {<pattern> = <init>} = <value>
+ // becomes
+ // temp = <value>;
+ // <pattern> = temp === undefined ? <init> : temp;
+ DCHECK(node->op() == Token::ASSIGN);
+ auto temp = CreateTempVar(current_value_);
+ Expression* is_undefined = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(temp),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ Expression* value = factory()->NewConditional(
+ is_undefined, node->value(), factory()->NewVariableProxy(temp),
+ RelocInfo::kNoPosition);
+ RecurseIntoSubpattern(node->target(), value);
+}
+
+
+void Parser::PatternRewriter::VisitSpread(Spread* node) {
+ // TODO(dslomov): implement.
+}
+
+
+// =============== UNREACHABLE =============================
+
+void Parser::PatternRewriter::Visit(AstNode* node) { UNREACHABLE(); }
+
+#define NOT_A_PATTERN(Node) \
+ void Parser::PatternRewriter::Visit##Node(v8::internal::Node*) { \
+ UNREACHABLE(); \
+ }
+
+NOT_A_PATTERN(BinaryOperation)
+NOT_A_PATTERN(Block)
+NOT_A_PATTERN(BreakStatement)
+NOT_A_PATTERN(Call)
+NOT_A_PATTERN(CallNew)
+NOT_A_PATTERN(CallRuntime)
+NOT_A_PATTERN(CaseClause)
+NOT_A_PATTERN(ClassLiteral)
+NOT_A_PATTERN(CompareOperation)
+NOT_A_PATTERN(Conditional)
+NOT_A_PATTERN(ContinueStatement)
+NOT_A_PATTERN(CountOperation)
+NOT_A_PATTERN(DebuggerStatement)
+NOT_A_PATTERN(DoWhileStatement)
+NOT_A_PATTERN(EmptyStatement)
+NOT_A_PATTERN(ExportDeclaration)
+NOT_A_PATTERN(ExpressionStatement)
+NOT_A_PATTERN(ForInStatement)
+NOT_A_PATTERN(ForOfStatement)
+NOT_A_PATTERN(ForStatement)
+NOT_A_PATTERN(FunctionDeclaration)
+NOT_A_PATTERN(FunctionLiteral)
+NOT_A_PATTERN(IfStatement)
+NOT_A_PATTERN(ImportDeclaration)
+NOT_A_PATTERN(Literal)
+NOT_A_PATTERN(NativeFunctionLiteral)
+NOT_A_PATTERN(Property)
+NOT_A_PATTERN(RegExpLiteral)
+NOT_A_PATTERN(ReturnStatement)
+NOT_A_PATTERN(SuperPropertyReference)
+NOT_A_PATTERN(SuperCallReference)
+NOT_A_PATTERN(SwitchStatement)
+NOT_A_PATTERN(ThisFunction)
+NOT_A_PATTERN(Throw)
+NOT_A_PATTERN(TryCatchStatement)
+NOT_A_PATTERN(TryFinallyStatement)
+NOT_A_PATTERN(UnaryOperation)
+NOT_A_PATTERN(VariableDeclaration)
+NOT_A_PATTERN(WhileStatement)
+NOT_A_PATTERN(WithStatement)
+NOT_A_PATTERN(Yield)
+
+#undef NOT_A_PATTERN
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index f0449d82a9..10a10320a6 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -17,32 +17,35 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
if (!has_pending_error_) return;
MessageLocation location(script, start_position_, end_position_);
Factory* factory = isolate->factory();
- bool has_arg = arg_ != NULL || char_arg_ != NULL || !handle_arg_.is_null();
- Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0);
+ Handle<String> argument;
if (arg_ != NULL) {
- Handle<String> arg_string = arg_->string();
- elements->set(0, *arg_string);
+ argument = arg_->string();
} else if (char_arg_ != NULL) {
- Handle<String> arg_string =
+ argument =
factory->NewStringFromUtf8(CStrVector(char_arg_)).ToHandleChecked();
- elements->set(0, *arg_string);
} else if (!handle_arg_.is_null()) {
- elements->set(0, *handle_arg_);
+ argument = handle_arg_;
}
isolate->debug()->OnCompileError(script);
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
Handle<Object> error;
-
switch (error_type_) {
case kReferenceError:
- error = factory->NewReferenceError(message_, array);
+ error = factory->NewError("MakeReferenceError", message_, argument);
break;
case kSyntaxError:
- error = factory->NewSyntaxError(message_, array);
+ error = factory->NewError("MakeSyntaxError", message_, argument);
+ break;
+ default:
+ UNREACHABLE();
break;
}
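+  // The start/end position properties attached below can only be set on a
+  // JSObject; any other error value is thrown as-is.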
+ if (!error->IsJSObject()) {
+ isolate->Throw(*error, &location);
+ return;
+ }
+
Handle<JSObject> jserror = Handle<JSObject>::cast(error);
Handle<Name> key_start_pos = factory->error_start_pos_symbol();
@@ -60,5 +63,5 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
isolate->Throw(*error, &location);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
index c75f23d039..6190d49f52 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -8,6 +8,7 @@
#include "src/base/macros.h"
#include "src/globals.h"
#include "src/handles.h"
+#include "src/messages.h"
namespace v8 {
namespace internal {
@@ -24,13 +25,14 @@ class PendingCompilationErrorHandler {
: has_pending_error_(false),
start_position_(-1),
end_position_(-1),
- message_(nullptr),
+ message_(MessageTemplate::kNone),
arg_(nullptr),
char_arg_(nullptr),
error_type_(kSyntaxError) {}
void ReportMessageAt(int start_position, int end_position,
- const char* message, const char* arg = nullptr,
+ MessageTemplate::Template message,
+ const char* arg = nullptr,
ParseErrorType error_type = kSyntaxError) {
if (has_pending_error_) return;
has_pending_error_ = true;
@@ -43,7 +45,8 @@ class PendingCompilationErrorHandler {
}
void ReportMessageAt(int start_position, int end_position,
- const char* message, const AstRawString* arg,
+ MessageTemplate::Template message,
+ const AstRawString* arg,
ParseErrorType error_type = kSyntaxError) {
if (has_pending_error_) return;
has_pending_error_ = true;
@@ -56,7 +59,7 @@ class PendingCompilationErrorHandler {
}
void ReportMessageAt(int start_position, int end_position,
- const char* message, Handle<String> arg,
+ MessageTemplate::Template message, Handle<String> arg,
ParseErrorType error_type = kSyntaxError) {
if (has_pending_error_) return;
has_pending_error_ = true;
@@ -77,7 +80,7 @@ class PendingCompilationErrorHandler {
bool has_pending_error_;
int start_position_;
int end_position_;
- const char* message_;
+ MessageTemplate::Template message_;
const AstRawString* arg_;
const char* char_arg_;
Handle<String> handle_arg_;
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
deleted file mode 100644
index 819fe4eef4..0000000000
--- a/deps/v8/src/perf-jit.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/perf-jit.h"
-
-#if V8_OS_LINUX
-#include <fcntl.h>
-#include <unistd.h>
-#include "src/third_party/kernel/tools/perf/util/jitdump.h"
-#endif // V8_OS_LINUX
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_LINUX
-
-const char PerfJitLogger::kFilenameFormatString[] = "perfjit-%d.dump";
-
-// Extra padding for the PID in the filename
-const int PerfJitLogger::kFilenameBufferPadding = 16;
-
-
-PerfJitLogger::PerfJitLogger() : perf_output_handle_(NULL), code_index_(0) {
- if (!base::TimeTicks::KernelTimestampAvailable()) {
- FATAL("Cannot profile with perf JIT - kernel timestamps not available.");
- }
-
- // Open the perf JIT dump file.
- int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
- ScopedVector<char> perf_dump_name(bufferSize);
- int size = SNPrintF(perf_dump_name, kFilenameFormatString,
- base::OS::GetCurrentProcessId());
- CHECK_NE(size, -1);
- perf_output_handle_ =
- base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
- CHECK_NOT_NULL(perf_output_handle_);
- setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
-
- LogWriteHeader();
-}
-
-
-PerfJitLogger::~PerfJitLogger() {
- fclose(perf_output_handle_);
- perf_output_handle_ = NULL;
-}
-
-
-uint64_t PerfJitLogger::GetTimestamp() {
- return static_cast<int64_t>(
- base::TimeTicks::KernelTimestampNow().ToInternalValue());
-}
-
-
-void PerfJitLogger::LogRecordedBuffer(Code* code, SharedFunctionInfo*,
- const char* name, int length) {
- DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
- DCHECK(perf_output_handle_ != NULL);
-
- const char* code_name = name;
- uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
- uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
- : code->instruction_size();
-
- static const char string_terminator[] = "\0";
-
- jr_code_load code_load;
- code_load.p.id = JIT_CODE_LOAD;
- code_load.p.total_size = sizeof(code_load) + length + 1 + code_size;
- code_load.p.timestamp = GetTimestamp();
- code_load.pid = static_cast<uint32_t>(base::OS::GetCurrentProcessId());
- code_load.tid = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
- code_load.vma = 0x0; // Our addresses are absolute.
- code_load.code_addr = reinterpret_cast<uint64_t>(code_pointer);
- code_load.code_size = code_size;
- code_load.code_index = code_index_;
-
- code_index_++;
-
- LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
- LogWriteBytes(code_name, length);
- LogWriteBytes(string_terminator, 1);
- LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
-}
-
-
-void PerfJitLogger::CodeMoveEvent(Address from, Address to) {
- // Code relocation not supported.
- UNREACHABLE();
-}
-
-
-void PerfJitLogger::CodeDeleteEvent(Address from) {
- // V8 does not send notification on code unload
-}
-
-
-void PerfJitLogger::SnapshotPositionEvent(Address addr, int pos) {}
-
-
-void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
- size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
- DCHECK(static_cast<size_t>(size) == rv);
- USE(rv);
-}
-
-
-void PerfJitLogger::LogWriteHeader() {
- DCHECK(perf_output_handle_ != NULL);
- jitheader header;
- header.magic = JITHEADER_MAGIC;
- header.version = JITHEADER_VERSION;
- header.total_size = sizeof(jitheader);
- header.pad1 = 0xdeadbeef;
- header.elf_mach = GetElfMach();
- header.pid = base::OS::GetCurrentProcessId();
- header.timestamp =
- static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
- LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
-}
-
-#endif // V8_OS_LINUX
-}
-} // namespace v8::internal
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
deleted file mode 100644
index e1436cd9cb..0000000000
--- a/deps/v8/src/perf-jit.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PERF_JIT_H_
-#define V8_PERF_JIT_H_
-
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_LINUX
-
-// Linux perf tool logging support
-class PerfJitLogger : public CodeEventLogger {
- public:
- PerfJitLogger();
- virtual ~PerfJitLogger();
-
- virtual void CodeMoveEvent(Address from, Address to);
- virtual void CodeDeleteEvent(Address from);
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {}
- virtual void SnapshotPositionEvent(Address addr, int pos);
-
- private:
- uint64_t GetTimestamp();
- virtual void LogRecordedBuffer(Code* code, SharedFunctionInfo* shared,
- const char* name, int length);
-
- // Extension added to V8 log file name to get the low-level log name.
- static const char kFilenameFormatString[];
- static const int kFilenameBufferPadding;
-
- // File buffer size of the low-level log. We don't use the default to
- // minimize the associated overhead.
- static const int kLogBufferSize = 2 * MB;
-
- void LogWriteBytes(const char* bytes, int size);
- void LogWriteHeader();
-
- static const uint32_t kElfMachIA32 = 3;
- static const uint32_t kElfMachX64 = 62;
- static const uint32_t kElfMachARM = 40;
- static const uint32_t kElfMachMIPS = 10;
-
- uint32_t GetElfMach() {
-#if V8_TARGET_ARCH_IA32
- return kElfMachIA32;
-#elif V8_TARGET_ARCH_X64
- return kElfMachX64;
-#elif V8_TARGET_ARCH_ARM
- return kElfMachARM;
-#elif V8_TARGET_ARCH_MIPS
- return kElfMachMIPS;
-#else
- UNIMPLEMENTED();
- return 0;
-#endif
- }
-
- FILE* perf_output_handle_;
- uint64_t code_index_;
-};
-
-#else
-
-// PerfJitLogger is only implemented on Linux
-class PerfJitLogger : public CodeEventLogger {
- public:
- virtual void CodeMoveEvent(Address from, Address to) { UNIMPLEMENTED(); }
-
- virtual void CodeDeleteEvent(Address from) { UNIMPLEMENTED(); }
-
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
- UNIMPLEMENTED();
- }
-
- virtual void SnapshotPositionEvent(Address addr, int pos) { UNIMPLEMENTED(); }
-
- virtual void LogRecordedBuffer(Code* code, SharedFunctionInfo* shared,
- const char* name, int length) {
- UNIMPLEMENTED();
- }
-};
-
-#endif // V8_OS_LINUX
-}
-} // namespace v8::internal
-#endif
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index d95c7ec596..0e759efec1 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -94,6 +94,14 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ if (FLAG_enable_embedded_constant_pool &&
+ Assembler::IsConstantPoolLoadStart(pc_)) {
+    // For an embedded constant pool we return the PC itself, since this
+    // function is used by the serializer, which expects the address to
+    // reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ }
+
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -108,6 +116,14 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
+ if (FLAG_enable_embedded_constant_pool) {
+ Address constant_pool = host_->constant_pool();
+ DCHECK(constant_pool);
+ ConstantPoolEntry::Access access;
+ if (Assembler::IsConstantPoolLoadStart(pc_, &access))
+ return Assembler::target_constant_pool_address_at(
+ pc_, constant_pool, access, ConstantPoolEntry::INTPTR);
+ }
UNREACHABLE();
return NULL;
}
@@ -143,12 +159,28 @@ Address Assembler::target_address_from_return_address(Address pc) {
// mtlr ip
// blrl
// @ return address
- return pc - (kMovInstructions + 2) * kInstrSize;
+ int len;
+ ConstantPoolEntry::Access access;
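+  // With an embedded constant pool, the mov is a single load (REGULAR) or an
+  // addis + load pair (OVERFLOWED) rather than the full lis/ori sequence.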
+ if (FLAG_enable_embedded_constant_pool &&
+ IsConstantPoolLoadEnd(pc - 3 * kInstrSize, &access)) {
+ len = (access == ConstantPoolEntry::OVERFLOWED) ? 2 : 1;
+ } else {
+ len = kMovInstructionsNoConstantPool;
+ }
+ return pc - (len + 2) * kInstrSize;
}
Address Assembler::return_address_from_call_start(Address pc) {
- return pc + (kMovInstructions + 2) * kInstrSize;
+ int len;
+ ConstantPoolEntry::Access access;
+ if (FLAG_enable_embedded_constant_pool &&
+ IsConstantPoolLoadStart(pc, &access)) {
+ len = (access == ConstantPoolEntry::OVERFLOWED) ? 2 : 1;
+ } else {
+ len = kMovInstructionsNoConstantPool;
+ }
+ return pc + (len + 2) * kInstrSize;
}
@@ -226,8 +258,10 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
}
-static const int kNoCodeAgeInstructions = 6;
-static const int kCodeAgingInstructions = Assembler::kMovInstructions + 3;
+static const int kNoCodeAgeInstructions =
+ FLAG_enable_embedded_constant_pool ? 7 : 6;
+static const int kCodeAgingInstructions =
+ Assembler::kMovInstructionsNoConstantPool + 3;
static const int kNoCodeAgeSequenceInstructions =
((kNoCodeAgeInstructions >= kCodeAgingInstructions)
? kNoCodeAgeInstructions
@@ -448,8 +482,14 @@ bool Operand::is_reg() const { return rm_.is_valid(); }
// Fetch the 32bit value from the FIXED_SEQUENCE lis/ori
-Address Assembler::target_address_at(Address pc,
- ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
+ if (FLAG_enable_embedded_constant_pool && constant_pool) {
+ ConstantPoolEntry::Access access;
+ if (IsConstantPoolLoadStart(pc, &access))
+ return Memory::Address_at(target_constant_pool_address_at(
+ pc, constant_pool, access, ConstantPoolEntry::INTPTR));
+ }
+
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
// Interpret 2 instructions generated by lis/ori
@@ -475,6 +515,127 @@ Address Assembler::target_address_at(Address pc,
}
+#if V8_TARGET_ARCH_PPC64
+const int kLoadIntptrOpcode = LD;
+#else
+const int kLoadIntptrOpcode = LWZ;
+#endif
+
+// Constant pool load sequence detection:
+// 1) REGULAR access:
+// load <dst>, kConstantPoolRegister + <offset>
+//
+// 2) OVERFLOWED access:
+// addis <scratch>, kConstantPoolRegister, <offset_high>
+// load <dst>, <scratch> + <offset_low>
+bool Assembler::IsConstantPoolLoadStart(Address pc,
+ ConstantPoolEntry::Access* access) {
+ Instr instr = instr_at(pc);
+ int opcode = instr & kOpcodeMask;
+ if (!GetRA(instr).is(kConstantPoolRegister)) return false;
+ bool overflowed = (opcode == ADDIS);
+#ifdef DEBUG
+ if (overflowed) {
+ opcode = instr_at(pc + kInstrSize) & kOpcodeMask;
+ }
+ DCHECK(opcode == kLoadIntptrOpcode || opcode == LFD);
+#endif
+ if (access) {
+ *access = (overflowed ? ConstantPoolEntry::OVERFLOWED
+ : ConstantPoolEntry::REGULAR);
+ }
+ return true;
+}
+
+
+bool Assembler::IsConstantPoolLoadEnd(Address pc,
+ ConstantPoolEntry::Access* access) {
+ Instr instr = instr_at(pc);
+ int opcode = instr & kOpcodeMask;
+ bool overflowed = false;
+ if (!(opcode == kLoadIntptrOpcode || opcode == LFD)) return false;
+ if (!GetRA(instr).is(kConstantPoolRegister)) {
+ instr = instr_at(pc - kInstrSize);
+ opcode = instr & kOpcodeMask;
+ if ((opcode != ADDIS) || !GetRA(instr).is(kConstantPoolRegister)) {
+ return false;
+ }
+ overflowed = true;
+ }
+ if (access) {
+ *access = (overflowed ? ConstantPoolEntry::OVERFLOWED
+ : ConstantPoolEntry::REGULAR);
+ }
+ return true;
+}
+
+
+int Assembler::GetConstantPoolOffset(Address pc,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ bool overflowed = (access == ConstantPoolEntry::OVERFLOWED);
+#ifdef DEBUG
+ ConstantPoolEntry::Access access_check =
+ static_cast<ConstantPoolEntry::Access>(-1);
+ DCHECK(IsConstantPoolLoadStart(pc, &access_check));
+ DCHECK(access_check == access);
+#endif
+ int offset;
+ if (overflowed) {
+ offset = (instr_at(pc) & kImm16Mask) << 16;
+ offset += SIGN_EXT_IMM16(instr_at(pc + kInstrSize) & kImm16Mask);
+ DCHECK(!is_int16(offset));
+ } else {
+ offset = SIGN_EXT_IMM16((instr_at(pc) & kImm16Mask));
+ }
+ return offset;
+}
+
+
+void Assembler::PatchConstantPoolAccessInstruction(
+ int pc_offset, int offset, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ Address pc = buffer_ + pc_offset;
+ bool overflowed = (access == ConstantPoolEntry::OVERFLOWED);
+#ifdef DEBUG
+ ConstantPoolEntry::Access access_check =
+ static_cast<ConstantPoolEntry::Access>(-1);
+ DCHECK(IsConstantPoolLoadStart(pc, &access_check));
+ DCHECK(access_check == access);
+ DCHECK(overflowed != is_int16(offset));
+#endif
+ if (overflowed) {
+ int hi_word = static_cast<int>(offset >> 16);
+ int lo_word = static_cast<int>(offset & 0xffff);
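+    // The load sign-extends its 16-bit displacement, so pre-increment the
+    // high word whenever the low word will be interpreted as negative.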
+ if (lo_word & 0x8000) hi_word++;
+
+ Instr instr1 = instr_at(pc);
+ Instr instr2 = instr_at(pc + kInstrSize);
+ instr1 &= ~kImm16Mask;
+ instr1 |= (hi_word & kImm16Mask);
+ instr2 &= ~kImm16Mask;
+ instr2 |= (lo_word & kImm16Mask);
+ instr_at_put(pc, instr1);
+ instr_at_put(pc + kInstrSize, instr2);
+ } else {
+ Instr instr = instr_at(pc);
+ instr &= ~kImm16Mask;
+ instr |= (offset & kImm16Mask);
+ instr_at_put(pc, instr);
+ }
+}
+
+
+Address Assembler::target_constant_pool_address_at(
+ Address pc, Address constant_pool, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ Address addr = constant_pool;
+ DCHECK(addr);
+ addr += GetConstantPoolOffset(pc, access, type);
+ return addr;
+}
+
+
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the mov instructions etc.
@@ -497,10 +658,18 @@ void Assembler::deserialization_set_target_internal_reference_at(
// This code assumes the FIXED_SEQUENCE of lis/ori
-void Assembler::set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
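+  // If the target lives in a constant pool slot, patch the slot directly;
+  // a data-only update needs no icache flush, hence the early return.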
+ if (FLAG_enable_embedded_constant_pool && constant_pool) {
+ ConstantPoolEntry::Access access;
+ if (IsConstantPoolLoadStart(pc, &access)) {
+ Memory::Address_at(target_constant_pool_address_at(
+ pc, constant_pool, access, ConstantPoolEntry::INTPTR)) = target;
+ return;
+ }
+ }
+
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
// Interpret 2 instructions generated by lis/ori
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 7778ab1ce1..b74a9f17cb 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -148,13 +148,17 @@ const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially
// coded. Being specially coded on PPC means that it is a lis/ori
- // instruction sequence, and these are always the case inside code
- // objects.
+ // instruction sequence or is a constant pool entry, and these are
+ // always the case inside code objects.
return true;
}
bool RelocInfo::IsInConstantPool() {
+ if (FLAG_enable_embedded_constant_pool) {
+ Address constant_pool = host_->constant_pool();
+ return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
+ }
return false;
}
@@ -201,11 +205,13 @@ MemOperand::MemOperand(Register ra, Register rb) {
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
+ constant_pool_entry_sharing_blocked_nesting_ = 0;
// We leave space (kMaxBlockTrampolineSectionSize)
// for BlockTrampolinePoolScope buffer.
next_buffer_check_ =
@@ -213,6 +219,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
kMaxBlockTrampolineSectionSize;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
+ optimizable_cmpi_pos_ = -1;
trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
ClearRecordedAstId();
@@ -221,6 +228,9 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
+ // Emit constant pool if necessary.
+ int constant_pool_offset = EmitConstantPool();
+
EmitRelocations();
// Set up code descriptor.
@@ -228,16 +238,15 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->constant_pool_size =
+ (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
}
void Assembler::Align(int m) {
-#if V8_TARGET_ARCH_PPC64
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
-#else
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
-#endif
+ DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -329,6 +338,9 @@ bool Assembler::IsRlwinm(Instr instr) {
}
+bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
+
+
#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
return (((instr & kOpcodeMask) == EXT5) &&
@@ -471,7 +483,8 @@ void Assembler::target_at_put(int pos, int target_pos) {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- kMovInstructions, CodePatcher::DONT_FLUSH);
+ kMovInstructionsNoConstantPool,
+ CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
patcher.masm()->bitwise_mov(dst, target_pos);
break;
@@ -480,7 +493,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
- patcher.masm()->emit_ptr(target_pos);
+ patcher.masm()->dp(target_pos);
break;
}
default:
@@ -1018,9 +1031,17 @@ void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
int L = 0;
+ int pos = pc_offset();
DCHECK(is_int16(imm16));
DCHECK(cr.code() >= 0 && cr.code() <= 7);
imm16 &= kImm16Mask;
+
+  // For cmpwi against 0, save the position and cr for later examination
+  // of a potential optimization.
+ if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
+ optimizable_cmpi_pos_ = pos;
+ cmpi_cr_ = cr;
+ }
emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
@@ -1492,13 +1513,56 @@ void Assembler::function_descriptor() {
Label instructions;
DCHECK(pc_offset() == 0);
emit_label_addr(&instructions);
- emit_ptr(0);
- emit_ptr(0);
+ dp(0);
+ dp(0);
bind(&instructions);
#endif
}
+int Assembler::instructions_required_for_mov(Register dst,
+ const Operand& src) const {
+ bool canOptimize =
+ !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+ if (use_constant_pool_for_mov(dst, src, canOptimize)) {
+ if (ConstantPoolAccessIsInOverflow()) {
+ return kMovInstructionsConstantPool + 1;
+ }
+ return kMovInstructionsConstantPool;
+ }
+ DCHECK(!canOptimize);
+ return kMovInstructionsNoConstantPool;
+}
+
+
+bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
+ bool canOptimize) const {
+ if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
+ // If there is no constant pool available, we must use a mov
+ // immediate sequence.
+ return false;
+ }
+
+ intptr_t value = src.immediate();
+#if V8_TARGET_ARCH_PPC64
+ bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
+#else
+ bool allowOverflow = !(canOptimize || dst.is(r0));
+#endif
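+  // The overflowed sequence reuses dst as the base register of its second
+  // instruction, which r0 cannot be (RA = 0 encodes the literal zero on PPC).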
+ if (canOptimize && is_int16(value)) {
+ // Prefer a single-instruction load-immediate.
+ return false;
+ }
+ if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
+ // Prefer non-relocatable two-instruction bitwise-mov32 over
+ // overflow sequence.
+ return false;
+ }
+
+ return true;
+}
+
+
void Assembler::EnsureSpaceFor(int space_needed) {
if (buffer_space() <= (kGap + space_needed)) {
GrowBuffer(space_needed);
@@ -1531,6 +1595,30 @@ void Assembler::mov(Register dst, const Operand& src) {
canOptimize =
!(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
+ if (use_constant_pool_for_mov(dst, src, canOptimize)) {
+ DCHECK(is_constant_pool_available());
+ if (relocatable) {
+ RecordRelocInfo(src.rmode_);
+ }
+ ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
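+    // The zero displacements emitted below are placeholders; they are fixed
+    // up by PatchConstantPoolAccessInstruction() once the pool is laid out.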
+#if V8_TARGET_ARCH_PPC64
+ if (access == ConstantPoolEntry::OVERFLOWED) {
+ addis(dst, kConstantPoolRegister, Operand::Zero());
+ ld(dst, MemOperand(dst, 0));
+ } else {
+ ld(dst, MemOperand(kConstantPoolRegister, 0));
+ }
+#else
+ if (access == ConstantPoolEntry::OVERFLOWED) {
+ addis(dst, kConstantPoolRegister, Operand::Zero());
+ lwz(dst, MemOperand(dst, 0));
+ } else {
+ lwz(dst, MemOperand(kConstantPoolRegister, 0));
+ }
+#endif
+ return;
+ }
+
if (canOptimize) {
if (is_int16(value)) {
li(dst, Operand(value));
@@ -1696,8 +1784,8 @@ void Assembler::mov_label_addr(Register dst, Label* label) {
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
emit(dst.code());
- DCHECK(kMovInstructions >= 2);
- for (int i = 0; i < kMovInstructions - 2; i++) nop();
+ DCHECK(kMovInstructionsNoConstantPool >= 2);
+ for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
}
}
@@ -1708,7 +1796,7 @@ void Assembler::emit_label_addr(Label* label) {
int position = link(label);
if (label->is_bound()) {
// Keep internal references relative until EmitRelocations.
- emit_ptr(position);
+ dp(position);
} else {
// Encode internal reference to unbound label. We use a dummy opcode
// such that it won't collide with any opcode that might appear in the
@@ -1839,6 +1927,7 @@ void Assembler::isync() { emit(EXT1 | ISYNC); }
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
+ DCHECK(!ra.is(r0));
DCHECK(is_int16(offset));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -1849,6 +1938,7 @@ void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
+ DCHECK(!ra.is(r0));
DCHECK(is_int16(offset));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -2248,51 +2338,33 @@ void Assembler::dd(uint32_t data) {
}
-void Assembler::emit_ptr(intptr_t data) {
+void Assembler::dq(uint64_t value) {
CheckBuffer();
- *reinterpret_cast<intptr_t*>(pc_) = data;
- pc_ += sizeof(intptr_t);
+ *reinterpret_cast<uint64_t*>(pc_) = value;
+ pc_ += sizeof(uint64_t);
}
-void Assembler::emit_double(double value) {
+void Assembler::dp(uintptr_t data) {
CheckBuffer();
- *reinterpret_cast<double*>(pc_) = value;
- pc_ += sizeof(double);
+ *reinterpret_cast<uintptr_t*>(pc_) = data;
+ pc_ += sizeof(uintptr_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- DeferredRelocInfo rinfo(pc_offset(), rmode, data);
- RecordRelocInfo(rinfo);
-}
-
-
-void Assembler::RecordRelocInfo(const DeferredRelocInfo& rinfo) {
- if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
- rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
- // Adjust code for new modes.
- DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
- RelocInfo::IsJSReturn(rinfo.rmode()) ||
- RelocInfo::IsComment(rinfo.rmode()) ||
- RelocInfo::IsPosition(rinfo.rmode()));
+ if (RelocInfo::IsNone(rmode) ||
+ // Don't record external references unless the heap will be serialized.
+ (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
+ !emit_debug_code())) {
+ return;
}
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- // Don't record external references unless the heap will be serialized.
- if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
- if (!serializer_enabled() && !emit_debug_code()) {
- return;
- }
- }
- if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
- DeferredRelocInfo reloc_info_with_ast_id(rinfo.position(), rinfo.rmode(),
- RecordedAstId().ToInt());
- ClearRecordedAstId();
- relocations_.push_back(reloc_info_with_ast_id);
- } else {
- relocations_.push_back(rinfo);
- }
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ data = RecordedAstId().ToInt();
+ ClearRecordedAstId();
}
+ DeferredRelocInfo rinfo(pc_offset(), rmode, data);
+ relocations_.push_back(rinfo);
}
@@ -2378,16 +2450,7 @@ void Assembler::CheckTrampolinePool() {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- DCHECK(!FLAG_enable_ool_constant_pool);
-}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index bcc2d8f6b6..82d068503d 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -108,7 +108,8 @@ struct Register {
static const int kAllocatableLowRangeBegin = 3;
static const int kAllocatableLowRangeEnd = 10;
static const int kAllocatableHighRangeBegin = 14;
- static const int kAllocatableHighRangeEnd = 28;
+ static const int kAllocatableHighRangeEnd =
+ FLAG_enable_embedded_constant_pool ? 27 : 28;
static const int kAllocatableContext = 30;
static const int kNumAllocatableLow =
@@ -177,6 +178,10 @@ struct Register {
"r28",
"cp",
};
+ if (FLAG_enable_embedded_constant_pool &&
+ (index == kMaxNumAllocatableRegisters - 2)) {
+ return names[index + 1];
+ }
return names[index];
}
@@ -184,7 +189,7 @@ struct Register {
1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
1 << 14 | 1 << 15 | 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 | 1 << 20 |
1 << 21 | 1 << 22 | 1 << 23 | 1 << 24 | 1 << 25 | 1 << 26 | 1 << 27 |
- 1 << 28 | 1 << 30;
+ (FLAG_enable_embedded_constant_pool ? 0 : 1 << 28) | 1 << 30;
static Register from_code(int code) {
Register r = {code};
@@ -242,7 +247,7 @@ const int kRegister_r24_Code = 24;
const int kRegister_r25_Code = 25;
const int kRegister_r26_Code = 26;
const int kRegister_r27_Code = 27;
-const int kRegister_r28_Code = 28;
+const int kRegister_r28_Code = 28; // constant pool pointer
const int kRegister_r29_Code = 29; // roots array pointer
const int kRegister_r30_Code = 30; // context pointer
const int kRegister_fp_Code = 31; // frame pointer
@@ -286,6 +291,7 @@ const Register fp = {kRegister_fp_Code};
// Give alias names to registers
const Register cp = {kRegister_r30_Code}; // JavaScript context pointer
const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer.
+const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool
// Double word FP register.
struct DoubleRegister {
@@ -595,20 +601,36 @@ class Assembler : public AssemblerBase {
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
+ INLINE(static bool IsConstantPoolLoadStart(
+ Address pc, ConstantPoolEntry::Access* access = nullptr));
+ INLINE(static bool IsConstantPoolLoadEnd(
+ Address pc, ConstantPoolEntry::Access* access = nullptr));
+ INLINE(static int GetConstantPoolOffset(Address pc,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type));
+ INLINE(void PatchConstantPoolAccessInstruction(
+ int pc_offset, int offset, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type));
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_constant_pool_address_at(
+ Address pc, Address constant_pool, ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type));
+
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool));
+ INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Address pc, ConstantPoolArray* constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
- ConstantPoolArray* constant_pool = NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- ConstantPoolArray* constant_pool = NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -646,12 +668,21 @@ class Assembler : public AssemblerBase {
// Number of instructions to load an address via a mov sequence.
#if V8_TARGET_ARCH_PPC64
- static const int kMovInstructions = 5;
+ static const int kMovInstructionsConstantPool = 1;
+ static const int kMovInstructionsNoConstantPool = 5;
+#if defined(V8_PPC_TAGGING_OPT)
+ static const int kTaggedLoadInstructions = 1;
+#else
static const int kTaggedLoadInstructions = 2;
+#endif
#else
- static const int kMovInstructions = 2;
+ static const int kMovInstructionsConstantPool = 1;
+ static const int kMovInstructionsNoConstantPool = 2;
static const int kTaggedLoadInstructions = 1;
#endif
+ static const int kMovInstructions = FLAG_enable_embedded_constant_pool
+ ? kMovInstructionsConstantPool
+ : kMovInstructionsNoConstantPool;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -682,13 +713,15 @@ class Assembler : public AssemblerBase {
// This is the length of the BreakLocation::SetDebugBreakAtReturn()
// code patch FIXED_SEQUENCE
- static const int kJSReturnSequenceInstructions = kMovInstructions + 3;
+ static const int kJSReturnSequenceInstructions =
+ kMovInstructionsNoConstantPool + 3;
static const int kJSReturnSequenceLength =
kJSReturnSequenceInstructions * kInstrSize;
// This is the length of the code sequence from SetDebugBreakAtSlot()
// FIXED_SEQUENCE
- static const int kDebugBreakSlotInstructions = kMovInstructions + 2;
+ static const int kDebugBreakSlotInstructions =
+ kMovInstructionsNoConstantPool + 2;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -703,6 +736,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -719,11 +755,49 @@ class Assembler : public AssemblerBase {
// Convenience branch instructions using labels
void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L, false), lk); }
+ inline CRegister cmpi_optimization(CRegister cr) {
+    // Check whether the branch is preceded by an optimizable cmpi against 0.
+    // The cmpi can be deleted if it is also preceded by an instruction that
+ // sets the register used by the compare and supports a dot form.
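+    // Deletion works by backing pc_ up one instruction and, for the shift
+    // cases, rewriting the preceding instruction to its record (dot) form so
+    // that cr0 carries the comparison result.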
+ unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
+ unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;
+ int pos = pc_offset();
+ int cmpi_pos = pc_offset() - kInstrSize;
+
+ if (cmpi_pos > 0 && optimizable_cmpi_pos_ == cmpi_pos &&
+ cmpi_cr_.code() == cr.code() && last_bound_pos_ != pos) {
+ int xpos = cmpi_pos - kInstrSize;
+ int xinstr = instr_at(xpos);
+ int cmpi_ra = (instr_at(cmpi_pos) & 0x1f0000) >> 16;
+ // ra is at the same bit position for the three cases below.
+ int ra = (xinstr & 0x1f0000) >> 16;
+ if (cmpi_ra == ra) {
+ if ((xinstr & sradi_mask) == (EXT2 | SRADIX)) {
+ cr = cr0;
+ instr_at_put(xpos, xinstr | SetRC);
+ pc_ -= kInstrSize;
+ } else if ((xinstr & srawi_mask) == (EXT2 | SRAWIX)) {
+ cr = cr0;
+ instr_at_put(xpos, xinstr | SetRC);
+ pc_ -= kInstrSize;
+ } else if ((xinstr & kOpcodeMask) == ANDIx) {
+ cr = cr0;
+ pc_ -= kInstrSize;
+          // Nothing more to do since andi. is a record form (it sets cr0).
+ }
+        // Didn't match one of the above; the cmpwi must be kept.
+ }
+ }
+ return cr;
+ }
+
void bc_short(Condition cond, Label* L, CRegister cr = cr7,
LKBit lk = LeaveLK) {
DCHECK(cond != al);
DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ cr = cmpi_optimization(cr);
+
int b_offset = branch_offset(L, false);
switch (cond) {
@@ -768,6 +842,8 @@ class Assembler : public AssemblerBase {
DCHECK(cond != al);
DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ cr = cmpi_optimization(cr);
+
switch (cond) {
case eq:
isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
@@ -1201,6 +1277,23 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
};
+ // Class for scoping disabling constant pool entry merging
+ class BlockConstantPoolEntrySharingScope {
+ public:
+ explicit BlockConstantPoolEntrySharingScope(Assembler* assem)
+ : assem_(assem) {
+ assem_->StartBlockConstantPoolEntrySharing();
+ }
+ ~BlockConstantPoolEntrySharingScope() {
+ assem_->EndBlockConstantPoolEntrySharing();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
+ };
+
// Debugging
// Mark address of the ExitJSFrame code.
@@ -1237,8 +1330,8 @@ class Assembler : public AssemblerBase {
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
- void emit_ptr(intptr_t data);
- void emit_double(double data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1271,6 +1364,7 @@ class Assembler : public AssemblerBase {
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static bool IsRlwinm(Instr instr);
+ static bool IsAndi(Instr instr);
#if V8_TARGET_ARCH_PPC64
static bool IsRldicl(Instr instr);
#endif
@@ -1284,6 +1378,19 @@ class Assembler : public AssemblerBase {
void BlockTrampolinePoolFor(int instructions);
void CheckTrampolinePool();
+ // For mov. Return the number of actual instructions required to
+ // load the operand into a register. This can be anywhere from
+ // one (constant pool small section) to five instructions (full
+ // 64-bit sequence).
+ //
+ // The value returned is only valid as long as no entries are added to the
+ // constant pool between this call and the actual instruction being emitted.
+ int instructions_required_for_mov(Register dst, const Operand& src) const;
+
+ // Decide between using the constant pool vs. a mov immediate sequence.
+ bool use_constant_pool_for_mov(Register dst, const Operand& src,
+ bool canOptimize) const;
+
// The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction
// sequences.
@@ -1291,11 +1398,16 @@ class Assembler : public AssemblerBase {
// This function allows outside callers to check and grow the buffer
void EnsureSpaceFor(int space_needed);
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+ int EmitConstantPool() { return constant_pool_builder_.Emit(this); }
+
+ bool ConstantPoolAccessIsInOverflow() const {
+ return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
+ ConstantPoolEntry::OVERFLOWED;
+ }
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ Label* ConstantPoolPosition() {
+ return constant_pool_builder_.EmittedPosition();
+ }
void EmitRelocations();
@@ -1315,7 +1427,16 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
- void RecordRelocInfo(const DeferredRelocInfo& rinfo);
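+  // Entries that may be patched individually later (relocatable values when
+  // serializing, rmodes below CELL, or while sharing is explicitly blocked)
+  // must not be merged with other uses of the same value.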
+ ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
+ intptr_t value) {
+ bool sharing_ok = RelocInfo::IsNone(rmode) ||
+ !(serializer_enabled() || rmode < RelocInfo::CELL ||
+ is_constant_pool_entry_sharing_blocked());
+ return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
+ }
+ ConstantPoolEntry::Access ConstantPoolAddEntry(double value) {
+ return constant_pool_builder_.AddEntry(pc_offset(), value);
+ }
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
@@ -1329,6 +1450,16 @@ class Assembler : public AssemblerBase {
return trampoline_pool_blocked_nesting_ > 0;
}
+ void StartBlockConstantPoolEntrySharing() {
+ constant_pool_entry_sharing_blocked_nesting_++;
+ }
+ void EndBlockConstantPoolEntrySharing() {
+ constant_pool_entry_sharing_blocked_nesting_--;
+ }
+ bool is_constant_pool_entry_sharing_blocked() const {
+ return constant_pool_entry_sharing_blocked_nesting_ > 0;
+ }
+
bool has_exception() const { return internal_trampoline_exception_; }
bool is_trampoline_emitted() const { return trampoline_emitted_; }
@@ -1350,6 +1481,9 @@ class Assembler : public AssemblerBase {
int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
int no_trampoline_pool_before_; // Block emission before this pc offset.
+ // Do not share constant pool entries.
+ int constant_pool_entry_sharing_blocked_nesting_;
+
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -1358,6 +1492,11 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Optimizable cmpi information.
+ int optimizable_cmpi_pos_;
+ CRegister cmpi_cr_;
+
+ ConstantPoolBuilder constant_pool_builder_;
// Code emission
inline void CheckBuffer();
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index eed082d6f9..a588eb298d 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -233,7 +233,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r3);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -253,7 +253,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -263,7 +263,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push function as parameter to the runtime call.
__ Push(r4, r4);
@@ -337,6 +337,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -354,16 +355,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Enter a construct frame.
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
__ AssertUndefinedOrAllocationSite(r5, r7);
__ push(r5);
}
- // Preserve the two incoming parameters on the stack.
+ // Preserve the incoming parameters on the stack.
__ SmiTag(r3);
- __ Push(r3, r4);
+ if (use_new_target) {
+ __ Push(r3, r4, r6);
+ } else {
+ __ Push(r3, r4);
+ }
Label rt_call, allocated, normal_new, count_incremented;
__ cmp(r4, r6);
@@ -439,7 +444,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r4: constructor function
// r5: initial map
- // r6: object size (not including memento if create_memento)
+ // r6: object size (including memento if create_memento)
// r7: JSObject (not tagged)
__ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
__ mr(r8, r7);
@@ -516,7 +521,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addi(r7, r7, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
+ // allocated object if not; allocate and initialize a FixedArray if yes.
// r4: constructor function
// r7: JSObject
// r8: start of next object (not tagged)
@@ -611,7 +616,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ LoadP(r5, MemOperand(sp, kPointerSize * 2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ LoadP(r5, MemOperand(sp, offset));
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ cmp(r5, r8);
__ beq(&count_incremented);
@@ -626,40 +632,45 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- __ Push(r7, r7);
+ // Restore the parameters.
+ if (use_new_target) {
+ __ Pop(r4, ip);
+ } else {
+ __ pop(r4);
+ }
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r6, MemOperand(sp));
- // Reload the number of arguments and the constructor from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
- __ LoadP(r6, MemOperand(sp, 3 * kPointerSize));
+ // Push new.target onto the construct frame. This is stored just below the
+ // receiver on the stack.
+ if (use_new_target) {
+ __ Push(ip, r7, r7);
+ } else {
+ __ Push(r7, r7);
+ }
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Set up number of arguments for function call below
- __ SmiUntag(r3, r6);
-
// Copy arguments and receiver to the expression stack.
- // r3: number of arguments
// r4: constructor function
// r5: address of last argument (caller sp)
// r6: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: new.target (if used)
+ // sp[2/3]: number of arguments (smi-tagged)
Label loop, no_args;
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_args);
+ __ SmiUntag(r3, r6, SetRC);
+ __ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, ip);
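+    // Reserve stack space for all arguments up front, then fill the slots
+    // with indexed stores rather than pushing one value at a time.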
__ mtctr(r3);
__ bind(&loop);
__ subi(ip, ip, Operand(kPointerSize));
__ LoadPX(r0, MemOperand(r5, ip));
- __ push(r0);
+ __ StorePX(r0, MemOperand(sp, ip));
__ bdnz(&loop);
__ bind(&no_args);
@@ -676,15 +687,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r3: result
// sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -695,8 +708,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r3: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(r3, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -714,9 +727,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r3: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ // sp[1]: new.target (if used)
+ // sp[1/2]: number of arguments (smi-tagged)
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ LoadP(r4, MemOperand(sp, offset));
// Leave construct frame.
}
@@ -730,12 +744,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@@ -753,7 +772,7 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
CHECK(!FLAG_pretenuring_call_new);
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Smi-tagged arguments count.
__ mr(r7, r3);
@@ -788,8 +807,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ bdnz(&loop);
__ bind(&no_args);
- __ addi(r3, r3, Operand(1));
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -815,7 +832,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// r3: result
// sp[0]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ LoadP(r4, MemOperand(sp, 0));
+ // Get arguments count, skipping over new.target.
+ __ LoadP(r4, MemOperand(sp, kPointerSize));
// Leave construct frame.
}
@@ -962,7 +980,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push function as parameter to the runtime call.
__ Push(r4, r4);
@@ -1080,7 +1098,7 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
  // Preserve registers across the notification; this is important for compiled
  // stubs that tail call the runtime on deopts, passing their parameters in
@@ -1109,7 +1127,7 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ push(r3);
@@ -1157,7 +1175,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r3);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@@ -1176,8 +1194,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
{
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
+ }
+
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ LoadP(r4, FieldMemOperand(
@@ -1202,7 +1225,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmpl(sp, ip);
__ bge(&ok);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
@@ -1293,7 +1316,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r3);
__ Push(r3, r5);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -1397,7 +1420,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
+ // (tail-call) to the code in register ip without checking arguments.
// r3: actual number of arguments
// r4: function
__ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -1422,6 +1445,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int limitOffset) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
// Copy all arguments from the array to the stack.
Label entry, loop;
@@ -1431,7 +1456,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ LoadP(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadSmiLiteral(slot, Smi::FromInt(index));
+ __ Move(vector, feedback_vector);
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@@ -1460,7 +1492,7 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kStackSize = kFormalParameters + 1;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
@@ -1589,7 +1621,7 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kStackSize = kFormalParameters + 1;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
@@ -1689,7 +1721,11 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ mflr(r0);
__ push(r0);
- __ Push(fp, r7, r4, r3);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(fp, kConstantPoolRegister, r7, r4, r3);
+ } else {
+ __ Push(fp, r7, r4, r3);
+ }
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
}
@@ -1763,6 +1799,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
+
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r8, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBit(r8,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kStrongModeFunction,
+#else
+ SharedFunctionInfo::kStrongModeFunction + kSmiTagSize,
+#endif
+ r0);
+ __ beq(&no_strong_error, cr0);
+
+ // What we really care about is the required number of arguments.
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kLengthOffset));
+#if V8_TARGET_ARCH_PPC64
+    // See comment near kLengthOffset in src/objects.h
+ __ srawi(r7, r7, kSmiTagSize);
+#else
+ __ SmiUntag(r7);
+#endif
+ __ cmp(r3, r7);
+ __ bge(&no_strong_error);
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Calculate copy start address into r0 and copy end address is fp.
@@ -1834,7 +1902,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 2b848b29f3..cd7d30b1c6 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -94,7 +94,7 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond);
+ Condition cond, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
Register rhs, Label* lhs_not_nan,
Label* slow, bool strict);
@@ -108,15 +108,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ r3.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@@ -249,7 +249,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond) {
+ Condition cond, Strength strength) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r3, r4);
@@ -260,10 +260,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
+ // Call runtime on identical JSObjects.
__ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
__ bge(slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
__ beq(slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics, since
+ // we need to throw a TypeError. Smis have already been ruled out.
+ __ cmpi(r7, Operand(HEAP_NUMBER_TYPE));
+ __ beq(&return_equal);
+ __ andi(r0, r7, Operand(kIsNotStringMask));
+ __ bne(slow, cr0);
+ }
} else {
__ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
__ beq(&heap_number);
@@ -271,8 +281,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
if (cond != eq) {
__ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bge(slow);
+ // Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
__ beq(slow);
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ andi(r0, r7, Operand(kIsNotStringMask));
+ __ bne(slow, cr0);
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -576,7 +594,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc, strength());
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -687,7 +705,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- native = Builtins::COMPARE;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
@@ -1164,11 +1183,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&skip);
// Compute the handler entry address and jump to it.
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(r4, Operand(pending_handler_code_address));
__ LoadP(r4, MemOperand(r4));
__ mov(r5, Operand(pending_handler_offset_address));
__ LoadP(r5, MemOperand(r5));
__ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4);
+ }
__ add(ip, r4, r5);
__ Jump(ip);
}
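A hedged note on the pattern above: with an embedded constant pool, the pool pointer of the frame being unwound is stale, so it has to be re-derived from the handler code's entry address before the jump. Sketch with an invented placeholder for what LoadConstantPoolPointerRegisterFromCodeTargetAddress does:

    #include <cstdint>

    uintptr_t HandlerTarget(uintptr_t code_start, uintptr_t handler_offset,
                            bool embedded_constant_pool) {
      if (embedded_constant_pool) {
        // ReloadConstantPoolPointer(code_start);  // invented placeholder
      }
      return code_start + handler_offset;  // add ip, r4, r5; __ Jump(ip)
    }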
@@ -1210,6 +1233,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r7: argv
__ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ push(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ li(kConstantPoolRegister, Operand::Zero());
+ __ push(kConstantPoolRegister);
+ }
int marker = type();
__ LoadSmiLiteral(r0, Smi::FromInt(marker));
__ push(r0);
@@ -1352,15 +1379,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Register map = r6; // Map of the object.
const Register function = r4; // Function (rhs).
const Register prototype = r7; // Prototype of the function.
- const Register inline_site = r9;
+ // The map_check_delta was stored in r8
+ // The bool_load_delta was stored in r9
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register map_check_delta = r8;
+ const Register bool_load_delta = r9;
+ const Register inline_site = r10;
const Register scratch = r5;
Register scratch3 = no_reg;
-
- // delta = mov + tagged LoadP + cmp + bne
- const int32_t kDeltaToLoadBoolResult =
- (Assembler::kMovInstructions + Assembler::kTaggedLoadInstructions + 2) *
- Assembler::kInstrSize;
-
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
@@ -1402,17 +1428,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r8
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register offset = r8;
+ const Register offset = map_check_delta;
__ mflr(inline_site);
__ sub(inline_site, inline_site, offset);
- // Get the map location in r8 and patch it.
+ // Get the map location in offset and patch it.
__ GetRelocatedValue(inline_site, offset, scratch);
__ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
- __ mr(r10, map);
- __ RecordWriteField(offset, Cell::kValueOffset, r10, function,
+ __ mr(r11, map);
+ __ RecordWriteField(offset, Cell::kValueOffset, r11, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@@ -1447,7 +1471,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Patch the call site to return true.
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ __ add(inline_site, inline_site, bool_load_delta);
// Get the boolean result location in scratch and patch it.
__ SetRelocatedValue(inline_site, scratch, r3);
@@ -1467,7 +1491,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Patch the call site to return false.
__ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ __ add(inline_site, inline_site, bool_load_delta);
// Get the boolean result location in scratch and patch it.
__ SetRelocatedValue(inline_site, scratch, r3);
@@ -1524,7 +1548,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r4);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -1556,9 +1580,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r7, r8, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
r8, &miss);
@@ -1577,9 +1600,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = r8;
Register result = r3;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -1603,7 +1625,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1664,8 +1685,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[1] : receiver displacement
// sp[2] : function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1696,8 +1715,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r9 : allocated object (tagged)
// r11 : mapped parameter count (tagged)
- CHECK(!has_new_target());
-
__ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
// r4 = parameter count (tagged)
@@ -1775,7 +1792,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -1980,14 +1997,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
- Label skip_decrement;
- __ beq(&skip_decrement);
- // Subtract 1 from smi-tagged arguments count.
- __ SubSmiLiteral(r4, r4, Smi::FromInt(1), r0);
- __ bind(&skip_decrement);
- }
__ StoreP(r4, MemOperand(sp, 0));
__ SmiToPtrArrayOffset(r6, r4);
__ add(r6, r5, r6);
@@ -2076,9 +2085,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
- // sp[0] : index of rest parameter
- // sp[4] : number of parameters
- // sp[8] : receiver displacement
+ // sp[0] : language mode
+ // sp[4] : index of rest parameter
+ // sp[8] : number of parameters
+ // sp[12] : receiver displacement
Label runtime;
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2088,14 +2098,14 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
__ SmiToPtrArrayOffset(r6, r4);
__ add(r6, r5, r6);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r6, MemOperand(sp, 3 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
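Because the language mode is now pushed first, every later slot moves down by one, which is why the two StoreP offsets grew from 1/2 to 2/3 pointer slots and kNewRestParam takes four arguments. Sketch with invented slot names:

    constexpr int kLanguageModeSlot = 0;  // new in this patch
    constexpr int kRestIndexSlot = 1;     // was 0
    constexpr int kParamCountSlot = 2;    // was 1; patched above
    constexpr int kReceiverDispSlot = 3;  // was 2; patched above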
@@ -2536,7 +2546,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r5 : Feedback vector
// r6 : slot in feedback vector (Smi)
// r4 : the function to call
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
@@ -2700,7 +2710,7 @@ static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r4, r6);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(r4);
@@ -2871,15 +2881,21 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
__ mov(r3, Operand(arg_count()));
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
- __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+ __ SmiToPtrArrayOffset(r9, r6);
+ __ add(r9, r5, r9);
+ __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
// Verify that r7 contains an AllocationSite
__ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
__ bne(&miss);
+ // Increment the call count for monomorphic function calls.
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+ __ LoadP(r6, FieldMemOperand(r9, count_offset));
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
+
__ mr(r5, r7);
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate(), arg_count());
@@ -2911,9 +2927,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
ParameterCount actual(argc);
// The checks. First, does r4 match the recorded monomorphic target?
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
- __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+ __ SmiToPtrArrayOffset(r9, r6);
+ __ add(r9, r5, r9);
+ __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
@@ -2937,6 +2953,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r4, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+ __ LoadP(r6, FieldMemOperand(r9, count_offset));
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2980,10 +3002,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AssertNotSmi(r7);
__ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
__ bne(&miss);
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
// We have to update statistics for runtime profiling.
__ LoadP(r7, FieldMemOperand(r5, with_types_offset));
__ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
@@ -3013,12 +3033,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
__ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ // Initialize the call counter.
+ __ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ StoreP(r8, FieldMemOperand(r9, count_offset), r0);
+
// Store the function. Use a stub since we need a frame for allocation.
// r5 - vector
// r6 - slot
// r4 - function
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
__ Push(r4);
__ CallStub(&create_stub);
@@ -3046,7 +3070,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push the function and feedback info.
__ Push(r4, r5, r6);
@@ -3107,9 +3131,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Push(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
@@ -3124,9 +3148,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r3);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister(), object_);
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+ LoadWithVectorDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
@@ -3780,7 +3804,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4015,7 +4039,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r3);
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));
@@ -4578,15 +4602,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4605,12 +4629,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4710,11 +4732,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4
- Register name = VectorLoadICDescriptor::NameRegister(); // r5
- Register vector = VectorLoadICDescriptor::VectorRegister(); // r6
- Register slot = VectorLoadICDescriptor::SlotRegister(); // r3
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
+ Register name = LoadWithVectorDescriptor::NameRegister(); // r5
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
Register feedback = r7;
Register receiver_map = r8;
Register scratch1 = r9;
@@ -4757,21 +4779,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4
- Register key = VectorLoadICDescriptor::NameRegister(); // r5
- Register vector = VectorLoadICDescriptor::VectorRegister(); // r6
- Register slot = VectorLoadICDescriptor::SlotRegister(); // r3
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
+ Register key = LoadWithVectorDescriptor::NameRegister(); // r5
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
Register feedback = r7;
Register receiver_map = r8;
Register scratch1 = r9;
@@ -4805,7 +4827,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&try_poly_name);
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4830,6 +4852,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -5592,7 +5666,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index c0398aebed..aae38f4724 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -677,7 +677,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
}
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.cc b/deps/v8/src/ppc/constants-ppc.cc
index f32f25a258..f019089eca 100644
--- a/deps/v8/src/ppc/constants-ppc.cc
+++ b/deps/v8/src/ppc/constants-ppc.cc
@@ -85,7 +85,7 @@ int Registers::Number(const char* name) {
// No register with the requested name found.
return kNoRegister;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 58d4430019..6960a7aa1e 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -17,6 +17,11 @@ const int kNumFPRegisters = kNumFPDoubleRegisters;
const int kNoRegister = -1;
+// Used by the embedded constant pool builder - max reach in bits for the
+// various load instructions (one bit less because offsets are unsigned)
+const int kLoadPtrMaxReachBits = 15;
+const int kLoadDoubleMaxReachBits = 15;
+
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
@@ -331,7 +336,8 @@ enum {
kBOfieldMask = 0x1f << 21,
kOpcodeMask = 0x3f << 26,
kExt1OpcodeMask = 0x3ff << 1,
- kExt2OpcodeMask = 0x1f << 1,
+ kExt2OpcodeMask = 0x3ff << 1,
+ kExt2OpcodeVariant2Mask = 0x1ff << 2,
kExt5OpcodeMask = 0x3 << 2,
kBOMask = 0x1f << 21,
kBIMask = 0x1F << 16,
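A hedged reading of the two constants-ppc.h changes: the reach values are 15 rather than 16 presumably because only non-negative offsets are encoded in the signed 16-bit displacement field, and the widened mask now covers the full 10-bit extended-opcode field (bits 1-10), so ext2 instructions that agree only in the low five opcode bits stop aliasing during decode. Illustrative check (names invented):

    #include <cstdint>

    constexpr uint32_t kOldExt2Mask = 0x1fu << 1;   // bits 1-5 only
    constexpr uint32_t kNewExt2Mask = 0x3ffu << 1;  // bits 1-10
    inline uint32_t Ext2Opcode(uint32_t instr) { return instr & kNewExt2Mask; }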
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index d42420cde1..1a9390b333 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -57,7 +57,7 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
#endif // USE_SIMULATOR
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/debug-ppc.cc b/deps/v8/src/ppc/debug-ppc.cc
index f59f6371de..9e734452b8 100644
--- a/deps/v8/src/ppc/debug-ppc.cc
+++ b/deps/v8/src/ppc/debug-ppc.cc
@@ -74,7 +74,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
@@ -157,53 +157,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-ppc.cc).
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-ppc.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit() | value.bit(),
- 0);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for keyed IC load (from ic-ppc.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC keyed store call (from ic-ppc.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit() | value.bit(),
- 0);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- r3 : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r3.bit(), 0);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r3 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -302,7 +255,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 74c88e37a7..0db074d694 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -356,11 +356,11 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
- UNREACHABLE();
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ SetFrameSlot(offset, value);
}
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 2486741350..02ef88bc19 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -1292,8 +1292,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
return Instruction::kInstrSize;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
//------------------------------------------------------------------------------
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
index 00af7c9b01..1e54c46963 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -21,24 +21,18 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ return kConstantPoolRegister;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
+ DCHECK(FLAG_enable_embedded_constant_pool);
+ return kConstantPoolRegister;
}
-
-
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
index 40a68b3a37..0357731b4b 100644
--- a/deps/v8/src/ppc/frames-ppc.h
+++ b/deps/v8/src/ppc/frames-ppc.h
@@ -116,8 +116,11 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kFrameSize = 2 * kPointerSize;
- static const int kConstantPoolOffset = 0; // Not used.
+ static const int kFrameSize =
+ FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
+
+ static const int kConstantPoolOffset =
+ FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
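A compact restatement (helper names invented) of how the exit-frame constants now depend on the flag: fp[-1] holds the SP slot, fp[-2] the code object, and only with the embedded pool does fp[-3] hold the constant pool pointer:

    constexpr int ExitFrameSize(bool embedded_pool, int pointer_size) {
      return (embedded_pool ? 3 : 2) * pointer_size;
    }
    constexpr int ConstantPoolOffset(bool embedded_pool, int pointer_size) {
      return embedded_pool ? -3 * pointer_size : 0;  // 0 means "not used"
    }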
@@ -145,36 +148,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -6 * kPointerSize;
- static const int kConstructorOffset = -5 * kPointerSize;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/ppc/full-codegen-ppc.cc
index 39d6dae918..ec94a242b4 100644
--- a/deps/v8/src/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/ppc/full-codegen-ppc.cc
@@ -102,10 +102,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-ppc.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -124,7 +120,7 @@ void FullCodeGenerator::Generate() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ info->MayUseThis() && info->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
@@ -196,17 +192,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
// Argument to NewContext is the function, which is still in r4.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
__ push(r4);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -221,8 +217,9 @@ void FullCodeGenerator::Generate() {
__ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -246,10 +243,49 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+      // The write barrier clobbers the register again, so keep it marked as such.
+ }
+ SetVar(this_function_var, r4, r3, r5);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+
+ // Get the frame pointer for the calling frame.
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ Label skip;
+ __ bne(&skip);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&skip);
+
+ // Check the marker in the calling frame.
+ __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+ Label non_construct_frame, done;
+
+ __ bne(&non_construct_frame);
+ __ LoadP(r3, MemOperand(
+ r5, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ b(&done);
+
+ __ bind(&non_construct_frame);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+
+ SetVar(new_target_var, r3, r5, r6);
+ }
// Possibly allocate RestParameters
int rest_index;
@@ -259,15 +295,12 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r5, Operand(Smi::FromInt(num_parameters)));
- __ mov(r4, Operand(Smi::FromInt(rest_index)));
- __ Push(r6, r5, r4);
+ __ LoadSmiLiteral(r5, Smi::FromInt(num_parameters));
+ __ LoadSmiLiteral(r4, Smi::FromInt(rest_index));
+ __ LoadSmiLiteral(r3, Smi::FromInt(language_mode()));
+ __ Push(r6, r5, r4, r3);
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -304,7 +337,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, r3, r4, r5);
@@ -330,7 +363,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -362,6 +395,10 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
+
+ if (HasStackOverflow()) {
+ masm_->AbortConstantPoolBuilding();
+ }
}
@@ -402,6 +439,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterDecrement(weight);
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockConstantPoolEntrySharingScope prevent_entry_sharing(masm_);
// BackEdgeTable::PatchAt manipulates this sequence.
__ cmpi(r6, Operand::Zero());
__ bc_short(ge, &ok);
@@ -463,18 +501,15 @@ void FullCodeGenerator::EmitReturnSequence() {
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
int32_t arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int32_t sp_delta = arg_count * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
#if V8_TARGET_ARCH_PPC64
// With 64bit we may need nop() instructions to ensure we have
// enough space to SetDebugBreakAtReturn()
if (is_int16(sp_delta)) {
- masm_->nop();
+ if (!FLAG_enable_embedded_constant_pool) masm_->nop();
masm_->nop();
}
#endif
@@ -815,7 +850,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
@@ -823,8 +859,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -832,7 +868,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -843,7 +879,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r5, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
@@ -874,25 +910,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ StoreP(result_register(), StackOperand(variable));
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -906,7 +943,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r5, Operand(variable->name()));
__ LoadSmiLiteral(r4, Smi::FromInt(NONE));
@@ -924,20 +961,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -1015,9 +1053,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1063,8 +1101,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1072,7 +1111,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r3, ip);
@@ -1178,7 +1217,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
// Load the current count to r3, load the length to r4.
__ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
@@ -1213,10 +1252,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(r4, r6); // Enumerable and current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mr(r6, r3);
- __ cmpi(r6, Operand::Zero());
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, r0);
__ beq(loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
@@ -1226,7 +1266,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{
EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1284,39 +1324,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ LoadP(LoadDescriptor::ReceiverRegister(),
- MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- __ Cmpi(r3, Operand(isolate()->factory()->undefined_value()), r0);
- Label done;
- __ bne(&done);
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1370,16 +1387,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
-
- ContextualMode mode =
- (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL : CONTEXTUAL;
- CallLoadIC(mode);
+  // All extension objects were empty and it is safe to use the normal
+  // global load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1447,30 +1457,43 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
// Record position before possible IC call.
- SetSourcePosition(proxy->position());
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(r3);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1539,16 +1562,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ mov(r4, Operand(var->name()));
__ Push(cp, r4); // Context and name.
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(r3);
}
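
The LOOKUP case now threads typeof_state down to the runtime so that `typeof x` on an undeclared lookup variable yields undefined instead of throwing. A hedged sketch of the selection logic, with toy names standing in for Runtime::kLoadLookupSlot and Runtime::kLoadLookupSlotNoReferenceError:

    #include <stdexcept>
    #include <string>

    enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };

    // Toy environment: only "answer" is bound.
    bool LookupSlot(const std::string& name, int* out) {
      if (name == "answer") { *out = 42; return true; }
      return false;
    }

    // Mirrors the diff: a plain load throws a ReferenceError on a miss; the
    // no-reference-error variant, used inside typeof, returns undefined
    // (modeled here as -1).
    int LoadLookup(const std::string& name, TypeofState state) {
      int value;
      if (LookupSlot(name, &value)) return value;
      if (state == NOT_INSIDE_TYPEOF)
        throw std::runtime_error(name + " is not defined");  // ReferenceError
      return -1;  // stands in for undefined
    }

    int main() { return LoadLookup("missing", INSIDE_TYPEOF) == -1 ? 0 : 1; }
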
@@ -1619,7 +1646,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
@@ -1640,13 +1666,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in r3.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1673,7 +1698,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r3));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1681,6 +1711,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(), MemOperand(sp));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1694,7 +1727,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
__ push(r3);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1731,9 +1765,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r3);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1768,7 +1806,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1814,6 +1853,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(r3);
}
+
+ // Verify that compilation exactly consumed the number of store IC slots that
+ // the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
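
The store_slot_index bookkeeping has a matching invariant enforced by the DCHECK above: ObjectLiteral::ComputeFeedbackRequirements reserves a fixed number of slots up front, and codegen must consume exactly that many. A small sketch of that contract, with invented property flags:

    #include <cassert>
    #include <vector>

    struct Property { bool needs_store_ic; bool needs_home_object; };

    // Parser side: count how many store-IC slots the literal will need.
    int ComputeFeedbackRequirements(const std::vector<Property>& props) {
      int slots = 0;
      for (const Property& p : props)
        slots += (p.needs_store_ic ? 1 : 0) + (p.needs_home_object ? 1 : 0);
      return slots;
    }

    // Codegen side: hand out slots one by one, then assert full consumption,
    // just like the DCHECK at the end of VisitObjectLiteral.
    void EmitStores(const std::vector<Property>& props, int reserved) {
      int store_slot_index = 0;
      for (const Property& p : props) {
        if (p.needs_store_ic) ++store_slot_index;     // EmitLoadStoreICSlot(...)
        if (p.needs_home_object) ++store_slot_index;  // EmitSetHomeObjectIfNeeded(...)
      }
      assert(store_slot_index == reserved);  // slots exactly consumed
    }

    int main() {
      std::vector<Property> props = {{true, false}, {true, true}};
      EmitStores(props, ComputeFeedbackRequirements(props));
    }
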
@@ -1854,8 +1897,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1868,7 +1913,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ LoadP(r8, MemOperand(sp, kPointerSize)); // Copy of array literal.
__ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
__ StoreP(result_register(), FieldMemOperand(r4, offset), r0);
@@ -1877,16 +1922,41 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
kDontSaveFPRegs, EMIT_REMEMBERED_SET,
INLINE_SMI_CHECK);
} else {
- __ LoadSmiLiteral(r6, Smi::FromInt(i));
+ __ LoadSmiLiteral(r6, Smi::FromInt(array_index));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ // If the array literal contains spread expressions, it has two parts. The
+ // first part is the "static" array, which is indexed by literal position and
+ // handled above. The second part starts at the first spread expression
+ // (inclusive); these elements get appended to the array. Note that the number
+ // of elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ Drop(1); // literal index
+ __ Pop(r3);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(r3);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ pop(); // literal index
+ __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(r3);
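
The two loops above split an array literal at its first spread: elements before it are stored at statically known literal indices, and everything from the spread onward is appended one element (or one iterable's worth) at a time. A behavioural sketch in plain C++, with std::vector standing in for the JSArray:

    #include <cstddef>
    #include <vector>

    struct Element { bool is_spread; std::vector<int> values; };

    std::vector<int> BuildArrayLiteral(const std::vector<Element>& elems) {
      std::vector<int> array;
      std::size_t array_index = 0;
      // Phase 1: the "static" part, indexed stores until the first spread.
      for (; array_index < elems.size(); array_index++) {
        if (elems[array_index].is_spread) break;
        array.push_back(elems[array_index].values[0]);
      }
      // Phase 2: from the first spread on (inclusive), append. An iterable
      // may produce any number of elements, so indices are no longer known.
      for (; array_index < elems.size(); array_index++) {
        const Element& e = elems[array_index];
        array.insert(array.end(), e.values.begin(), e.values.end());
      }
      return array;
    }

    int main() {
      // Models [1, 2, ...[3, 4], 5].
      std::vector<int> a = BuildArrayLiteral(
          {{false, {1}}, {false, {2}}, {true, {3, 4}}, {false, {5}}});
      return a.size() == 5 ? 0 : 1;
    }
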
@@ -1898,9 +1968,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1917,8 +1988,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = r4;
@@ -1928,9 +2001,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_SUPER_PROPERTY: {
const Register scratch = r4;
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Move(scratch, result_register());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
+ __ mr(scratch, result_register());
VisitForAccumulatorValue(property->key());
__ Push(scratch, result_register());
if (expr->is_compound()) {
@@ -1987,7 +2062,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(r3); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
@@ -2002,14 +2076,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
break;
@@ -2033,6 +2106,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2117,7 +2192,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(r3); // result
- EnterTryBlock(expr->index(), &l_catch);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r3); // result
__ b(&l_suspend);
@@ -2127,7 +2203,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ LoadP(r3, MemOperand(sp, generator_object_depth));
__ push(r3); // g
- __ Push(Smi::FromInt(expr->index())); // handler-index
+ __ Push(Smi::FromInt(handler_index)); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
@@ -2141,7 +2217,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(r3); // result
EmitReturnSequence();
__ bind(&l_resume); // received in r3
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2154,11 +2230,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ LoadP(load_receiver, MemOperand(sp, kPointerSize));
__ LoadP(load_name, MemOperand(sp, 2 * kPointerSize));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ mr(r4, r3);
__ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
@@ -2173,10 +2247,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // r3=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2186,10 +2258,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // r3=result.value
context()->DropAndPlug(2, r3); // drop iter and g
break;
@@ -2263,6 +2333,10 @@ void FullCodeGenerator::EmitGeneratorResume(
__ bne(&slow_resume, cr0);
__ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
{
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
+ }
__ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
__ SmiUntag(r5);
__ add(ip, ip, r5);
@@ -2340,51 +2414,44 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
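
Both super-property loads now push the caller's language mode and invoke the runtime with four arguments instead of three, so the runtime can apply sloppy or strict semantics to the lookup. A sketch of the widened calling convention; the signature shown is an assumption for illustration only:

    #include <cstdio>

    enum LanguageMode { SLOPPY = 0, STRICT = 1 };

    // Previously the runtime saw three arguments: receiver, home_object, key.
    // Now the caller Smi-pushes its language mode as a fourth argument.
    int LoadKeyedFromSuper(int receiver, int home_object, int key,
                           LanguageMode mode) {
      std::printf("super[%d] on home=%d for receiver=%d, mode=%d\n",
                  key, home_object, receiver, mode);
      return 0;
    }

    int main() { return LoadKeyedFromSuper(1, 2, 3, STRICT); }
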
@@ -2409,8 +2476,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ b(&done);
@@ -2514,7 +2581,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in r3.
DCHECK(lit != NULL);
__ push(r3);
@@ -2548,7 +2616,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2586,8 +2655,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(r4);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2595,17 +2664,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2615,13 +2685,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(r3);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; r3: home_object
Register scratch = r5;
Register scratch2 = r6;
@@ -2636,9 +2708,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ Push(r3);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = r5;
Register scratch2 = r6;
@@ -2661,6 +2733,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Move(StoreDescriptor::NameRegister(), r3);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2684,11 +2757,13 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2779,12 +2854,15 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
@@ -2823,15 +2901,17 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r3));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
@@ -2840,6 +2920,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2848,9 +2930,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), r3);
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2861,9 +2943,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2919,18 +3001,19 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
+ SetExpressionPosition(prop);
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = r4;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
__ mr(scratch, r3);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(scratch, r3, r3, scratch);
__ Push(key->value());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2938,7 +3021,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2979,18 +3063,16 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
const Register scratch = r4;
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(r3);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
+ __ mr(scratch, r3);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r3);
- __ Push(r3);
- __ LoadP(scratch, MemOperand(sp, kPointerSize * 2));
- __ Push(scratch);
+ __ Push(scratch, r3, r3, scratch);
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2998,7 +3080,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -3014,15 +3097,11 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- {
- PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
@@ -3038,19 +3117,15 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // r8: copy of the first argument or undefined if it doesn't exist.
+ // r7: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ LoadP(r8, MemOperand(sp, arg_count * kPointerSize), r0);
+ __ LoadP(r7, MemOperand(sp, arg_count * kPointerSize), r0);
} else {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
- // r7: the receiver of the enclosing function.
- __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
// r6: the closure (JSFunction) of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ LoadP(r6, MemOperand(fp, receiver_offset * kPointerSize), r0);
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// r5: language mode.
__ LoadSmiLiteral(r5, Smi::FromInt(language_mode()));
@@ -3059,20 +3134,13 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ Push(r8, r7, r6, r5, r4);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(r3);
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ Push(r7, r6, r5, r4);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
+ SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
Variable* this_var = super_ref->this_var()->var();
GetVar(r4, this_var);
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
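
The eval-resolve change earlier in this hunk drops the enclosing receiver: the runtime now takes five arguments rather than six. A sketch of the assumed argument list (the names are illustrative; the values pushed above are the callee copy, the first argument, the enclosing closure, the language mode, and the scope's start position):

    #include <cstdio>

    enum LanguageMode { SLOPPY = 0, STRICT = 1 };

    // Stand-in for Runtime::kResolvePossiblyDirectEval after this change.
    // The old sixth argument, the enclosing function's receiver, is gone.
    void ResolvePossiblyDirectEval(const char* callee, const char* first_arg,
                                   const char* enclosing_closure,
                                   LanguageMode mode, int start_position) {
      std::printf("resolve %s(arg0=%s) in %s, mode=%d, pos=%d\n",
                  callee, first_arg, enclosing_closure, mode, start_position);
    }

    int main() {
      ResolvePossiblyDirectEval("eval", "\"1+1\"", "outer", SLOPPY, 17);
    }
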
@@ -3083,7 +3151,51 @@ void FullCodeGenerator::EmitInitializeThisAfterSuper(
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in r3) and
+ // the object holding it (returned in r4).
+ DCHECK(!context_register().is(r5));
+ __ mov(r5, Operand(callee->name()));
+ __ Push(context_register(), r5);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(r3, r4); // Function, receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ b(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(r3);
+ // Pass undefined as the receiver, which is the WithBaseObject of a
+ // non-object environment record. If the callee is sloppy, it will patch
+ // it up to be the global receiver.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ push(r4);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ push(r5); // Reserved receiver slot.
+ }
}
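
PushCalleeAndWithBaseObject merges the two earlier call paths: lookup-slot callees try a fast dynamic lookup and fall back to Runtime::kLoadLookupSlot, which also returns the holder to use as the receiver; everything else gets undefined as the with-base-object receiver. A control-flow sketch under those assumptions, with simplified types:

    #include <string>
    #include <utility>

    struct Callee { bool is_lookup_slot; std::string name; };

    // Fast path: may fail for variables shadowed by eval-introduced bindings.
    bool FastDynamicLookup(const std::string& name, int* fn) {
      if (name == "known") { *fn = 1; return true; }
      return false;
    }

    // Slow path: the runtime returns both the function and the object
    // holding it (the with-base object, used as the receiver).
    std::pair<int, int> RuntimeLoadLookupSlot(const std::string&) { return {2, 3}; }

    // Returns {function, receiver} in the order the call sequence expects.
    std::pair<int, int> PushCalleeAndWithBaseObject(const Callee& callee) {
      const int kUndefined = 0;
      if (!callee.is_lookup_slot) return {/*fn=*/4, kUndefined};
      int fn;
      if (FastDynamicLookup(callee.name, &fn)) {
        // Fast case: receiver is undefined; a sloppy callee patches it up to
        // the global receiver later.
        return {fn, kUndefined};
      }
      return RuntimeLoadLookupSlot(callee.name);
    }

    int main() { return PushCalleeAndWithBaseObject({true, "known"}).second; }
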
@@ -3100,39 +3212,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- {
- PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ push(r5); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ push(r4);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // The runtime call returns a pair of values in r3 (function) and
- // r4 (receiver). Touch up the stack with the right values.
- __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ StoreP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r4);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
- }
+ // Touch up the stack with the resolved function.
+ __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ CallStub(&stub);
@@ -3145,44 +3249,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- {
- PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r3)
- // and the object holding it (returned in edx).
- DCHECK(!context_register().is(r5));
- __ mov(r5, Operand(proxy->name()));
- __ Push(context_register(), r5);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(r3, r4); // Function, receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r3);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ push(r4);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
@@ -3194,10 +3261,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
+ VisitForStackValue(property->obj());
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3209,10 +3273,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- {
- PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
+ VisitForStackValue(callee);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ push(r4);
// Emit function call.
@@ -3235,7 +3296,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3247,7 +3308,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into r4 and r3.
__ mov(r3, Operand(arg_count));
@@ -3271,11 +3332,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
+
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
- EmitLoadSuperConstructor();
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3287,7 +3351,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into r1 and r0.
__ mov(r3, Operand(arg_count));
@@ -3313,7 +3377,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(r3);
}
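
Super calls now pull new.target and the super constructor from the SuperCallReference expression itself rather than from the declaration scope. The struct below is only an assumed shape, inferred from the accessors used in this hunk (this_var(), new_target_var()) and from the .this_function argument seen later in EmitDefaultConstructorCallSuper:

    struct VariableProxy { const char* name; };

    struct SuperCallReference {
      VariableProxy* this_var;        // 'this', initialized after the super call
      VariableProxy* new_target_var;  // pushed before invoking the constructor
      VariableProxy* this_function;   // super ctor is this function's prototype
    };

    int main() {
      VariableProxy t{".this"}, nt{"new.target"}, tf{".this_function"};
      SuperCallReference ref{&t, &nt, &tf};
      return ref.new_target_var == &nt ? 0 : 1;
    }
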
@@ -3611,6 +3675,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, JS_TYPED_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3720,7 +3806,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
- // ArgumentsAccessStub expects the key in edx and the formal
+ // ArgumentsAccessStub expects the key in r4 and the formal
// parameter count in r3.
VisitForAccumulatorValue(args->at(0));
__ mr(r4, r3);
@@ -3859,6 +3945,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, JS_DATE_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3867,20 +3975,15 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = r3;
Register result = r3;
Register scratch0 = r11;
Register scratch1 = r4;
- __ JumpIfSmi(object, &not_date_object);
- __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ bne(&not_date_object);
-
if (index->value() == 0) {
__ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ b(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch1, Operand(stamp));
@@ -3898,13 +4001,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ PrepareCallCFunction(2, scratch1);
__ LoadSmiLiteral(r4, index);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ b(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(r3);
+ context()->Plug(result);
}
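
With the type check gone (it moved into the new %_IsDate intrinsic above), EmitDateField is reduced to the date-cache discipline: field 0 is read directly, other fields come from the per-object cache while the isolate-wide stamp still matches, and a C call recomputes them otherwise. A sketch of that fast/slow split, with invented names and a stand-in computation:

    #include <cstdio>

    struct DateObject {
      double value;             // field 0: always valid
      int cache_stamp;          // stamp at the time fields were cached
      double cached_fields[8];
    };

    static int g_date_cache_stamp = 7;  // bumped when timezone/DST data changes

    double RecomputeField(DateObject* d, int index) {
      d->cached_fields[index] = d->value + index;  // stand-in computation
      d->cache_stamp = g_date_cache_stamp;
      return d->cached_fields[index];
    }

    double GetDateField(DateObject* d, int index) {
      if (index == 0) return d->value;            // no caching needed
      if (d->cache_stamp == g_date_cache_stamp)   // fast path: cache valid
        return d->cached_fields[index];
      return RecomputeField(d, index);            // slow path: C call
    }

    int main() {
      DateObject d{1000.0, 7, {}};
      std::printf("%f\n", GetDateField(&d, 2));
    }
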
@@ -4183,11 +4283,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+
+ // new.target
+ VisitForStackValue(args->at(0));
- EmitLoadSuperConstructor();
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ mr(r4, result_register());
__ Push(r4);
@@ -4208,9 +4312,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(r3);
- // Subtract 1 from arguments count, for new.target.
- __ subi(r3, r3, Operand(1));
-
// Get arguments pointer in r5.
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
__ add(r5, r5, r0);
@@ -4601,11 +4702,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4615,8 +4719,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ push(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize));
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4632,7 +4736,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r3);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4645,13 +4750,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4659,8 +4760,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ CallStub(&stub);
@@ -4685,6 +4785,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4710,6 +4811,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(r3);
}
@@ -4734,10 +4836,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadP(r5, GlobalObjectOperand());
__ mov(r4, Operand(var->name()));
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));
@@ -4747,7 +4850,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4831,10 +4934,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4857,8 +4959,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
const Register scratch = r4;
__ LoadP(scratch, MemOperand(sp, kPointerSize));
@@ -4868,11 +4971,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
const Register scratch = r4;
const Register scratch1 = r5;
- __ Move(scratch, result_register());
+ __ mr(scratch, result_register());
VisitForAccumulatorValue(prop->key());
__ Push(scratch, result_register());
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
@@ -4949,9 +5053,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ b(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4983,15 +5089,17 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mr(r4, r3);
__ LoadSmiLiteral(r3, Smi::FromInt(count_value));
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), Token::ADD, language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in r3.
switch (assign_type) {
case VARIABLE:
@@ -4999,7 +5107,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(r3);
}
@@ -5010,7 +5118,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
}
@@ -5019,7 +5127,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5057,7 +5170,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5072,46 +5190,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(r3);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ mov(r3, Operand(proxy->name()));
- __ Push(cp, r3);
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(r3);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5192,7 +5270,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5246,9 +5324,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -5359,6 +5436,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ mov(ip, Operand(pending_message_obj));
__ LoadP(r4, MemOperand(ip));
__ push(r4);
+
+ ClearPendingMessage();
}
@@ -5384,6 +5463,23 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(r4));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ mov(ip, Operand(pending_message_obj));
+ __ StoreP(r4, MemOperand(ip));
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(slot)));
+}
+
+
#undef __
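
EnterFinallyBlock now saves the pending message and then clears it, and ExitFinallyBlock restores it, so code running inside a finally clause cannot observe or leak a message from an in-flight throw. A sketch of the save/clear/restore protocol, with an int standing in for the message object:

    #include <cassert>

    // Isolate-wide pending-message slot; kTheHole stands in for "cleared".
    static int g_pending_message = 0;
    const int kTheHole = -1;

    // Mirrors EnterFinallyBlock: save the pending message on the "stack",
    // then clear the slot so the finally body starts from a clean state.
    int EnterFinally() {
      int saved = g_pending_message;
      g_pending_message = kTheHole;  // ClearPendingMessage()
      return saved;
    }

    // Mirrors ExitFinallyBlock: restore whatever was pending before.
    void ExitFinally(int saved) { g_pending_message = saved; }

    int main() {
      g_pending_message = 42;  // e.g. a throw in the try block
      int saved = EnterFinally();
      assert(g_pending_message == kTheHole);
      ExitFinally(saved);
      assert(g_pending_message == 42);
      return 0;
    }
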
@@ -5455,6 +5551,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index f42da49202..2d0c55fd28 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return r4; }
const Register LoadDescriptor::NameRegister() { return r5; }
+const Register LoadDescriptor::SlotRegister() { return r3; }
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r3; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return r6; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
const Register StoreDescriptor::ReceiverRegister() { return r4; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return r5; }
const Register StoreDescriptor::ValueRegister() { return r3; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r7; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return r6; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return r6; }
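
The descriptor rewrite below follows one mechanical pattern: Initialize() became InitializePlatformSpecific(), the context register dropped out of every register list, and the per-register Representation arrays disappeared from the platform files. A toy sketch of the new shape; the types here are illustrative, not V8's:

    #include <cstddef>

    struct Register { int code; };

    struct CallInterfaceDescriptorData {
      const Register* registers = nullptr;
      std::size_t count = 0;
      // Platforms now supply only the register list; representations are
      // handled in platform-independent code.
      void InitializePlatformSpecific(std::size_t n, const Register* regs) {
        count = n;
        registers = regs;
      }
    };

    int main() {
      static const Register regs[] = {{5}, {6}, {4}};  // e.g. r5, r6, r4
      CallInterfaceDescriptorData data;
      data.InitializePlatformSpecific(sizeof(regs) / sizeof(regs[0]), regs);
      return data.count == 3 ? 0 : 1;
    }
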
@@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return r5; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r5};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r6};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r6, r5, r4};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r6, r5, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r6, r5, r4, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r6, r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r5, r6};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r5, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r5, r6, r4};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r5, r6, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r6, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r6, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4, r6};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r4, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4, r6, r5};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r4, r6, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// r3 : number of arguments
// r4 : the function to call
// r5 : feedback vector
@@ -166,210 +162,182 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {cp, r3, r4, r5};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r3, r4, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r5, r4, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3, r4};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // register state
- // cp -- context
- Register registers[] = {cp};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// r3 -- number of arguments
// r4 -- function
// r5 -- allocation site with elements kind
- Register registers[] = {cp, r4, r5};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r4, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
 // stack param count needs (constructor pointer and single argument)
- Register registers[] = {cp, r4, r5, r3};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r4, r5, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
- // cp -- context
// r3 -- number of arguments
// r4 -- constructor function
- Register registers[] = {cp, r4};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
 // stack param count needs (constructor pointer and single argument)
- Register registers[] = {cp, r4, r3};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r5, r4, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {cp, r4, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
r5, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
r5, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
r3, // receiver
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
r4, // JSFunction
r3, // actual number of arguments
r5, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
r3, // callee
r7, // call_data
r5, // holder
r4, // api_function_address
r6, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- cp, // context
r3, // callee
r7, // call_data
r5, // holder
r4, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r4, // math rounding function
+ r6, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
index 2977da5ebc..8f4cd4637a 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -50,8 +50,12 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
+ bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
+ if (FLAG_enable_embedded_constant_pool && !rc) {
+ masm()->AbortConstantPoolBuilding();
+ }
+ return rc;
}
@@ -118,8 +122,8 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ if (is_sloppy(info_->language_mode()) && info_->MayUseThis() &&
+ !info_->is_native() && info_->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset));
@@ -199,8 +203,9 @@ bool LCodeGen::GeneratePrologue() {
__ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -561,41 +566,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id =
- !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
@@ -883,26 +856,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length(); i < length; i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1927,20 +1885,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(result));
DCHECK(object.is(r3));
DCHECK(!scratch.is(scratch0()));
DCHECK(!scratch.is(object));
- __ TestIfSmi(object, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
- __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
-
if (index->value() == 0) {
__ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand(stamp));
@@ -2193,8 +2146,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r3));
DCHECK(ToRegister(instr->result()).is(r3));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2670,7 +2623,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined
__ cmpi(r3, Operand::Zero());
@@ -2859,14 +2813,17 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
+ &load_bool_);
}
LInstruction* instr() override { return instr_; }
Label* map_check() { return &map_check_; }
+ Label* load_bool() { return &load_bool_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
+ Label load_bool_;
};
DeferredInstanceOfKnownGlobal* deferred;
@@ -2899,6 +2856,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(map, ip);
__ bc_short(ne, &cache_miss);
+ __ bind(deferred->load_bool()); // Label used for code patching offsets.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
@@ -2932,7 +2890,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+ Label* map_check,
+ Label* bool_load) {
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(flags |
InstanceofStub::kArgsInRegisters);
@@ -2949,21 +2908,24 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Handle<Code> code = stub.GetCode();
- // Include instructions below in delta: bitwise_mov32 + call
- int delta = (masm_->InstructionsGeneratedSince(map_check) + 2) *
- Instruction::kInstrSize +
- masm_->CallSize(code);
- // r8 is used to communicate the offset to the location of the map check.
- if (is_int16(delta)) {
- delta -= Instruction::kInstrSize;
- __ li(r8, Operand(delta));
- } else {
- __ bitwise_mov32(r8, delta);
- }
+ // Include instructions below in delta: bitwise_mov32 + li + call
+ int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
+ // The labels must already be bound since the code has predictable size up
+ // to the call instruction.
+ DCHECK(map_check->is_bound());
+ DCHECK(bool_load->is_bound());
+ int map_check_delta =
+ masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
+ int bool_load_delta =
+ masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
+ // r8 is the delta from our callee's lr to the location of the map check.
+ __ bitwise_mov32(r8, map_check_delta + additional_delta);
+ // r9 is the delta from map check to bool load.
+ __ li(r9, Operand(map_check_delta - bool_load_delta));
CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(delta / Instruction::kInstrSize ==
- masm_->InstructionsGeneratedSince(map_check));
+ DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
+ masm_->InstructionsGeneratedSince(map_check));
}
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
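
The two deltas handed to the stub are plain instruction-count arithmetic. A minimal sketch of the computation, assuming kInstrSize == 4 as on PPC; the instruction counts and call size below are hypothetical, purely for illustration:

// Sketch only -- mirrors the r8/r9 values computed above.
void PatchDeltaSketch() {
  const int kInstrSize = 4;
  int instrs_since_map_check = 12;  // InstructionsGeneratedSince(map_check)
  int instrs_since_bool_load = 9;   // InstructionsGeneratedSince(bool_load)
  int call_size = 2 * kInstrSize;   // hypothetical CallSize(code)
  int additional_delta = 3 * kInstrSize + call_size;  // bitwise_mov32 + li + call
  int map_check_delta = instrs_since_map_check * kInstrSize;
  int bool_load_delta = instrs_since_bool_load * kInstrSize;
  int r8_value = map_check_delta + additional_delta;  // callee lr -> map check
  int r9_value = map_check_delta - bool_load_delta;   // map check -> bool load
  (void)r8_value; (void)r9_value;
}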
@@ -2977,7 +2939,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined
__ cmpi(r3, Operand::Zero());
@@ -3046,10 +3009,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(r3));
AllowDeferredHandleDereference vector_structure_check;
@@ -3058,7 +3020,21 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
// No need to allocate this register.
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
- __ mov(slot_register, Operand(Smi::FromInt(index)));
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}
@@ -3069,11 +3045,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3188,12 +3162,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r5.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3387,7 +3360,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3565,9 +3539,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4177,29 +4151,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- DCHECK(receiver.is(r4));
- DCHECK(name.is(r5));
- Register scratch = r7;
- Register extra = r8;
- Register extra2 = r9;
- Register extra3 = r10;
-
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(
- masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Tail call to miss if we ended up here.
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
@@ -4493,10 +4444,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4635,7 +4590,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4760,6 +4716,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4767,6 +4727,99 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = r3;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ b(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
+ __ ble(deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
+ __ bge(deferred->entry());
+ } else {
+ __ cmpw(ToRegister(key), ToRegister(current_capacity));
+ __ bge(deferred->entry());
+ }
+
+ if (instr->elements()->IsRegister()) {
+ __ Move(result, ToRegister(instr->elements()));
+ } else {
+ __ LoadP(result, ToMemOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
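
All four operand-shape cases above encode the same predicate: the deferred stub call is taken exactly when the key has reached the current capacity. A scalar model:

// Sketch: the single comparison behind the constant/register dispatch.
bool TakeDeferredGrowPath(int32_t key, int32_t current_capacity) {
  return key >= current_capacity;  // matches the b/ble/bge branches above
}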
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = r3;
+ __ li(result, Operand::Zero());
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ LoadP(result, ToMemOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
+ } else {
+ __ SmiTag(r6, ToRegister(key));
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ TestIfSmi(result, r0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+}
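
The final check relies on V8's pointer tagging: smis carry a clear low bit and heap objects a set one, so a Smi result from the stub is the failure signal. A sketch of the predicate TestIfSmi evaluates:

// Sketch of the tag test: deopt when the low bit is clear (a Smi).
inline bool IsSmiTagged(intptr_t value) { return (value & 1) == 0; }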
+
+
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register scratch = scratch0();
@@ -6218,5 +6271,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/ppc/lithium-codegen-ppc.h
index 248792f060..392bbf5872 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.h
@@ -27,7 +27,6 @@ class LCodeGen : public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -108,11 +107,12 @@ class LCodeGen : public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
Register object, Register index);
@@ -212,7 +212,6 @@ class LCodeGen : public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -286,10 +285,11 @@ class LCodeGen : public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
index c261b665e7..3528bf53f9 100644
--- a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
+++ b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
@@ -284,5 +284,5 @@ void LGapResolver::EmitMove(int index) {
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
index db6cfb2051..4f15a60d5d 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -1098,10 +1098,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
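
The resulting operand list therefore has a fixed two-entry prefix before the descriptor's own registers. Illustrative layout:

// Sketch of the ops vector built above:
// ops[0] = target (register or constant)
// ops[1] = context, fixed to cp
// ops[i] = descriptor.GetRegisterParameter(i - 2) for i >= 2,
//          i.e. i - kImplicitRegisterParameterCount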
@@ -1111,20 +1119,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r4);
@@ -1829,7 +1823,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r3);
LDateField* result =
new (zone()) LDateField(object, FixedTemp(r4), instr->index());
- return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2106,7 +2100,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new (zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2155,7 +2149,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2227,7 +2221,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result = DefineFixed(
@@ -2289,8 +2283,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- return MarkAsCall(new (zone()) LStoreKeyedGeneric(context, obj, key, val),
- instr);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result =
+ new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+ return MarkAsCall(result, instr);
}
@@ -2322,6 +2324,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, r3);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2358,8 +2375,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
- LInstruction* result = new (zone()) LStoreNamedGeneric(context, obj, val);
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2435,7 +2459,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2550,7 +2574,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2619,5 +2643,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
new (zone()) LAllocateBlockContext(context, function);
return MarkAsCall(DefineFixed(result, cp), instr);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
index 22545e5b79..853a6240d0 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -117,6 +117,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -153,7 +154,6 @@ class LCodeGen;
V(SubI) \
V(RSubI) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -464,26 +464,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1151,6 +1131,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1513,7 +1495,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1795,8 +1777,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1806,6 +1792,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2101,17 +2091,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2158,20 +2153,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = obj;
+ inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2224,6 +2223,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index e075c17014..7cd895583a 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -103,14 +103,15 @@ void MacroAssembler::CallJSEntry(Register target) {
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
- return (2 + kMovInstructions) * kInstrSize;
+ Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+ return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
- return (2 + kMovInstructions) * kInstrSize;
+ return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}
@@ -513,19 +514,35 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushFixedFrame(Register marker_reg) {
mflr(r0);
- if (marker_reg.is_valid()) {
- Push(r0, fp, cp, marker_reg);
+ if (FLAG_enable_embedded_constant_pool) {
+ if (marker_reg.is_valid()) {
+ Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
+ } else {
+ Push(r0, fp, kConstantPoolRegister, cp);
+ }
} else {
- Push(r0, fp, cp);
+ if (marker_reg.is_valid()) {
+ Push(r0, fp, cp, marker_reg);
+ } else {
+ Push(r0, fp, cp);
+ }
}
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
- if (marker_reg.is_valid()) {
- Pop(r0, fp, cp, marker_reg);
+ if (FLAG_enable_embedded_constant_pool) {
+ if (marker_reg.is_valid()) {
+ Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
+ } else {
+ Pop(r0, fp, kConstantPoolRegister, cp);
+ }
} else {
- Pop(r0, fp, cp);
+ if (marker_reg.is_valid()) {
+ Pop(r0, fp, cp, marker_reg);
+ } else {
+ Pop(r0, fp, cp);
+ }
}
mtlr(r0);
}
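
With the flag enabled the fixed frame grows by one slot for the pool pointer. An illustrative layout, highest address first, assuming Push stores its first operand at the highest address:

// Sketch of the fixed frame with FLAG_enable_embedded_constant_pool:
//   [lr (saved via r0)]
//   [fp]
//   [kConstantPoolRegister]
//   [cp]
//   [marker_reg]   (only when valid)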
@@ -651,11 +668,37 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
}
+void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+ Register code_target_address) {
+ lwz(kConstantPoolRegister,
+ MemOperand(code_target_address,
+ Code::kConstantPoolOffset - Code::kHeaderSize));
+ add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
+}
+
+
+void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
+ int code_start_delta) {
+ add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
+ code_start_delta);
+}
+
+
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
+}
+
+
void MacroAssembler::StubPrologue(int prologue_offset) {
LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
PushFixedFrame(r11);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ if (FLAG_enable_embedded_constant_pool) {
+ // ip contains prologue address
+ LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ set_constant_pool_available(true);
+ }
}
@@ -688,13 +731,26 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
}
}
+ if (FLAG_enable_embedded_constant_pool) {
+ // ip contains prologue address
+ LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ set_constant_pool_available(true);
+ }
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
- LoadSmiLiteral(ip, Smi::FromInt(type));
- PushFixedFrame(ip);
+ if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
+ PushFixedFrame();
+ // This path should not rely on ip containing the code entry.
+ LoadConstantPoolPointerRegister();
+ LoadSmiLiteral(ip, Smi::FromInt(type));
+ push(ip);
+ } else {
+ LoadSmiLiteral(ip, Smi::FromInt(type));
+ PushFixedFrame(ip);
+ }
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
@@ -704,6 +760,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
// r3: preserved
// r4: preserved
// r5: preserved
@@ -713,6 +770,13 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
int frame_ends;
LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ if (FLAG_enable_embedded_constant_pool) {
+ const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
+ const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
+ const int offset =
+ ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
+ LoadP(kConstantPoolRegister, MemOperand(fp, offset));
+ }
mtlr(r0);
frame_ends = pc_offset();
Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
@@ -759,6 +823,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
li(r8, Operand::Zero());
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
+ if (FLAG_enable_embedded_constant_pool) {
+ StoreP(kConstantPoolRegister,
+ MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ }
mov(r8, Operand(CodeObject()));
StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -828,6 +896,7 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1222,6 +1291,8 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
// hash = hash ^ (hash >> 16);
srwi(scratch, t0, Operand(16));
xor_(t0, t0, scratch);
+ // hash & 0x3fffffff
+ ExtractBitRange(t0, t0, 29, 0);
}
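
ExtractBitRange(t0, t0, 29, 0) keeps bits 29..0, i.e. the 0x3fffffff mask the comment names, clamping the hash into the positive Smi range. A scalar equivalent of this tail of the routine:

// Sketch of the final steps above.
uint32_t FinishNumberHash(uint32_t hash) {
  hash ^= hash >> 16;        // the srwi/xor_ pair
  return hash & 0x3fffffff;  // ExtractBitRange(t0, t0, 29, 0)
}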
@@ -3171,10 +3242,53 @@ void MacroAssembler::FlushICache(Register address, size_t size,
}
+void MacroAssembler::DecodeConstantPoolOffset(Register result,
+ Register location) {
+ Label overflow_access, done;
+ DCHECK(!AreAliased(result, location, r0));
+
+ // Determine constant pool access type
+ // Caller has already placed the instruction word at location in result.
+ ExtractBitRange(r0, result, 31, 26);
+ cmpi(r0, Operand(ADDIS >> 26));
+ beq(&overflow_access);
+
+ // Regular constant pool access
+ // extract the load offset
+ andi(result, result, Operand(kImm16Mask));
+ b(&done);
+
+ bind(&overflow_access);
+ // Overflow constant pool access
+ // shift addis immediate
+ slwi(r0, result, Operand(16));
+ // sign-extend and add the load offset
+ lwz(result, MemOperand(location, kInstrSize));
+ extsh(result, result);
+ add(result, r0, result);
+
+ bind(&done);
+}
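
The decoder distinguishes a one-instruction pool access (a load whose low 16 bits are the offset) from the overflow form (an addis carrying the high half, followed by a load whose signed low half is added in). A rough scalar model over raw 32-bit instruction words; 15 is PPC's addis major opcode:

// Sketch of DecodeConstantPoolOffset on raw instruction words.
int32_t DecodePoolOffset(uint32_t instr0, uint32_t instr1) {
  if ((instr0 >> 26) != 15) {                      // not ADDIS: regular access
    return static_cast<int32_t>(instr0 & 0xffff);  // offset in the low 16 bits
  }
  int32_t hi = static_cast<int32_t>(instr0 << 16);      // shifted addis immediate
  int32_t lo = static_cast<int16_t>(instr1 & 0xffff);   // sign-extended low half
  return hi + lo;
}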
+
+
void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
Register new_value) {
lwz(scratch, MemOperand(location));
+ if (FLAG_enable_embedded_constant_pool) {
+ if (emit_debug_code()) {
+ // Check that the instruction sequence is a load from the constant pool
+ ExtractBitMask(scratch, scratch, 0x1f * B16);
+ cmpi(scratch, Operand(kConstantPoolRegister.code()));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+ // Scratch was clobbered. Restore it.
+ lwz(scratch, MemOperand(location));
+ }
+ DecodeConstantPoolOffset(scratch, location);
+ StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
+ return;
+ }
+
// This code assumes a FIXED_SEQUENCE for lis/ori
// At this point scratch is a lis instruction.
@@ -3258,6 +3372,19 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result,
Register scratch) {
lwz(result, MemOperand(location));
+ if (FLAG_enable_embedded_constant_pool) {
+ if (emit_debug_code()) {
+ // Check that the instruction sequence is a load from the constant pool
+ ExtractBitMask(result, result, 0x1f * B16);
+ cmpi(result, Operand(kConstantPoolRegister.code()));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+ lwz(result, MemOperand(location));
+ }
+ DecodeConstantPoolOffset(result, location);
+ LoadPX(result, MemOperand(kConstantPoolRegister, result));
+ return;
+ }
+
// This code assumes a FIXED_SEQUENCE for lis/ori
if (emit_debug_code()) {
And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
@@ -3695,6 +3822,18 @@ void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
Register scratch) {
+ if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
+ !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
+ ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
+ if (access == ConstantPoolEntry::OVERFLOWED) {
+ addis(scratch, kConstantPoolRegister, Operand::Zero());
+ lfd(result, MemOperand(scratch, 0));
+ } else {
+ lfd(result, MemOperand(kConstantPoolRegister, 0));
+ }
+ return;
+ }
+
// avoid gcc strict aliasing error using union cast
union {
double dval;
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 2e415d6b6b..c0992c9171 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -102,7 +102,9 @@ class MacroAssembler : public Assembler {
MacroAssembler(Isolate* isolate, void* buffer, int size);
- // Returns the size of a call in instructions.
+ // Returns the size of a call in instructions. Note: the value returned is
+ // only valid as long as no entries are added to the constant pool between
+ // checking the call size and emitting the actual call.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
@@ -1061,11 +1063,16 @@ class MacroAssembler : public Assembler {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
+ if (rc == SetRC && rangeEnd == 0 && width <= 16) {
+ andi(dst, src, Operand((1 << width) - 1));
+ } else {
#if V8_TARGET_ARCH_PPC64
- rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+ rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
- rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
+ rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
+ rc);
#endif
+ }
}
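
ExtractBitRange maps an inclusive [rangeStart..rangeEnd] bit range (bit 0 = LSB) onto one rotate-and-mask instruction; the new branch prefers andi when the range is right-aligned and at most 16 bits wide, because andi's record form sets CR0 in the same instruction. Its extraction semantics in plain C++ (sketch):

#include <stdint.h>

uint64_t ExtractBitRangeRef(uint64_t src, int rangeStart, int rangeEnd) {
  int width = rangeStart - rangeEnd + 1;
  // Right-align the range, then keep `width` bits.
  uint64_t mask =
      (width >= 64) ? ~UINT64_C(0) : ((UINT64_C(1) << width) - 1);
  return (src >> rangeEnd) & mask;
}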
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
@@ -1360,7 +1367,11 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Patching helpers.
- // Retrieve/patch the relocated value (lis/ori pair).
+ // Decode offset from constant pool load instruction(s).
+ // Caller must place the instruction word at <location> in <result>.
+ void DecodeConstantPoolOffset(Register result, Register location);
+
+ // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
void GetRelocatedValue(Register location, Register result, Register scratch);
void SetRelocatedValue(Register location, Register scratch,
Register new_value);
@@ -1449,6 +1460,19 @@ class MacroAssembler : public Assembler {
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ // Loads the constant pool pointer (kConstantPoolRegister).
+ void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+ Register code_target_address);
+ void LoadConstantPoolPointerRegister();
+ void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
+
+ void AbortConstantPoolBuilding() {
+#ifdef DEBUG
+ // Avoid DCHECK(!is_linked()) failure in ~Label()
+ bind(ConstantPoolPosition());
+#endif
+ }
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
index 5f754ca8aa..05e84e415f 100644
--- a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
@@ -1249,7 +1249,7 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
#undef __
#endif // V8_INTERPRETED_REGEXP
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 7139ead213..261982c0b3 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -794,10 +794,11 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
#if V8_TARGET_ARCH_PPC64
- size_t stack_size = 2 * 1024 * 1024; // allocate 2MB for stack
+ size_t stack_size = FLAG_sim_stack_size * KB;
#else
- size_t stack_size = 1 * 1024 * 1024; // allocate 1MB for stack
+ size_t stack_size = MB; // allocate 1MB for stack
#endif
+ stack_size += 2 * stack_protection_size_;
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
@@ -823,14 +824,15 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
- registers_[sp] = reinterpret_cast<intptr_t>(stack_) + stack_size - 64;
+ registers_[sp] =
+ reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
InitializeCoverage();
last_debugger_input_ = NULL;
}
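
After this change the allocation reserves a stack_protection_size_ guard at each end: StackLimit() sits one guard above the base and the initial sp one guard below the top, leaving the usable region at exactly FLAG_sim_stack_size KB on PPC64 (1MB on 32-bit). A sketch of the resulting arithmetic:

#include <stddef.h>
#include <stdint.h>

// base                                              base + stack_size
//   |<- protection ->|<----- usable stack ----->|<- protection ->|
//                    ^ StackLimit()             ^ initial sp (grows down)
struct SimStackLayout {
  uintptr_t stack_limit;  // lowest address generated code should push to
  uintptr_t initial_sp;   // where registers_[sp] starts
};

SimStackLayout ComputeLayout(uintptr_t base, size_t stack_size,
                             size_t protection) {
  SimStackLayout layout;
  layout.stack_limit = base + protection;              // StackLimit()
  layout.initial_sp = base + stack_size - protection;  // ctor above
  return layout;
}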
-Simulator::~Simulator() {}
+Simulator::~Simulator() { free(stack_); }
// When the generated code calls an external reference we need to catch that in
@@ -878,7 +880,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
@@ -888,6 +890,14 @@ class Redirection {
return redirection->external_function();
}
+ static void DeleteChain(Redirection* redirection) {
+ while (redirection != nullptr) {
+ Redirection* next = redirection->next_;
+ delete redirection;
+ redirection = next;
+ }
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -896,6 +906,19 @@ class Redirection {
};
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+ Redirection::DeleteChain(first);
+ if (i_cache != nullptr) {
+ for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ entry = i_cache->Next(entry)) {
+ delete static_cast<CachePage*>(entry->value);
+ }
+ delete i_cache;
+ }
+}
+
+
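
Redirection::DeleteChain above captures next_ before each delete so the walk never reads freed memory; TearDown then applies the same owner-frees-everything pattern to the instruction cache pages. The generic list-teardown shape (sketch):

struct Node { Node* next; };

void DeleteChain(Node* node) {
  while (node != nullptr) {
    Node* next = node->next;  // capture before delete
    delete node;
    node = next;
  }
}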
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@@ -1086,9 +1109,8 @@ void Simulator::WriteDW(intptr_t addr, int64_t value) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 1024;
+ // Leave a safety margin to prevent overrunning the stack when pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + stack_protection_size_;
}
@@ -3877,8 +3899,8 @@ uintptr_t Simulator::PopAddress() {
set_register(sp, current_sp + sizeof(uintptr_t));
return address;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // USE_SIMULATOR
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index cf338ccfdc..c92281682f 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -212,6 +212,8 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
+ static void TearDown(HashMap* i_cache, Redirection* first);
+
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
@@ -349,6 +351,7 @@ class Simulator {
// Simulator support.
char* stack_;
+ static const size_t stack_protection_size_ = 256 * kPointerSize;
bool pc_modified_;
int icount_;
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/preparse-data-format.h
index de106939c7..560693f67e 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -14,7 +14,7 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 10;
+ static const unsigned kCurrentVersion = 11;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
@@ -28,7 +28,8 @@ struct PreparseDataConstants {
static const int kMessageEndPos = 1;
static const int kMessageArgCountPos = 2;
static const int kParseErrorTypePos = 3;
- static const int kMessageTextPos = 4;
+ static const int kMessageTemplatePos = 4;
+ static const int kMessageArgPos = 5;
static const unsigned char kNumberTerminator = 0x80u;
};
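
With kCurrentVersion bumped to 11, an error record carries the MessageTemplate enum value instead of an inline message string, followed by at most one argument string. Combined with the LogMessage change below, the per-record layout is as follows (illustrative struct, not a real V8 type; kMessageStartPos = 0 is implied by the numbering but cut off above this hunk):

struct PreparseErrorRecord {
  unsigned start_pos;         // kMessageStartPos    = 0
  unsigned end_pos;           // kMessageEndPos      = 1
  unsigned arg_count;         // kMessageArgCountPos = 2 (0 or 1)
  unsigned error_type;        // kParseErrorTypePos  = 3
  unsigned message_template;  // kMessageTemplatePos = 4 (MessageTemplate)
  // kMessageArgPos = 5: argument string data follows iff arg_count == 1.
};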
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index e1c7ad199b..ffbfbab633 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -29,7 +29,7 @@ CompleteParserRecorder::CompleteParserRecorder() {
void CompleteParserRecorder::LogMessage(int start_pos, int end_pos,
- const char* message,
+ MessageTemplate::Template message,
const char* arg_opt,
ParseErrorType error_type) {
if (HasError()) return;
@@ -43,8 +43,9 @@ void CompleteParserRecorder::LogMessage(int start_pos, int end_pos,
function_store_.Add((arg_opt == NULL) ? 0 : 1);
STATIC_ASSERT(PreparseDataConstants::kParseErrorTypePos == 3);
function_store_.Add(error_type);
- STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 4);
- WriteString(CStrVector(message));
+ STATIC_ASSERT(PreparseDataConstants::kMessageTemplatePos == 4);
+ function_store_.Add(static_cast<unsigned>(message));
+ STATIC_ASSERT(PreparseDataConstants::kMessageArgPos == 5);
if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
}
@@ -75,4 +76,5 @@ ScriptData* CompleteParserRecorder::GetScriptData() {
}
-} } // namespace v8::internal.
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index ce3b2e0d07..f7ed1ed91a 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/hashmap.h"
+#include "src/messages.h"
#include "src/preparse-data-format.h"
namespace v8 {
@@ -52,13 +53,13 @@ class ParserRecorder {
// Logs the scope and some details of a function literal in the source.
virtual void LogFunction(int start, int end, int literals, int properties,
- LanguageMode language_mode,
- bool uses_super_property) = 0;
+ LanguageMode language_mode, bool uses_super_property,
+ bool calls_eval) = 0;
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
// representing the error only.
- virtual void LogMessage(int start, int end, const char* message,
+ virtual void LogMessage(int start, int end, MessageTemplate::Template message,
const char* argument_opt,
ParseErrorType error_type) = 0;
@@ -76,21 +77,22 @@ class SingletonLogger : public ParserRecorder {
void Reset() { has_error_ = false; }
virtual void LogFunction(int start, int end, int literals, int properties,
- LanguageMode language_mode,
- bool scope_uses_super_property) {
+ LanguageMode language_mode, bool uses_super_property,
+ bool calls_eval) {
DCHECK(!has_error_);
start_ = start;
end_ = end;
literals_ = literals;
properties_ = properties;
language_mode_ = language_mode;
- scope_uses_super_property_ = scope_uses_super_property;
+ uses_super_property_ = uses_super_property;
+ calls_eval_ = calls_eval;
}
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
// representing the error only.
- virtual void LogMessage(int start, int end, const char* message,
+ virtual void LogMessage(int start, int end, MessageTemplate::Template message,
const char* argument_opt, ParseErrorType error_type) {
if (has_error_) return;
has_error_ = true;
@@ -117,15 +119,19 @@ class SingletonLogger : public ParserRecorder {
DCHECK(!has_error_);
return language_mode_;
}
- bool scope_uses_super_property() const {
+ bool uses_super_property() const {
DCHECK(!has_error_);
- return scope_uses_super_property_;
+ return uses_super_property_;
+ }
+ bool calls_eval() const {
+ DCHECK(!has_error_);
+ return calls_eval_;
}
ParseErrorType error_type() const {
DCHECK(has_error_);
return error_type_;
}
- const char* message() {
+ MessageTemplate::Template message() {
DCHECK(has_error_);
return message_;
}
@@ -142,9 +148,10 @@ class SingletonLogger : public ParserRecorder {
int literals_;
int properties_;
LanguageMode language_mode_;
- bool scope_uses_super_property_;
+ bool uses_super_property_;
+ bool calls_eval_;
// For error messages.
- const char* message_;
+ MessageTemplate::Template message_;
const char* argument_opt_;
ParseErrorType error_type_;
};
@@ -161,20 +168,21 @@ class CompleteParserRecorder : public ParserRecorder {
virtual ~CompleteParserRecorder() {}
virtual void LogFunction(int start, int end, int literals, int properties,
- LanguageMode language_mode,
- bool scope_uses_super_property) {
+ LanguageMode language_mode, bool uses_super_property,
+ bool calls_eval) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
function_store_.Add(language_mode);
- function_store_.Add(scope_uses_super_property);
+ function_store_.Add(uses_super_property);
+ function_store_.Add(calls_eval);
}
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
// representing the error only.
- virtual void LogMessage(int start, int end, const char* message,
+ virtual void LogMessage(int start, int end, MessageTemplate::Template message,
const char* argument_opt, ParseErrorType error_type);
ScriptData* GetScriptData();
@@ -189,9 +197,6 @@ class CompleteParserRecorder : public ParserRecorder {
private:
void WriteString(Vector<const char> str);
- // Write a non-negative number to the symbol store.
- void WriteNumber(int number);
-
Collector<unsigned> function_store_;
unsigned preamble_[PreparseDataConstants::kHeaderSize];
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index e3c421ea14..b1852bf978 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -21,14 +21,16 @@ namespace v8 {
namespace internal {
void PreParserTraits::ReportMessageAt(Scanner::Location location,
- const char* message, const char* arg,
+ MessageTemplate::Template message,
+ const char* arg,
ParseErrorType error_type) {
ReportMessageAt(location.beg_pos, location.end_pos, message, arg, error_type);
}
void PreParserTraits::ReportMessageAt(int start_pos, int end_pos,
- const char* message, const char* arg,
+ MessageTemplate::Template message,
+ const char* arg,
ParseErrorType error_type) {
pre_parser_->log_->LogMessage(start_pos, end_pos, message, arg, error_type);
}
@@ -91,10 +93,11 @@ PreParserExpression PreParserTraits::ParseFunctionLiteral(
PreParserIdentifier name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok) {
return pre_parser_->ParseFunctionLiteral(
name, function_name_location, name_is_strict_reserved, kind,
- function_token_position, type, arity_restriction, ok);
+ function_token_position, type, arity_restriction, language_mode, ok);
}
@@ -108,7 +111,8 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
FunctionState top_state(&function_state_, &scope_, top_scope, kNormalFunction,
&top_factory);
scope_->SetLanguageMode(language_mode);
- Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE);
+ Scope* function_scope = NewScope(
+ scope_, IsArrowFunction(kind) ? ARROW_SCOPE : FUNCTION_SCOPE, kind);
PreParserFactory function_factory(NULL);
FunctionState function_state(&function_state_, &scope_, function_scope, kind,
&function_factory);
@@ -132,7 +136,8 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
if (is_strong(scope_->language_mode()) && IsSubclassConstructor(kind)) {
if (!function_state.super_location().IsValid()) {
ReportMessageAt(Scanner::Location(start_position, start_position + 1),
- "strong_super_call_missing", kReferenceError);
+ MessageTemplate::kStrongSuperCallMissing,
+ kReferenceError);
return kPreParseSuccess;
}
}
@@ -188,15 +193,19 @@ PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
case Token::CLASS:
return ParseClassDeclaration(ok);
case Token::CONST:
- return ParseVariableStatement(kStatementListItem, ok);
+ if (allow_const()) {
+ return ParseVariableStatement(kStatementListItem, ok);
+ }
+ break;
case Token::LET:
if (is_strict(language_mode())) {
return ParseVariableStatement(kStatementListItem, ok);
}
- // Fall through.
+ break;
default:
- return ParseStatement(ok);
+ break;
}
+ return ParseStatement(ok);
}
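
After this restructuring, const parses as a declaration only when allow_const() holds; every case that breaks falls through to the single ParseStatement call at the end. A sketch of the gate and its effect (allow_const() itself is added to preparser.h later in this diff):

// allow_const(), as added to preparser.h below:
bool ParsesConstAsDeclaration(bool strict_mode, bool legacy_const) {
  return strict_mode || legacy_const;
}

// With legacy_const false:
//   "use strict"; const x = 1;  // declaration: ParseVariableStatement
//   const x = 1;                // sloppy: break, handled by ParseStatement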
@@ -229,13 +238,13 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
Scanner::Location super_loc = function_state_->super_location();
if (this_loc.beg_pos != old_this_loc.beg_pos &&
this_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(this_loc, "strong_constructor_this");
+ ReportMessageAt(this_loc, MessageTemplate::kStrongConstructorThis);
*ok = false;
return;
}
if (super_loc.beg_pos != old_super_loc.beg_pos &&
super_loc.beg_pos != token_loc.beg_pos) {
- ReportMessageAt(super_loc, "strong_constructor_super");
+ ReportMessageAt(super_loc, MessageTemplate::kStrongConstructorSuper);
*ok = false;
return;
}
@@ -323,7 +332,7 @@ PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
case Token::SEMICOLON:
if (is_strong(language_mode())) {
PreParserTraits::ReportMessageAt(scanner()->peek_location(),
- "strong_empty");
+ MessageTemplate::kStrongEmpty);
*ok = false;
return Statement::Default();
}
@@ -370,7 +379,7 @@ PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
if (is_strict(language_mode())) {
PreParserTraits::ReportMessageAt(start_location.beg_pos,
end_location.end_pos,
- "strict_function");
+ MessageTemplate::kStrictFunction);
*ok = false;
return Statement::Default();
} else {
@@ -388,7 +397,7 @@ PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
// In ES6 CONST is not allowed as a Statement, only as a
// LexicalDeclaration, however we continue to allow it in sloppy mode for
// backwards compatibility.
- if (is_sloppy(language_mode())) {
+ if (is_sloppy(language_mode()) && allow_legacy_const()) {
return ParseVariableStatement(kStatement, ok);
}
@@ -415,7 +424,8 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
pos, FunctionLiteral::DECLARATION,
- FunctionLiteral::NORMAL_ARITY, CHECK_OK);
+ FunctionLiteral::NORMAL_ARITY, language_mode(),
+ CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -423,7 +433,7 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
Expect(Token::CLASS, CHECK_OK);
if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
- ReportMessage("sloppy_lexical");
+ ReportMessage(MessageTemplate::kSloppyLexical);
*ok = false;
return Statement::Default();
}
@@ -499,12 +509,12 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strong_var");
+ ReportMessageAt(location, MessageTemplate::kStrongVar);
*ok = false;
return Statement::Default();
}
Consume(Token::VAR);
- } else if (peek() == Token::CONST) {
+ } else if (peek() == Token::CONST && allow_const()) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
//
// ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
@@ -545,7 +555,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
- if (!FLAG_harmony_destructuring && !pattern.IsIdentifier()) {
+ if (!allow_harmony_destructuring() && !pattern.IsIdentifier()) {
ReportUnexpectedToken(next);
*ok = false;
return Statement::Default();
@@ -595,6 +605,8 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
return Statement::Default();
case Token::THIS:
+ if (!FLAG_strong_this) break;
+ // Fall through.
case Token::SUPER:
if (is_strong(language_mode()) &&
i::IsConstructor(function_state_->kind())) {
@@ -617,8 +629,9 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
default:
if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
ReportMessageAt(function_state_->this_location(),
- is_this ? "strong_constructor_this"
- : "strong_constructor_super");
+ is_this
+ ? MessageTemplate::kStrongConstructorThis
+ : MessageTemplate::kStrongConstructorSuper);
*ok = false;
return Statement::Default();
}
@@ -659,7 +672,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Detect attempts at 'let' declarations in sloppy mode.
if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
expr.IsIdentifier() && expr.AsIdentifier().IsLet()) {
- ReportMessage("sloppy_lexical", NULL);
+ ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return Statement::Default();
}
@@ -749,7 +762,7 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
i::IsConstructor(function_state_->kind())) {
int pos = peek_position();
ReportMessageAt(Scanner::Location(pos, pos + 1),
- "strong_constructor_return_value");
+ MessageTemplate::kStrongConstructorReturnValue);
*ok = false;
return Statement::Default();
}
@@ -765,7 +778,7 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
if (is_strict(language_mode())) {
- ReportMessageAt(scanner()->location(), "strict_mode_with");
+ ReportMessageAt(scanner()->location(), MessageTemplate::kStrictWith);
*ok = false;
return Statement::Default();
}
@@ -809,7 +822,8 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
}
if (is_strong(language_mode()) && !statement.IsJumpStatement() &&
token != Token::RBRACE) {
- ReportMessageAt(scanner()->location(), "strong_switch_fallthrough");
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrongSwitchFallthrough);
*ok = false;
return Statement::Default();
}
@@ -856,7 +870,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
bool is_let_identifier_expression = false;
if (peek() != Token::SEMICOLON) {
ForEachStatement::VisitMode mode;
- if (peek() == Token::VAR || peek() == Token::CONST ||
+ if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
(peek() == Token::LET && is_strict(language_mode()))) {
int decl_count;
Scanner::Location first_initializer_loc = Scanner::Location::invalid();
@@ -872,17 +886,20 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
const char* loop_type =
mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
PreParserTraits::ReportMessageAt(
- bindings_loc, "for_inof_loop_multi_bindings", loop_type);
+ bindings_loc, MessageTemplate::kForInOfLoopMultiBindings,
+ loop_type);
*ok = false;
return Statement::Default();
}
if (first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE)) {
if (mode == ForEachStatement::ITERATE) {
- ReportMessageAt(first_initializer_loc, "for_of_loop_initializer");
+ ReportMessageAt(first_initializer_loc,
+ MessageTemplate::kForOfLoopInitializer);
} else {
// TODO(caitp): This should be an error in sloppy mode, too.
- ReportMessageAt(first_initializer_loc, "for_in_loop_initializer");
+ ReportMessageAt(first_initializer_loc,
+ MessageTemplate::kForInLoopInitializer);
}
*ok = false;
return Statement::Default();
@@ -910,7 +927,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// Detect attempts at 'let' declarations in sloppy mode.
if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
is_let_identifier_expression) {
- ReportMessage("sloppy_lexical", NULL);
+ ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return Statement::Default();
}
@@ -937,7 +954,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessageAt(scanner()->location(), "newline_after_throw");
+ ReportMessageAt(scanner()->location(), MessageTemplate::kNewlineAfterThrow);
*ok = false;
return Statement::Default();
}
@@ -965,7 +982,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessageAt(scanner()->location(), "no_catch_or_finally");
+ ReportMessageAt(scanner()->location(), MessageTemplate::kNoCatchOrFinally);
*ok = false;
return Statement::Default();
}
@@ -1014,32 +1031,32 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
bool outer_is_script_scope = scope_->is_script_scope();
- Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE);
+ Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE, kind);
+ function_scope->SetLanguageMode(language_mode);
PreParserFactory factory(NULL);
FunctionState function_state(&function_state_, &scope_, function_scope, kind,
&factory);
- FormalParameterErrorLocations error_locs;
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ ExpressionClassifier formals_classifier(&duplicate_finder);
- bool is_rest = false;
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
function_scope->set_start_position(start_position);
- int num_parameters;
- {
- DuplicateFinder duplicate_finder(scanner()->unicode_cache());
- num_parameters = ParseFormalParameterList(&duplicate_finder, &error_locs,
- &is_rest, CHECK_OK);
- }
+ PreParserFormalParameterParsingState parsing_state(nullptr);
+ int num_parameters =
+ ParseFormalParameterList(&parsing_state, &formals_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(num_parameters, arity_restriction, start_position,
+ CheckArityRestrictions(num_parameters, arity_restriction,
+ parsing_state.has_rest, start_position,
formals_end_position, CHECK_OK);
// See Parser::ParseFunctionLiteral for more information about lazy parsing
@@ -1056,22 +1073,29 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
}
Expect(Token::RBRACE, CHECK_OK);
+ // Parsing the body may change the language mode in our scope.
+ language_mode = function_scope->language_mode();
+
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
- CheckFunctionName(language_mode(), kind, function_name,
+ CheckFunctionName(language_mode, kind, function_name,
name_is_strict_reserved, function_name_location, CHECK_OK);
- const bool use_strict_params = is_rest || IsConciseMethod(kind);
- CheckFunctionParameterNames(language_mode(), use_strict_params, error_locs,
- CHECK_OK);
-
- if (is_strict(language_mode())) {
+ const bool strict_formal_parameters =
+ !parsing_state.is_simple_parameter_list || IsConciseMethod(kind);
+ const bool allow_duplicate_parameters =
+ is_sloppy(language_mode) && !strict_formal_parameters;
+ ValidateFormalParameters(&formals_classifier, language_mode,
+ allow_duplicate_parameters, CHECK_OK);
+
+ if (is_strict(language_mode)) {
int end_position = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
- if (is_strong(language_mode()) && IsSubclassConstructor(kind)) {
+ if (is_strong(language_mode) && IsSubclassConstructor(kind)) {
if (!function_state.super_location().IsValid()) {
- ReportMessageAt(function_name_location, "strong_super_call_missing",
+ ReportMessageAt(function_name_location,
+ MessageTemplate::kStrongSuperCallMissing,
kReferenceError);
*ok = false;
return Expression::Default();
@@ -1095,7 +1119,7 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
log_->LogFunction(body_start, body_end,
function_state_->materialized_literal_count(),
function_state_->expected_property_count(), language_mode(),
- scope_->uses_super_property());
+ scope_->uses_super_property(), scope_->calls_eval());
}
@@ -1104,18 +1128,19 @@ PreParserExpression PreParser::ParseClassLiteral(
bool name_is_strict_reserved, int pos, bool* ok) {
// All parts of a ClassDeclaration and ClassExpression are strict code.
if (name_is_strict_reserved) {
- ReportMessageAt(class_name_location, "unexpected_strict_reserved");
+ ReportMessageAt(class_name_location,
+ MessageTemplate::kUnexpectedStrictReserved);
*ok = false;
return EmptyExpression();
}
if (IsEvalOrArguments(name)) {
- ReportMessageAt(class_name_location, "strict_eval_arguments");
+ ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
*ok = false;
return EmptyExpression();
}
LanguageMode class_language_mode = language_mode();
if (is_strong(class_language_mode) && IsUndefined(name)) {
- ReportMessageAt(class_name_location, "strong_undefined");
+ ReportMessageAt(class_name_location, MessageTemplate::kStrongUndefined);
*ok = false;
return EmptyExpression();
}
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index c840092454..9ebc132d92 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -8,8 +8,10 @@
#include "src/v8.h"
#include "src/bailout-reason.h"
+#include "src/expression-classifier.h"
#include "src/func-name-inferrer.h"
#include "src/hashmap.h"
+#include "src/messages.h"
#include "src/scanner.h"
#include "src/scopes.h"
#include "src/token.h"
@@ -17,26 +19,6 @@
namespace v8 {
namespace internal {
-
-// When parsing the formal parameters of a function, we usually don't yet know
-// if the function will be strict, so we cannot yet produce errors for
-// parameter names or duplicates. Instead, we remember the locations of these
-// errors if they occur and produce the errors later.
-class FormalParameterErrorLocations BASE_EMBEDDED {
- public:
- FormalParameterErrorLocations()
- : eval_or_arguments(Scanner::Location::invalid()),
- undefined(Scanner::Location::invalid()),
- duplicate(Scanner::Location::invalid()),
- reserved(Scanner::Location::invalid()) {}
-
- Scanner::Location eval_or_arguments;
- Scanner::Location undefined;
- Scanner::Location duplicate;
- Scanner::Location reserved;
-};
-
-
// Common base class shared between parser and pre-parser. Traits encapsulate
// the differences between Parser and PreParser:
@@ -72,7 +54,6 @@ class FormalParameterErrorLocations BASE_EMBEDDED {
// typedef ExpressionList;
// typedef PropertyList;
// typedef FormalParameter;
-// typedef FormalParameterScope;
// // For constructing objects returned by the traversing functions.
// typedef Factory;
// };
@@ -86,10 +67,11 @@ class ParserBase : public Traits {
typedef typename Traits::Type::Expression ExpressionT;
typedef typename Traits::Type::Identifier IdentifierT;
typedef typename Traits::Type::FormalParameter FormalParameterT;
- typedef typename Traits::Type::FormalParameterScope FormalParameterScopeT;
typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
typedef typename Traits::Type::Literal LiteralT;
typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
+ typedef typename Traits::Type::FormalParameterParsingState
+ FormalParameterParsingStateT;
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
@@ -110,76 +92,39 @@ class ParserBase : public Traits {
allow_lazy_(false),
allow_natives_(false),
allow_harmony_arrow_functions_(false),
- allow_harmony_object_literals_(false),
allow_harmony_sloppy_(false),
allow_harmony_computed_property_names_(false),
allow_harmony_rest_params_(false),
allow_harmony_spreadcalls_(false),
- allow_strong_mode_(false) {}
+ allow_harmony_destructuring_(false),
+ allow_harmony_spread_arrays_(false),
+ allow_harmony_new_target_(false),
+ allow_strong_mode_(false),
+ allow_legacy_const_(true) {}
+
+#define ALLOW_ACCESSORS(name) \
+ bool allow_##name() const { return allow_##name##_; } \
+ void set_allow_##name(bool allow) { allow_##name##_ = allow; }
+
+ ALLOW_ACCESSORS(lazy);
+ ALLOW_ACCESSORS(natives);
+ ALLOW_ACCESSORS(harmony_arrow_functions);
+ ALLOW_ACCESSORS(harmony_sloppy);
+ ALLOW_ACCESSORS(harmony_computed_property_names);
+ ALLOW_ACCESSORS(harmony_rest_params);
+ ALLOW_ACCESSORS(harmony_spreadcalls);
+ ALLOW_ACCESSORS(harmony_destructuring);
+ ALLOW_ACCESSORS(harmony_spread_arrays);
+ ALLOW_ACCESSORS(harmony_new_target);
+ ALLOW_ACCESSORS(strong_mode);
+ ALLOW_ACCESSORS(legacy_const);
+#undef ALLOW_ACCESSORS
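
Each ALLOW_ACCESSORS(name) invocation replaces a hand-written getter/setter pair over the matching allow_##name##_ field; for example, ALLOW_ACCESSORS(lazy) expands (inside ParserBase) to:

bool allow_lazy() const { return allow_lazy_; }
void set_allow_lazy(bool allow) { allow_lazy_ = allow; }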
- // Getters that indicate whether certain syntactical constructs are
- // allowed to be parsed by this instance of the parser.
- bool allow_lazy() const { return allow_lazy_; }
- bool allow_natives() const { return allow_natives_; }
- bool allow_harmony_arrow_functions() const {
- return allow_harmony_arrow_functions_;
- }
bool allow_harmony_modules() const { return scanner()->HarmonyModules(); }
- bool allow_harmony_classes() const { return scanner()->HarmonyClasses(); }
- bool allow_harmony_object_literals() const {
- return allow_harmony_object_literals_;
- }
- bool allow_harmony_sloppy() const { return allow_harmony_sloppy_; }
bool allow_harmony_unicode() const { return scanner()->HarmonyUnicode(); }
- bool allow_harmony_computed_property_names() const {
- return allow_harmony_computed_property_names_;
- }
- bool allow_harmony_rest_params() const {
- return allow_harmony_rest_params_;
- }
- bool allow_harmony_spreadcalls() const { return allow_harmony_spreadcalls_; }
- bool allow_harmony_destructuring() const {
- return allow_harmony_destructuring_;
- }
-
- bool allow_strong_mode() const { return allow_strong_mode_; }
-
- // Setters that determine whether certain syntactical constructs are
- // allowed to be parsed by this instance of the parser.
- void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
- void set_allow_natives(bool allow) { allow_natives_ = allow; }
- void set_allow_harmony_arrow_functions(bool allow) {
- allow_harmony_arrow_functions_ = allow;
- }
- void set_allow_harmony_modules(bool allow) {
- scanner()->SetHarmonyModules(allow);
- }
- void set_allow_harmony_classes(bool allow) {
- scanner()->SetHarmonyClasses(allow);
- }
- void set_allow_harmony_object_literals(bool allow) {
- allow_harmony_object_literals_ = allow;
- }
- void set_allow_harmony_sloppy(bool allow) {
- allow_harmony_sloppy_ = allow;
- }
- void set_allow_harmony_unicode(bool allow) {
- scanner()->SetHarmonyUnicode(allow);
- }
- void set_allow_harmony_computed_property_names(bool allow) {
- allow_harmony_computed_property_names_ = allow;
- }
- void set_allow_harmony_rest_params(bool allow) {
- allow_harmony_rest_params_ = allow;
- }
- void set_allow_harmony_spreadcalls(bool allow) {
- allow_harmony_spreadcalls_ = allow;
- }
- void set_allow_strong_mode(bool allow) { allow_strong_mode_ = allow; }
- void set_allow_harmony_destructuring(bool allow) {
- allow_harmony_destructuring_ = allow;
- }
+ void set_allow_harmony_modules(bool a) { scanner()->SetHarmonyModules(a); }
+ void set_allow_harmony_unicode(bool a) { scanner()->SetHarmonyUnicode(a); }
protected:
enum AllowRestrictedIdentifiers {
@@ -209,15 +154,14 @@ class ParserBase : public Traits {
class BlockState BASE_EMBEDDED {
public:
BlockState(Scope** scope_stack, Scope* scope)
- : scope_stack_(scope_stack), outer_scope_(*scope_stack), scope_(scope) {
- *scope_stack_ = scope_;
+ : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
+ *scope_stack_ = scope;
}
~BlockState() { *scope_stack_ = outer_scope_; }
private:
Scope** scope_stack_;
Scope* outer_scope_;
- Scope* scope_;
};
class FunctionState BASE_EMBEDDED {
@@ -234,8 +178,9 @@ class ParserBase : public Traits {
return next_materialized_literal_index_;
}
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
+ void SkipMaterializedLiterals(int count) {
+ next_materialized_literal_index_ += count;
+ }
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
@@ -277,9 +222,6 @@ class ParserBase : public Traits {
// array literals.
int next_materialized_literal_index_;
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
// Properties count estimation.
int expected_property_count_;
@@ -318,21 +260,21 @@ class ParserBase : public Traits {
function_state_ = parser->function_state_;
next_materialized_literal_index_ =
function_state_->next_materialized_literal_index_;
- next_handler_index_ = function_state_->next_handler_index_;
expected_property_count_ = function_state_->expected_property_count_;
}
- void Restore() {
+ void Restore(int* materialized_literal_index_delta) {
+ *materialized_literal_index_delta =
+ function_state_->next_materialized_literal_index_ -
+ next_materialized_literal_index_;
function_state_->next_materialized_literal_index_ =
next_materialized_literal_index_;
- function_state_->next_handler_index_ = next_handler_index_;
function_state_->expected_property_count_ = expected_property_count_;
}
private:
FunctionState* function_state_;
int next_materialized_literal_index_;
- int next_handler_index_;
int expected_property_count_;
};
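
Restore now reports how many materialized-literal slots the rolled-back region consumed, so a caller that later decides to keep the speculative parse can re-reserve them with the new SkipMaterializedLiterals. Hypothetical usage; the checkpoint variable and the surrounding speculative parse are placeholders:

int literal_delta = 0;
checkpoint.Restore(&literal_delta);  // roll the counters back
// ... if the speculatively parsed region is reused instead of re-parsed:
function_state_->SkipMaterializedLiterals(literal_delta);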
@@ -352,12 +294,17 @@ class ParserBase : public Traits {
Mode old_mode_;
};
- Scope* NewScope(Scope* parent, ScopeType scope_type,
- FunctionKind kind = kNormalFunction) {
+ Scope* NewScope(Scope* parent, ScopeType scope_type) {
+ // Must always pass the function kind for FUNCTION_SCOPE and ARROW_SCOPE.
+ DCHECK(scope_type != FUNCTION_SCOPE);
+ DCHECK(scope_type != ARROW_SCOPE);
+ return NewScope(parent, scope_type, kNormalFunction);
+ }
+
+ Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
DCHECK(ast_value_factory());
DCHECK(scope_type != MODULE_SCOPE || allow_harmony_modules());
- DCHECK((scope_type == FUNCTION_SCOPE && IsValidFunctionKind(kind)) ||
- kind == kNormalFunction);
+ DCHECK(!IsArrowFunction(kind) || scope_type == ARROW_SCOPE);
Scope* result = new (zone())
Scope(zone(), parent, scope_type, ast_value_factory(), kind);
result->Initialize();
@@ -464,7 +411,7 @@ class ParserBase : public Traits {
bool accept_OF, ForEachStatement::VisitMode* visit_mode, bool* ok) {
if (Check(Token::IN)) {
if (is_strong(language_mode())) {
- ReportMessageAt(scanner()->location(), "strong_for_in");
+ ReportMessageAt(scanner()->location(), MessageTemplate::kStrongForIn);
*ok = false;
} else {
*visit_mode = ForEachStatement::ENUMERATE;
@@ -479,23 +426,25 @@ class ParserBase : public Traits {
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode and template strings.
- void CheckOctalLiteral(int beg_pos, int end_pos, const char* error,
- bool* ok) {
+ void CheckOctalLiteral(int beg_pos, int end_pos,
+ MessageTemplate::Template message, bool* ok) {
Scanner::Location octal = scanner()->octal_position();
if (octal.IsValid() && beg_pos <= octal.beg_pos &&
octal.end_pos <= end_pos) {
- ReportMessageAt(octal, error);
+ ReportMessageAt(octal, message);
scanner()->clear_octal_position();
*ok = false;
}
}
inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- CheckOctalLiteral(beg_pos, end_pos, "strict_octal_literal", ok);
+ CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kStrictOctalLiteral,
+ ok);
}
inline void CheckTemplateOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- CheckOctalLiteral(beg_pos, end_pos, "template_octal_literal", ok);
+ CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kTemplateOctalLiteral,
+ ok);
}
// Checking the name of a function literal. This has to be done after parsing
@@ -511,48 +460,20 @@ class ParserBase : public Traits {
if (is_sloppy(language_mode)) return;
if (this->IsEvalOrArguments(function_name)) {
- Traits::ReportMessageAt(function_name_loc, "strict_eval_arguments");
+ Traits::ReportMessageAt(function_name_loc,
+ MessageTemplate::kStrictEvalArguments);
*ok = false;
return;
}
if (function_name_is_strict_reserved) {
- Traits::ReportMessageAt(function_name_loc, "unexpected_strict_reserved");
+ Traits::ReportMessageAt(function_name_loc,
+ MessageTemplate::kUnexpectedStrictReserved);
*ok = false;
return;
}
if (is_strong(language_mode) && this->IsUndefined(function_name)) {
- Traits::ReportMessageAt(function_name_loc, "strong_undefined");
- *ok = false;
- return;
- }
- }
-
- // Checking the parameter names of a function literal. This has to be done
- // after parsing the function, since the function can declare itself strict.
- void CheckFunctionParameterNames(LanguageMode language_mode,
- bool strict_params,
- const FormalParameterErrorLocations& locs,
- bool* ok) {
- if (is_sloppy(language_mode) && !strict_params) return;
- if (is_strict(language_mode) && locs.eval_or_arguments.IsValid()) {
- Traits::ReportMessageAt(locs.eval_or_arguments, "strict_eval_arguments");
- *ok = false;
- return;
- }
- if (is_strict(language_mode) && locs.reserved.IsValid()) {
- Traits::ReportMessageAt(locs.reserved, "unexpected_strict_reserved");
- *ok = false;
- return;
- }
- if (is_strong(language_mode) && locs.undefined.IsValid()) {
- Traits::ReportMessageAt(locs.undefined, "strong_undefined");
- *ok = false;
- return;
- }
- // TODO(arv): When we add support for destructuring in setters we also need
- // to check for duplicate names.
- if (locs.duplicate.IsValid()) {
- Traits::ReportMessageAt(locs.duplicate, "strict_param_dupe");
+ Traits::ReportMessageAt(function_name_loc,
+ MessageTemplate::kStrongUndefined);
*ok = false;
return;
}
@@ -572,93 +493,35 @@ class ParserBase : public Traits {
LanguageMode language_mode() { return scope_->language_mode(); }
bool is_generator() const { return function_state_->is_generator(); }
+ bool allow_const() {
+ return is_strict(language_mode()) || allow_legacy_const();
+ }
+
// Report syntax errors.
- void ReportMessage(const char* message, const char* arg = NULL,
+ void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
ParseErrorType error_type = kSyntaxError) {
Scanner::Location source_location = scanner()->location();
Traits::ReportMessageAt(source_location, message, arg, error_type);
}
- void ReportMessageAt(Scanner::Location location, const char* message,
+ void ReportMessageAt(Scanner::Location location,
+ MessageTemplate::Template message,
ParseErrorType error_type = kSyntaxError) {
Traits::ReportMessageAt(location, message, reinterpret_cast<const char*>(0),
error_type);
}
- void ReportUnexpectedToken(Token::Value token);
- void ReportUnexpectedTokenAt(Scanner::Location location, Token::Value token);
-
- class ExpressionClassifier {
- public:
- struct Error {
- Error()
- : location(Scanner::Location::invalid()),
- message(nullptr),
- arg(nullptr) {}
-
- Scanner::Location location;
- const char* message;
- const char* arg;
-
- bool HasError() const { return location.IsValid(); }
- };
-
- ExpressionClassifier() {}
-
- bool is_valid_expression() const { return !expression_error_.HasError(); }
-
- bool is_valid_binding_pattern() const {
- return !binding_pattern_error_.HasError();
- }
-
- bool is_valid_assignment_pattern() const {
- return !assignment_pattern_error_.HasError();
- }
-
- const Error& expression_error() const { return expression_error_; }
-
- const Error& binding_pattern_error() const {
- return binding_pattern_error_;
- }
-
- const Error& assignment_pattern_error() const {
- return assignment_pattern_error_;
- }
-
- void RecordExpressionError(const Scanner::Location& loc,
- const char* message, const char* arg = nullptr) {
- if (!is_valid_expression()) return;
- expression_error_.location = loc;
- expression_error_.message = message;
- expression_error_.arg = arg;
- }
-
- void RecordBindingPatternError(const Scanner::Location& loc,
- const char* message,
- const char* arg = nullptr) {
- if (!is_valid_binding_pattern()) return;
- binding_pattern_error_.location = loc;
- binding_pattern_error_.message = message;
- binding_pattern_error_.arg = arg;
- }
+ void GetUnexpectedTokenMessage(
+ Token::Value token, MessageTemplate::Template* message, const char** arg,
+ MessageTemplate::Template default_ = MessageTemplate::kUnexpectedToken);
- void RecordAssignmentPatternError(const Scanner::Location& loc,
- const char* message,
- const char* arg = nullptr) {
- if (!is_valid_assignment_pattern()) return;
- assignment_pattern_error_.location = loc;
- assignment_pattern_error_.message = message;
- assignment_pattern_error_.arg = arg;
- }
+ void ReportUnexpectedToken(Token::Value token);
+ void ReportUnexpectedTokenAt(
+ Scanner::Location location, Token::Value token,
+ MessageTemplate::Template message = MessageTemplate::kUnexpectedToken);
- private:
- Error expression_error_;
- Error binding_pattern_error_;
- Error assignment_pattern_error_;
- };
- void ReportClassifierError(
- const typename ExpressionClassifier::Error& error) {
+ void ReportClassifierError(const ExpressionClassifier::Error& error) {
Traits::ReportMessageAt(error.location, error.message, error.arg,
kSyntaxError);
}
@@ -686,9 +549,69 @@ class ParserBase : public Traits {
}
}
+ void ValidateFormalParameters(const ExpressionClassifier* classifier,
+ LanguageMode language_mode,
+ bool allow_duplicates, bool* ok) {
+ if (!allow_duplicates &&
+ !classifier->is_valid_formal_parameter_list_without_duplicates()) {
+ ReportClassifierError(classifier->duplicate_formal_parameter_error());
+ *ok = false;
+ } else if (is_strict(language_mode) &&
+ !classifier->is_valid_strict_mode_formal_parameters()) {
+ ReportClassifierError(classifier->strict_mode_formal_parameter_error());
+ *ok = false;
+ } else if (is_strong(language_mode) &&
+ !classifier->is_valid_strong_mode_formal_parameters()) {
+ ReportClassifierError(classifier->strong_mode_formal_parameter_error());
+ *ok = false;
+ }
+ }
+
+ void ValidateArrowFormalParameters(const ExpressionClassifier* classifier,
+ ExpressionT expr,
+ bool parenthesized_formals, bool* ok) {
+ if (classifier->is_valid_binding_pattern()) {
+ // A simple arrow formal parameter: IDENTIFIER => BODY.
+ if (!this->IsIdentifier(expr)) {
+ Traits::ReportMessageAt(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(scanner()->current_token()));
+ *ok = false;
+ }
+ } else if (!classifier->is_valid_arrow_formal_parameters()) {
+ // If after parsing the expr, we see an error but the expression is
+ // neither a valid binding pattern nor a valid parenthesized formal
+ // parameter list, show the "arrow formal parameters" error if the formals
+ // started with a parenthesis, and the binding pattern error otherwise.
+ const ExpressionClassifier::Error& error =
+ parenthesized_formals ? classifier->arrow_formal_parameters_error()
+ : classifier->binding_pattern_error();
+ ReportClassifierError(error);
+ *ok = false;
+ }
+ }
+
+ void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
+ MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
+ const char* arg;
+ GetUnexpectedTokenMessage(peek(), &message, &arg);
+ classifier->RecordExpressionError(scanner()->peek_location(), message, arg);
+ }
+
void BindingPatternUnexpectedToken(ExpressionClassifier* classifier) {
- classifier->RecordBindingPatternError(
- scanner()->location(), "unexpected_token", Token::String(peek()));
+ MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
+ const char* arg;
+ GetUnexpectedTokenMessage(peek(), &message, &arg);
+ classifier->RecordBindingPatternError(scanner()->peek_location(), message,
+ arg);
+ }
+
+ void ArrowFormalParametersUnexpectedToken(ExpressionClassifier* classifier) {
+ MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
+ const char* arg;
+ GetUnexpectedTokenMessage(peek(), &message, &arg);
+ classifier->RecordArrowFormalParametersError(scanner()->peek_location(),
+ message, arg);
}
// Recursive descent functions:
@@ -751,34 +674,34 @@ class ParserBase : public Traits {
ExpressionT ParseMemberExpressionContinuation(
ExpressionT expression, ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseArrowFunctionLiteral(
- Scope* function_scope, const FormalParameterErrorLocations& error_locs,
- bool has_rest, ExpressionClassifier* classifier, bool* ok);
+ const FormalParameterParsingStateT& parsing_state,
+ const ExpressionClassifier& classifier, bool* ok);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
ExpressionClassifier* classifier, bool* ok);
void AddTemplateExpression(ExpressionT);
ExpressionT ParseSuperExpression(bool is_new,
ExpressionClassifier* classifier, bool* ok);
+ ExpressionT ParseNewTargetExpression(bool* ok);
ExpressionT ParseStrongInitializationExpression(
ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseStrongSuperCallExpression(ExpressionClassifier* classifier,
bool* ok);
- void ParseFormalParameter(FormalParameterScopeT* scope,
- FormalParameterErrorLocations* locs, bool is_rest,
- bool* ok);
- int ParseFormalParameterList(FormalParameterScopeT* scope,
- FormalParameterErrorLocations* locs,
- bool* has_rest, bool* ok);
+ void ParseFormalParameter(bool is_rest,
+ FormalParameterParsingStateT* parsing_result,
+ ExpressionClassifier* classifier, bool* ok);
+ int ParseFormalParameterList(FormalParameterParsingStateT* parsing_state,
+ ExpressionClassifier* classifier, bool* ok);
void CheckArityRestrictions(
int param_count, FunctionLiteral::ArityRestriction arity_restriction,
- int formals_start_pos, int formals_end_pos, bool* ok);
+ bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok);
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
// we allow calls for web compatibility and rewrite them to a runtime throw.
ExpressionT CheckAndRewriteReferenceExpression(
- ExpressionT expression,
- Scanner::Location location, const char* message, bool* ok);
+ ExpressionT expression, Scanner::Location location,
+ MessageTemplate::Template message, bool* ok);
// Used to validate property names in object literals and class literals
enum PropertyKind {
@@ -863,13 +786,15 @@ class ParserBase : public Traits {
bool allow_lazy_;
bool allow_natives_;
bool allow_harmony_arrow_functions_;
- bool allow_harmony_object_literals_;
bool allow_harmony_sloppy_;
bool allow_harmony_computed_property_names_;
bool allow_harmony_rest_params_;
bool allow_harmony_spreadcalls_;
bool allow_harmony_destructuring_;
+ bool allow_harmony_spread_arrays_;
+ bool allow_harmony_new_target_;
bool allow_strong_mode_;
+ bool allow_legacy_const_;
};
@@ -973,14 +898,7 @@ class PreParserExpression {
static PreParserExpression BinaryOperation(PreParserExpression left,
Token::Value op,
PreParserExpression right) {
- ValidArrowParam valid_arrow_param_list =
- (op == Token::COMMA && !left.is_single_parenthesized() &&
- !right.is_single_parenthesized())
- ? std::min(left.ValidateArrowParams(), right.ValidateArrowParams())
- : kInvalidArrowParam;
- return PreParserExpression(
- TypeField::encode(kBinaryOperationExpression) |
- IsValidArrowParamListField::encode(valid_arrow_param_list));
+ return PreParserExpression(TypeField::encode(kBinaryOperationExpression));
}
static PreParserExpression StringLiteral() {
@@ -1073,30 +991,6 @@ class PreParserExpression {
return IsIdentifier() || IsProperty();
}
- bool IsValidArrowParamList(FormalParameterErrorLocations* locs,
- const Scanner::Location& params_loc) const {
- ValidArrowParam valid = ValidateArrowParams();
- if (ParenthesizationField::decode(code_) == kMultiParenthesizedExpression) {
- return false;
- }
- switch (valid) {
- case kInvalidArrowParam:
- return false;
- case kInvalidStrongArrowParam:
- locs->undefined = params_loc;
- return true;
- case kInvalidStrictReservedArrowParam:
- locs->reserved = params_loc;
- return true;
- case kInvalidStrictEvalArgumentsArrowParam:
- locs->eval_or_arguments = params_loc;
- return true;
- default:
- DCHECK_EQ(valid, kValidArrowParam);
- return true;
- }
- }
-
// At the moment PreParser doesn't track these expression types.
bool IsFunctionLiteral() const { return false; }
bool IsCallNew() const { return false; }
@@ -1116,16 +1010,6 @@ class PreParserExpression {
return TypeField::decode(code_) == kBinaryOperationExpression;
}
- bool is_single_parenthesized() const {
- return ParenthesizationField::decode(code_) != kNotParenthesized;
- }
-
- void increase_parenthesization_level() {
- code_ = ParenthesizationField::update(
- code_, is_single_parenthesized() ? kMultiParenthesizedExpression
- : kParanthesizedExpression);
- }
-
// Dummy implementation for making expression->somefunc() work in both Parser
// and PreParser.
PreParserExpression* operator->() { return this; }
@@ -1146,12 +1030,6 @@ class PreParserExpression {
kSpreadExpression
};
- enum Parenthesization {
- kNotParenthesized,
- kParanthesizedExpression,
- kMultiParenthesizedExpression
- };
-
enum ExpressionType {
kThisExpression,
kThisPropertyExpression,
@@ -1160,56 +1038,18 @@ class PreParserExpression {
kNoTemplateTagExpression
};
- // These validity constraints are ordered such that a value of N implies lack
- // of errors M < N.
- enum ValidArrowParam {
- kInvalidArrowParam,
- kInvalidStrictEvalArgumentsArrowParam,
- kInvalidStrictReservedArrowParam,
- kInvalidStrongArrowParam,
- kValidArrowParam
- };
-
explicit PreParserExpression(uint32_t expression_code)
: code_(expression_code) {}
- V8_INLINE ValidArrowParam ValidateArrowParams() const {
- if (IsBinaryOperation()) {
- return IsValidArrowParamListField::decode(code_);
- }
- if (!IsIdentifier()) {
- return kInvalidArrowParam;
- }
- PreParserIdentifier ident = AsIdentifier();
- // In strict mode, eval and arguments are not valid formal parameter names.
- if (ident.IsEval() || ident.IsArguments()) {
- return kInvalidStrictEvalArgumentsArrowParam;
- }
- // In strict mode, future reserved words are not valid either, and as they
- // produce different errors we allot them their own error code.
- if (ident.IsFutureStrictReserved()) {
- return kInvalidStrictReservedArrowParam;
- }
- // In strong mode, 'undefined' isn't a valid formal parameter name either.
- if (ident.IsUndefined()) {
- return kInvalidStrongArrowParam;
- }
- return kValidArrowParam;
- }
-
- // The first five bits are for the Type and Parenthesization.
+ // The first three bits are for the Type.
typedef BitField<Type, 0, 3> TypeField;
- typedef BitField<Parenthesization, TypeField::kNext, 2> ParenthesizationField;
// The rest of the bits are interpreted depending on the value
// of the Type field, so they can share the storage.
- typedef BitField<ExpressionType, ParenthesizationField::kNext, 3>
- ExpressionTypeField;
- typedef BitField<bool, ParenthesizationField::kNext, 1> IsUseStrictField;
+ typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
+ typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
- typedef BitField<ValidArrowParam, ParenthesizationField::kNext, 3>
- IsValidArrowParamListField;
- typedef BitField<PreParserIdentifier::Type, ParenthesizationField::kNext, 10>
+ typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
IdentifierTypeField;
uint32_t code_;
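
With the parenthesization bits gone, TypeField occupies bits 0..2 and every other field overlays the bits above it, valid only for the Type that wrote it. A self-contained sketch of the packing involved; the mini BitField and the enum values here are stand-ins for V8's:

#include <stdint.h>

template <typename T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t code) {
    return static_cast<T>((code >> shift) & ((1u << size) - 1));
  }
};

enum Type { kExpression, kIdentifierExpression, kStringLiteral };
typedef BitField<Type, 0, 3> TypeField;         // bits 0..2, always valid
typedef BitField<bool, 3, 1> IsUseStrictField;  // overlays bits >= 3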
@@ -1315,11 +1155,13 @@ class PreParserFactory {
PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
PreParserIdentifier js_flags,
int literal_index,
+ bool is_strong,
int pos) {
return PreParserExpression::Default();
}
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int literal_index,
+ bool is_strong,
int pos) {
return PreParserExpression::Default();
}
@@ -1340,6 +1182,7 @@ class PreParserFactory {
int literal_index,
int boilerplate_properties,
bool has_function,
+ bool is_strong,
int pos) {
return PreParserExpression::Default();
}
@@ -1416,7 +1259,7 @@ class PreParserFactory {
PreParserExpression NewFunctionLiteral(
PreParserIdentifier name, AstValueFactory* ast_value_factory,
Scope* scope, PreParserStatementList body, int materialized_literal_count,
- int expected_property_count, int handler_count, int parameter_count,
+ int expected_property_count, int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
@@ -1439,6 +1282,19 @@ class PreParserFactory {
};
+struct PreParserFormalParameterParsingState {
+ explicit PreParserFormalParameterParsingState(Scope* scope)
+ : scope(scope),
+ has_rest(false),
+ is_simple_parameter_list(true),
+ materialized_literals_count(0) {}
+ Scope* scope;
+ bool has_rest;
+ bool is_simple_parameter_list;
+ int materialized_literals_count;
+};
+
+
class PreParser;
class PreParserTraits {
@@ -1464,8 +1320,8 @@ class PreParserTraits {
typedef PreParserExpressionList ExpressionList;
typedef PreParserExpressionList PropertyList;
typedef PreParserIdentifier FormalParameter;
- typedef DuplicateFinder FormalParameterScope;
typedef PreParserStatementList StatementList;
+ typedef PreParserFormalParameterParsingState FormalParameterParsingState;
// For constructing objects returned by the traversing functions.
typedef PreParserFactory Factory;
@@ -1553,9 +1409,13 @@ class PreParserTraits {
static void CheckAssigningFunctionLiteralToProperty(
PreParserExpression left, PreParserExpression right) {}
- // PreParser doesn't need to keep track of eval calls.
static void CheckPossibleEvalCall(PreParserExpression expression,
- Scope* scope) {}
+ Scope* scope) {
+ if (IsIdentifier(expression) && IsEval(AsIdentifier(expression))) {
+ scope->DeclarationScope()->RecordEvalCall();
+ scope->RecordEvalCall();
+ }
+ }
static PreParserExpression MarkExpressionAsAssigned(
PreParserExpression expression) {
@@ -1578,23 +1438,26 @@ class PreParserTraits {
return PreParserExpression::Default();
}
- PreParserExpression NewThrowReferenceError(const char* type, int pos) {
+ PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
+ int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewThrowSyntaxError(
- const char* type, Handle<Object> arg, int pos) {
+ PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
+ Handle<Object> arg, int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewThrowTypeError(
- const char* type, Handle<Object> arg, int pos) {
+ PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
+ Handle<Object> arg, int pos) {
return PreParserExpression::Default();
}
// Reporting errors.
- void ReportMessageAt(Scanner::Location location, const char* message,
+ void ReportMessageAt(Scanner::Location location,
+ MessageTemplate::Template message,
const char* arg = NULL,
ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(int start_pos, int end_pos, const char* message,
+ void ReportMessageAt(int start_pos, int end_pos,
+ MessageTemplate::Template message,
const char* arg = NULL,
ParseErrorType error_type = kSyntaxError);
@@ -1641,8 +1504,21 @@ class PreParserTraits {
return PreParserExpression::This();
}
- static PreParserExpression SuperReference(Scope* scope,
- PreParserFactory* factory) {
+ static PreParserExpression SuperPropertyReference(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression SuperCallReference(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression NewTargetExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
return PreParserExpression::Default();
}
@@ -1684,20 +1560,27 @@ class PreParserTraits {
return PreParserExpressionList();
}
+ static void AddParameterInitializationBlock(
+ const PreParserFormalParameterParsingState& formal_parameters,
+ PreParserStatementList list, bool* ok) {}
+
V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
int* expected_property_count, bool* ok) {
UNREACHABLE();
}
- V8_INLINE PreParserStatementList
- ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
- Variable* fvar, Token::Value fvar_init_op,
- FunctionKind kind, bool* ok);
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameterParsingState& formal_parameters,
+ Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok);
V8_INLINE void ParseArrowFunctionFormalParameters(
- Scope* scope, PreParserExpression expression,
- const Scanner::Location& params_loc,
- FormalParameterErrorLocations* error_locs, bool* is_rest, bool* ok);
+ PreParserFormalParameterParsingState* parsing_state,
+ PreParserExpression expression, const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok);
+
+ void ReindexLiterals(
+ const PreParserFormalParameterParsingState& parsing_state) {}
struct TemplateLiteralState {};
@@ -1723,9 +1606,8 @@ class PreParserTraits {
return !tag.IsNoTemplateTag();
}
- V8_INLINE bool DeclareFormalParameter(DuplicateFinder* scope,
- PreParserIdentifier param,
- bool is_rest);
+ void DeclareFormalParameter(void* parsing_state, PreParserExpression pattern,
+ ExpressionClassifier* classifier, bool is_rest) {}
void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
@@ -1735,7 +1617,8 @@ class PreParserTraits {
PreParserIdentifier name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
PreParserExpression ParseClassLiteral(PreParserIdentifier name,
Scanner::Location class_name_location,
@@ -1875,6 +1758,7 @@ class PreParser : public ParserBase<PreParserTraits> {
int* expected_property_count, bool* ok);
V8_INLINE PreParserStatementList
ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
+ const FormalParameterParsingStateT& formal_parameters,
Variable* fvar, Token::Value fvar_init_op,
FunctionKind kind, bool* ok);
@@ -1882,7 +1766,8 @@ class PreParser : public ParserBase<PreParserTraits> {
Identifier name, Scanner::Location function_name_location,
bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok,
Scanner::BookmarkScope* bookmark = nullptr);
@@ -1919,30 +1804,19 @@ PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
}
-bool PreParserTraits::DeclareFormalParameter(
- DuplicateFinder* duplicate_finder, PreParserIdentifier current_identifier,
- bool is_rest) {
- return pre_parser_->scanner()->FindSymbol(duplicate_finder, 1) != 0;
-}
-
-
void PreParserTraits::ParseArrowFunctionFormalParameters(
- Scope* scope, PreParserExpression params,
- const Scanner::Location& params_loc,
- FormalParameterErrorLocations* error_locs, bool* is_rest, bool* ok) {
+ PreParserFormalParameterParsingState* parsing_state,
+ PreParserExpression params, const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok) {
// TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
// lists that are too long.
- if (!params.IsValidArrowParamList(error_locs, params_loc)) {
- *ok = false;
- ReportMessageAt(params_loc, "malformed_arrow_function_parameter_list");
- return;
- }
}
PreParserStatementList PreParser::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameterParsingState& formal_parameters,
+ Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
ParseStatementList(Token::RBRACE, ok);
@@ -1954,10 +1828,11 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
- return pre_parser_->ParseEagerFunctionBody(function_name, pos, fvar,
- fvar_init_op, kind, ok);
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameterParsingState& formal_parameters,
+ Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
+ return pre_parser_->ParseEagerFunctionBody(
+ function_name, pos, formal_parameters, fvar, fvar_init_op, kind, ok);
}
@@ -1966,7 +1841,6 @@ ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
FunctionKind kind, typename Traits::Type::Factory* factory)
: next_materialized_literal_index_(0),
- next_handler_index_(0),
expected_property_count_(0),
this_location_(Scanner::Location::invalid()),
return_location_(Scanner::Location::invalid()),
@@ -1990,50 +1864,73 @@ ParserBase<Traits>::FunctionState::~FunctionState() {
}
-template<class Traits>
-void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
- return ReportUnexpectedTokenAt(scanner_->location(), token);
-}
-
-
-template<class Traits>
-void ParserBase<Traits>::ReportUnexpectedTokenAt(
- Scanner::Location source_location, Token::Value token) {
-
+template <class Traits>
+void ParserBase<Traits>::GetUnexpectedTokenMessage(
+ Token::Value token, MessageTemplate::Template* message, const char** arg,
+ MessageTemplate::Template default_) {
// Several token kinds are treated specially and mapped to dedicated messages
switch (token) {
case Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos");
+ *message = MessageTemplate::kUnexpectedEOS;
+ *arg = nullptr;
+ break;
case Token::SMI:
case Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number");
+ *message = MessageTemplate::kUnexpectedTokenNumber;
+ *arg = nullptr;
+ break;
case Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string");
+ *message = MessageTemplate::kUnexpectedTokenString;
+ *arg = nullptr;
+ break;
case Token::IDENTIFIER:
- return ReportMessageAt(source_location, "unexpected_token_identifier");
+ *message = MessageTemplate::kUnexpectedTokenIdentifier;
+ *arg = nullptr;
+ break;
case Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved");
+ *message = MessageTemplate::kUnexpectedReserved;
+ *arg = nullptr;
+ break;
case Token::LET:
case Token::STATIC:
case Token::YIELD:
case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- is_strict(language_mode())
- ? "unexpected_strict_reserved"
- : "unexpected_token_identifier");
+ *message = is_strict(language_mode())
+ ? MessageTemplate::kUnexpectedStrictReserved
+ : MessageTemplate::kUnexpectedTokenIdentifier;
+ *arg = nullptr;
+ break;
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- return Traits::ReportMessageAt(source_location,
- "unexpected_template_string");
+ *message = MessageTemplate::kUnexpectedTemplateString;
+ *arg = nullptr;
+ break;
default:
const char* name = Token::String(token);
DCHECK(name != NULL);
- Traits::ReportMessageAt(source_location, "unexpected_token", name);
+ *arg = name;
+ break;
}
}
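
The refactor above splits message selection from message reporting: GetUnexpectedTokenMessage fills in the (message, arg) pair, and in the default case leaves the caller-supplied message untouched, which is what lets ReportUnexpectedTokenAt pass e.g. kMissingArrow while still attaching the offending token's text. A sketch of that in/out contract, with illustrative enums standing in for Token and MessageTemplate:

    #include <cstdio>

    enum Message { kUnexpectedToken, kUnexpectedEOS, kMissingArrow };
    enum Token { EOS, RPAREN };

    // Special tokens pick their own message; everything else keeps the
    // message the caller passed in and only attaches the token text.
    void GetUnexpectedTokenMessage(Token token, Message* message,
                                   const char** arg) {
      switch (token) {
        case EOS:
          *message = kUnexpectedEOS;
          *arg = nullptr;
          break;
        default:
          *arg = ")";  // the real code uses Token::String(token)
          break;
      }
    }

    int main() {
      Message m = kMissingArrow;  // caller-chosen default survives
      const char* arg = nullptr;
      GetUnexpectedTokenMessage(RPAREN, &m, &arg);
      std::printf("message=%d arg=%s\n", static_cast<int>(m), arg);
    }
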
template <class Traits>
+void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
+ return ReportUnexpectedTokenAt(scanner_->location(), token);
+}
+
+
+template <class Traits>
+void ParserBase<Traits>::ReportUnexpectedTokenAt(
+ Scanner::Location source_location, Token::Value token,
+ MessageTemplate::Template message) {
+ const char* arg;
+ GetUnexpectedTokenMessage(token, &message, &arg);
+ Traits::ReportMessageAt(source_location, message, arg);
+}
+
+
+template <class Traits>
typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
ExpressionClassifier classifier;
@@ -2061,27 +1958,56 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
Token::Value next = Next();
if (next == Token::IDENTIFIER) {
IdentifierT name = this->GetSymbol(scanner());
- if (is_strict(language_mode()) && this->IsEvalOrArguments(name)) {
- classifier->RecordBindingPatternError(scanner()->location(),
- "strict_eval_arguments");
- }
- if (is_strong(language_mode()) && this->IsUndefined(name)) {
- // TODO(dslomov): allow 'undefined' in nested patterns.
- classifier->RecordBindingPatternError(scanner()->location(),
- "strong_undefined");
- classifier->RecordAssignmentPatternError(scanner()->location(),
- "strong_undefined");
- }
- if (is_strong(language_mode()) && this->IsArguments(name)) {
- classifier->RecordExpressionError(scanner()->location(),
- "strong_arguments");
- }
- if (this->IsArguments(name)) scope_->RecordArgumentsUsage();
+ // When this function is used to read a formal parameter, we don't always
+ // know whether the function is going to be strict or sloppy. Indeed for
+ // arrow functions we don't always know that the identifier we are reading
+ // is actually a formal parameter. Therefore, besides the errors we must
+ // report because we already know we're in strict mode, we also record any
+ // error we may need to report later, once the language mode is known.
+ if (this->IsEval(name)) {
+ classifier->RecordStrictModeFormalParameterError(
+ scanner()->location(), MessageTemplate::kStrictEvalArguments);
+ if (is_strict(language_mode())) {
+ classifier->RecordBindingPatternError(
+ scanner()->location(), MessageTemplate::kStrictEvalArguments);
+ }
+ }
+ if (this->IsArguments(name)) {
+ scope_->RecordArgumentsUsage();
+ classifier->RecordStrictModeFormalParameterError(
+ scanner()->location(), MessageTemplate::kStrictEvalArguments);
+ if (is_strict(language_mode())) {
+ classifier->RecordBindingPatternError(
+ scanner()->location(), MessageTemplate::kStrictEvalArguments);
+ }
+ if (is_strong(language_mode())) {
+ classifier->RecordExpressionError(scanner()->location(),
+ MessageTemplate::kStrongArguments);
+ }
+ }
+ if (this->IsUndefined(name)) {
+ classifier->RecordStrongModeFormalParameterError(
+ scanner()->location(), MessageTemplate::kStrongUndefined);
+ if (is_strong(language_mode())) {
+ // TODO(dslomov): allow 'undefined' in nested patterns.
+ classifier->RecordBindingPatternError(
+ scanner()->location(), MessageTemplate::kStrongUndefined);
+ classifier->RecordAssignmentPatternError(
+ scanner()->location(), MessageTemplate::kStrongUndefined);
+ }
+ }
+
+ if (classifier->duplicate_finder() != nullptr &&
+ scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
+ classifier->RecordDuplicateFormalParameterError(scanner()->location());
+ }
return name;
} else if (is_sloppy(language_mode()) &&
(next == Token::FUTURE_STRICT_RESERVED_WORD ||
next == Token::LET || next == Token::STATIC ||
(next == Token::YIELD && !is_generator()))) {
+ classifier->RecordStrictModeFormalParameterError(
+ scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
return this->GetSymbol(scanner());
} else {
this->ReportUnexpectedToken(next);
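
The comment above describes the core "record now, decide later" pattern: errors are recorded eagerly while scanning, but only fire once the final language mode is known. A standalone sketch under illustrative types, not V8's real ExpressionClassifier:

    #include <iostream>
    #include <string>

    struct Classifier {
      bool has_strict_formal_error = false;
      std::string strict_formal_message;

      void RecordStrictModeFormalParameterError(const std::string& msg) {
        if (!has_strict_formal_error) {  // keep the leftmost error only
          has_strict_formal_error = true;
          strict_formal_message = msg;
        }
      }
    };

    // Called after the function body has been parsed, when the final
    // language mode (and thus strictness) is known.
    bool ValidateFormalParameters(const Classifier& c, bool is_strict) {
      if (is_strict && c.has_strict_formal_error) {
        std::cout << "SyntaxError: " << c.strict_formal_message << "\n";
        return false;
      }
      return true;
    }

    int main() {
      Classifier c;
      // In `(eval) => { "use strict"; }` the parameter is read long before
      // the directive, so the error can only be recorded speculatively.
      c.RecordStrictModeFormalParameterError("eval/arguments as parameter");
      ValidateFormalParameters(c, /*is_strict=*/true);   // reports the error
      ValidateFormalParameters(c, /*is_strict=*/false);  // silently fine
    }
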
@@ -2150,7 +2076,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
int pos = peek_position();
if (!scanner()->ScanRegExpPattern(seen_equal)) {
Next();
- ReportMessage("unterminated_regexp");
+ ReportMessage(MessageTemplate::kUnterminatedRegExp);
*ok = false;
return Traits::EmptyExpression();
}
@@ -2160,13 +2086,14 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
IdentifierT js_pattern = this->GetNextSymbol(scanner());
if (!scanner()->ScanRegExpFlags()) {
Next();
- ReportMessage("malformed_regexp_flags");
+ ReportMessage(MessageTemplate::kMalformedRegExpFlags);
*ok = false;
return Traits::EmptyExpression();
}
IdentifierT js_flags = this->GetNextSymbol(scanner());
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index,
+ is_strong(language_mode()), pos);
}
@@ -2211,16 +2138,15 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::THIS: {
BindingPatternUnexpectedToken(classifier);
Consume(Token::THIS);
- if (is_strong(language_mode())) {
+ if (FLAG_strong_this && is_strong(language_mode())) {
// Constructors' usages of 'this' in strong mode are parsed separately.
// TODO(rossberg): this does not work with arrow functions yet.
if (i::IsConstructor(function_state_->kind())) {
- ReportMessage("strong_constructor_this");
+ ReportMessage(MessageTemplate::kStrongConstructorThis);
*ok = false;
break;
}
}
- scope_->RecordThisUsage();
result = this->ThisExpression(scope_, factory(), beg_pos);
break;
}
@@ -2235,8 +2161,8 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
break;
case Token::SMI:
case Token::NUMBER:
- classifier->RecordBindingPatternError(scanner()->location(),
- "unexpected_token_number");
+ classifier->RecordBindingPatternError(
+ scanner()->location(), MessageTemplate::kUnexpectedTokenNumber);
Next();
result =
this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
@@ -2255,8 +2181,8 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
}
case Token::STRING: {
- classifier->RecordBindingPatternError(scanner()->location(),
- "unexpected_token_string");
+ classifier->RecordBindingPatternError(
+ scanner()->location(), MessageTemplate::kUnexpectedTokenString);
Consume(Token::STRING);
result = this->ExpressionFromString(beg_pos, scanner(), factory());
break;
@@ -2271,30 +2197,74 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
break;
case Token::LBRACK:
+ if (!allow_harmony_destructuring()) {
+ BindingPatternUnexpectedToken(classifier);
+ }
result = this->ParseArrayLiteral(classifier, CHECK_OK);
break;
case Token::LBRACE:
+ if (!allow_harmony_destructuring()) {
+ BindingPatternUnexpectedToken(classifier);
+ }
result = this->ParseObjectLiteral(classifier, CHECK_OK);
break;
case Token::LPAREN:
+ // Arrow function formal parameters are either a single identifier or a
+ // list of BindingPattern productions enclosed in parentheses.
+ // Parentheses are not valid on the LHS of a BindingPattern, so we use the
+ // is_valid_binding_pattern() check to detect multiple levels of
+ // parenthesization.
+ if (!classifier->is_valid_binding_pattern()) {
+ ArrowFormalParametersUnexpectedToken(classifier);
+ }
BindingPatternUnexpectedToken(classifier);
Consume(Token::LPAREN);
if (allow_harmony_arrow_functions() && Check(Token::RPAREN)) {
// As a primary expression, the only thing that can follow "()" is "=>".
- Scope* scope = this->NewScope(scope_, ARROW_SCOPE);
+ classifier->RecordBindingPatternError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::RPAREN));
+ // Give a good error to the user who might have typed e.g. "return();".
+ if (peek() != Token::ARROW) {
+ ReportUnexpectedTokenAt(scanner_->peek_location(), peek(),
+ MessageTemplate::kMissingArrow);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+ Scope* scope =
+ this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
+ FormalParameterParsingStateT parsing_state(scope);
+ scope->set_start_position(beg_pos);
+ ExpressionClassifier args_classifier;
+ result = this->ParseArrowFunctionLiteral(parsing_state, args_classifier,
+ CHECK_OK);
+ } else if (allow_harmony_arrow_functions() &&
+ allow_harmony_rest_params() && Check(Token::ELLIPSIS)) {
+ // (...x) => y
+ Scope* scope =
+ this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
+ FormalParameterParsingStateT parsing_state(scope);
scope->set_start_position(beg_pos);
- FormalParameterErrorLocations error_locs;
- bool has_rest = false;
- result = this->ParseArrowFunctionLiteral(scope, error_locs, has_rest,
- classifier, CHECK_OK);
+ ExpressionClassifier args_classifier;
+ const bool is_rest = true;
+ this->ParseFormalParameter(is_rest, &parsing_state, &args_classifier,
+ CHECK_OK);
+ if (peek() == Token::COMMA) {
+ ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kParamAfterRest);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+ Expect(Token::RPAREN, CHECK_OK);
+ result = this->ParseArrowFunctionLiteral(parsing_state, args_classifier,
+ CHECK_OK);
} else {
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
parenthesized_function_ = (peek() == Token::FUNCTION);
result = this->ParseExpression(true, classifier, CHECK_OK);
- result->increase_parenthesization_level();
Expect(Token::RPAREN, CHECK_OK);
}
break;
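
The is_valid_binding_pattern() trick in the LPAREN case above can be seen in isolation: the first '(' merely invalidates the binding-pattern production, and only a second '(' (seen while the pattern is already invalid) poisons the arrow-formals production. A sketch with illustrative flags in place of the real classifier:

    #include <cassert>

    struct Classifier {
      bool binding_ok = true;
      bool arrow_formals_ok = true;
    };

    void OnLParen(Classifier* c) {
      // A second '(' arrives while the pattern is already invalid; this is
      // exactly the multiple-parenthesization case to reject.
      if (!c->binding_ok) c->arrow_formals_ok = false;
      c->binding_ok = false;  // '(' never starts a valid BindingPattern
    }

    int main() {
      Classifier once;
      OnLParen(&once);
      assert(once.arrow_formals_ok);    // (x) => y stays legal

      Classifier twice;
      OnLParen(&twice);
      OnLParen(&twice);
      assert(!twice.arrow_formals_ok);  // ((x)) => y is rejected
    }
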
@@ -2303,7 +2273,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
BindingPatternUnexpectedToken(classifier);
Consume(Token::CLASS);
if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
- ReportMessage("sloppy_lexical");
+ ReportMessage(MessageTemplate::kSloppyLexical);
*ok = false;
break;
}
@@ -2365,13 +2335,36 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
// AssignmentExpression
// Expression ',' AssignmentExpression
+ ExpressionClassifier binding_classifier;
ExpressionT result =
- this->ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
+ this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
+ classifier->Accumulate(binding_classifier,
+ ExpressionClassifier::AllProductions);
+ bool seen_rest = false;
while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
+ if (seen_rest) {
+ // At this point the production can't possibly be valid, but we don't know
+ // which error to signal.
+ classifier->RecordArrowFormalParametersError(
+ scanner()->peek_location(), MessageTemplate::kParamAfterRest);
+ }
+ Consume(Token::COMMA);
+ bool is_rest = false;
+ if (allow_harmony_rest_params() && peek() == Token::ELLIPSIS) {
+ // 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList occurs
+ // only as the formal parameters of '(x, y, ...z) => foo', and is not itself
+ // a valid expression or binding pattern.
+ ExpressionUnexpectedToken(classifier);
+ BindingPatternUnexpectedToken(classifier);
+ Consume(Token::ELLIPSIS);
+ seen_rest = is_rest = true;
+ }
int pos = position();
- ExpressionT right =
- this->ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
+ ExpressionT right = this->ParseAssignmentExpression(
+ accept_IN, &binding_classifier, CHECK_OK);
+ if (is_rest) right = factory()->NewSpread(right, pos);
+ classifier->Accumulate(binding_classifier,
+ ExpressionClassifier::AllProductions);
result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
}
return result;
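
The loop above left-folds the comma operator, wrapping a trailing rest element in a Spread node before folding it in. Setting the rest/spread handling aside, the folding itself is just this (illustrative Expr type, not V8's AST):

    #include <iostream>
    #include <memory>
    #include <string>

    struct Expr {
      std::string text;
    };

    // Mirrors factory()->NewBinaryOperation(Token::COMMA, ...) above.
    std::unique_ptr<Expr> NewComma(std::unique_ptr<Expr> left,
                                   std::unique_ptr<Expr> right) {
      return std::unique_ptr<Expr>(
          new Expr{"(" + left->text + " , " + right->text + ")"});
    }

    int main() {
      auto result = std::unique_ptr<Expr>(new Expr{"a"});
      for (std::string next : {"b", "c"}) {
        // Each iteration folds one more AssignmentExpression into the left
        // spine, exactly like the while (peek() == Token::COMMA) loop.
        result = NewComma(std::move(result),
                          std::unique_ptr<Expr>(new Expr{next}));
      }
      std::cout << result->text << "\n";  // ((a , b) , c)
    }
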
@@ -2389,19 +2382,34 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
this->NewExpressionList(4, zone_);
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
+ bool seen_spread = false;
ExpressionT elem = this->EmptyExpression();
if (peek() == Token::COMMA) {
if (is_strong(language_mode())) {
- ReportMessageAt(scanner()->peek_location(), "strong_ellision");
+ ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kStrongEllision);
*ok = false;
return this->EmptyExpression();
}
elem = this->GetLiteralTheHole(peek_position(), factory());
+ } else if (peek() == Token::ELLIPSIS) {
+ if (!allow_harmony_spread_arrays()) {
+ ExpressionUnexpectedToken(classifier);
+ }
+ int start_pos = peek_position();
+ Consume(Token::ELLIPSIS);
+ ExpressionT argument =
+ this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ elem = factory()->NewSpread(argument, start_pos);
+ seen_spread = true;
} else {
elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
}
values->Add(elem, zone_);
if (peek() != Token::RBRACK) {
+ if (seen_spread) {
+ BindingPatternUnexpectedToken(classifier);
+ }
Expect(Token::COMMA, CHECK_OK);
}
}
@@ -2410,7 +2418,8 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
// Update the scope information before the pre-parsing bailout.
int literal_index = function_state_->NextMaterializedLiteralIndex();
- return factory()->NewArrayLiteral(values, literal_index, pos);
+ return factory()->NewArrayLiteral(values, literal_index,
+ is_strong(language_mode()), pos);
}
@@ -2450,8 +2459,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
if (allow_harmony_computed_property_names_) {
*is_computed_name = true;
Consume(Token::LBRACK);
- ExpressionT expression =
- ParseAssignmentExpression(true, classifier, CHECK_OK);
+ ExpressionClassifier computed_name_classifier;
+ ExpressionT expression = ParseAssignmentExpression(
+ true, &computed_name_classifier, CHECK_OK);
+ classifier->AccumulateReclassifyingAsPattern(computed_name_classifier);
Expect(Token::RBRACK, CHECK_OK);
return expression;
}
@@ -2485,7 +2496,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
bool is_get = false;
bool is_set = false;
bool name_is_static = false;
- bool is_generator = allow_harmony_object_literals_ && Check(Token::MUL);
+ bool is_generator = Check(Token::MUL);
Token::Value name_token = peek();
int next_beg_pos = scanner()->peek_location().beg_pos;
@@ -2509,8 +2520,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
value = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- } else if (is_generator ||
- (allow_harmony_object_literals_ && peek() == Token::LPAREN)) {
+ } else if (is_generator || peek() == Token::LPAREN) {
// Concise Method
if (!*is_computed_name) {
checker->CheckProperty(name_token, kMethodProperty, is_static,
@@ -2533,7 +2543,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
name, scanner()->location(),
false, // reserved words are allowed here
kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
- FunctionLiteral::NORMAL_ARITY,
+ FunctionLiteral::NORMAL_ARITY, language_mode(),
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
return factory()->NewObjectLiteralProperty(name_expression, value,
@@ -2567,7 +2577,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
false, // reserved words are allowed here
kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
// Make sure the name expression is a string since we need a Name for
// Runtime_DefineAccessorPropertyUnchecked and since we can determine this
@@ -2582,13 +2592,30 @@ ParserBase<Traits>::ParsePropertyDefinition(
is_get ? ObjectLiteralProperty::GETTER : ObjectLiteralProperty::SETTER,
is_static, *is_computed_name);
- } else if (!in_class && allow_harmony_object_literals_ &&
- Token::IsIdentifier(name_token, language_mode(),
- this->is_generator())) {
+ } else if (!in_class && Token::IsIdentifier(name_token, language_mode(),
+ this->is_generator())) {
DCHECK(!*is_computed_name);
DCHECK(!is_static);
- value = this->ExpressionFromIdentifier(name, next_beg_pos, next_end_pos,
- scope_, factory());
+
+ if (classifier->duplicate_finder() != nullptr &&
+ scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
+ classifier->RecordDuplicateFormalParameterError(scanner()->location());
+ }
+
+ ExpressionT lhs = this->ExpressionFromIdentifier(
+ name, next_beg_pos, next_end_pos, scope_, factory());
+ if (peek() == Token::ASSIGN) {
+ this->ExpressionUnexpectedToken(classifier);
+ Consume(Token::ASSIGN);
+ ExpressionClassifier rhs_classifier;
+ ExpressionT rhs = this->ParseAssignmentExpression(
+ true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ classifier->AccumulateReclassifyingAsPattern(rhs_classifier);
+ value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
+ RelocInfo::kNoPosition);
+ } else {
+ value = lhs;
+ }
return factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED, false, false);
@@ -2666,6 +2693,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
literal_index,
number_of_boilerplate_properties,
has_function,
+ is_strong(language_mode()),
pos);
}
@@ -2710,7 +2738,7 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
}
if (result->length() > Code::kMaxArguments) {
- ReportMessage("too_many_arguments");
+ ReportMessage(MessageTemplate::kTooManyArguments);
*ok = false;
return this->NullExpressionList();
}
@@ -2721,7 +2749,7 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
}
Scanner::Location location = scanner_->location();
if (Token::RPAREN != Next()) {
- ReportMessageAt(location, "unterminated_arg_list");
+ ReportMessageAt(location, MessageTemplate::kUnterminatedArgList);
*ok = false;
return this->NullExpressionList();
}
@@ -2757,37 +2785,70 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
if (fni_ != NULL) fni_->Enter();
ParserBase<Traits>::Checkpoint checkpoint(this);
- ExpressionT expression =
- this->ParseConditionalExpression(accept_IN, classifier, CHECK_OK);
+ ExpressionClassifier arrow_formals_classifier(classifier->duplicate_finder());
+ bool parenthesized_formals = peek() == Token::LPAREN;
+ if (!parenthesized_formals) {
+ ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
+ }
+ ExpressionT expression = this->ParseConditionalExpression(
+ accept_IN, &arrow_formals_classifier, CHECK_OK);
if (allow_harmony_arrow_functions() && peek() == Token::ARROW) {
- checkpoint.Restore();
- FormalParameterErrorLocations error_locs;
+ BindingPatternUnexpectedToken(classifier);
+ ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
+ parenthesized_formals, CHECK_OK);
Scanner::Location loc(lhs_location.beg_pos, scanner()->location().end_pos);
- bool has_rest = false;
- Scope* scope = this->NewScope(scope_, ARROW_SCOPE);
+ Scope* scope =
+ this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
+ FormalParameterParsingStateT parsing_state(scope);
+ checkpoint.Restore(&parsing_state.materialized_literals_count);
+
scope->set_start_position(lhs_location.beg_pos);
- this->ParseArrowFunctionFormalParameters(scope, expression, loc,
- &error_locs, &has_rest, CHECK_OK);
- expression = this->ParseArrowFunctionLiteral(scope, error_locs, has_rest,
- classifier, CHECK_OK);
+ Scanner::Location duplicate_loc = Scanner::Location::invalid();
+ this->ParseArrowFunctionFormalParameters(&parsing_state, expression, loc,
+ &duplicate_loc, CHECK_OK);
+ if (duplicate_loc.IsValid()) {
+ arrow_formals_classifier.RecordDuplicateFormalParameterError(
+ duplicate_loc);
+ }
+ expression = this->ParseArrowFunctionLiteral(
+ parsing_state, arrow_formals_classifier, CHECK_OK);
return expression;
}
+ // "expression" was not itself an arrow function parameter list, but it might
+ // form part of one. Propagate speculative formal parameter error locations.
+ classifier->Accumulate(arrow_formals_classifier,
+ ExpressionClassifier::StandardProductions |
+ ExpressionClassifier::FormalParametersProductions);
+
if (!Token::IsAssignmentOp(peek())) {
if (fni_ != NULL) fni_->Leave();
// Parsed conditional expression only (no assignment).
return expression;
}
+ if (!allow_harmony_destructuring()) {
+ BindingPatternUnexpectedToken(classifier);
+ }
+
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, "invalid_lhs_in_assignment", CHECK_OK);
+ expression, lhs_location, MessageTemplate::kInvalidLhsInAssignment,
+ CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
Token::Value op = Next(); // Get assignment operator.
+ if (op != Token::ASSIGN) {
+ classifier->RecordBindingPatternError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(op));
+ }
int pos = position();
+
+ ExpressionClassifier rhs_classifier;
ExpressionT right =
- this->ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
+ this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
+ classifier->AccumulateReclassifyingAsPattern(rhs_classifier);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -2859,9 +2920,6 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
}
typename Traits::Type::YieldExpression yield =
factory()->NewYield(generator_object, expression, kind, pos);
- if (kind == Yield::kDelegating) {
- yield->set_index(function_state_->NextHandlerIndex());
- }
return yield;
}
@@ -2881,6 +2939,7 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
ExpressionT expression =
this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
+ BindingPatternUnexpectedToken(classifier);
Consume(Token::CONDITIONAL);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
@@ -2904,6 +2963,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
+ BindingPatternUnexpectedToken(classifier);
Token::Value op = Next();
Scanner::Location op_location = scanner()->location();
int pos = position();
@@ -2927,7 +2987,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
default: break;
}
if (cmp == Token::EQ && is_strong(language_mode())) {
- ReportMessageAt(op_location, "strong_equal");
+ ReportMessageAt(op_location, MessageTemplate::kStrongEqual);
*ok = false;
return this->EmptyExpression();
}
@@ -2973,12 +3033,12 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
if (op == Token::DELETE && is_strict(language_mode())) {
if (is_strong(language_mode())) {
- ReportMessage("strong_delete");
+ ReportMessage(MessageTemplate::kStrongDelete);
*ok = false;
return this->EmptyExpression();
} else if (this->IsIdentifier(expression)) {
// "delete identifier" is a syntax error in strict mode.
- ReportMessage("strict_delete");
+ ReportMessage(MessageTemplate::kStrictDelete);
*ok = false;
return this->EmptyExpression();
}
@@ -2992,7 +3052,8 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
Scanner::Location lhs_location = scanner()->peek_location();
ExpressionT expression = this->ParseUnaryExpression(classifier, CHECK_OK);
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, "invalid_lhs_in_prefix_op", CHECK_OK);
+ expression, lhs_location, MessageTemplate::kInvalidLhsInPrefixOp,
+ CHECK_OK);
this->MarkExpressionAsAssigned(expression);
return factory()->NewCountOperation(op,
@@ -3021,7 +3082,8 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
BindingPatternUnexpectedToken(classifier);
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, "invalid_lhs_in_postfix_op", CHECK_OK);
+ expression, lhs_location, MessageTemplate::kInvalidLhsInPostfixOp,
+ CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
Token::Value next = Next();
@@ -3062,7 +3124,7 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
if (is_strong(language_mode()) && this->IsIdentifier(result) &&
this->IsEval(this->AsIdentifier(result))) {
- ReportMessage("strong_direct_eval");
+ ReportMessage(MessageTemplate::kStrongDirectEval);
*ok = false;
return this->EmptyExpression();
}
@@ -3119,6 +3181,13 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
break;
}
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL: {
+ BindingPatternUnexpectedToken(classifier);
+ result = ParseTemplateLiteral(result, position(), classifier, CHECK_OK);
+ break;
+ }
+
default:
return result;
}
@@ -3132,6 +3201,9 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
ExpressionClassifier* classifier, bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
+ //
+ // NewTarget ::
+ // 'new' '.' 'target'
// The grammar for new expressions is pretty warped. We can have several 'new'
// keywords following each other, and then a MemberExpression. When we see '('
@@ -3155,6 +3227,8 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new, classifier, CHECK_OK);
+ } else if (allow_harmony_new_target() && peek() == Token::PERIOD) {
+ return ParseNewTargetExpression(CHECK_OK);
} else {
result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
}
@@ -3220,7 +3294,7 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
- CHECK_OK);
+ language_mode(), CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
result = ParseSuperExpression(is_new, classifier, CHECK_OK);
@@ -3246,7 +3320,6 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
Consume(Token::THIS);
int pos = position();
function_state_->set_this_location(scanner()->location());
- scope_->RecordThisUsage();
ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
ExpressionT left = this->EmptyExpression();
@@ -3274,14 +3347,14 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
break;
}
default:
- ReportMessage("strong_constructor_this");
+ ReportMessage(MessageTemplate::kStrongConstructorThis);
*ok = false;
return this->EmptyExpression();
}
if (peek() != Token::ASSIGN) {
ReportMessageAt(function_state_->this_location(),
- "strong_constructor_this");
+ MessageTemplate::kStrongConstructorThis);
*ok = false;
return this->EmptyExpression();
}
@@ -3306,7 +3379,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
if (function_state_->return_location().IsValid()) {
ReportMessageAt(function_state_->return_location(),
- "strong_constructor_return_misplaced");
+ MessageTemplate::kStrongConstructorReturnMisplaced);
*ok = false;
return this->EmptyExpression();
}
@@ -3326,10 +3399,10 @@ ParserBase<Traits>::ParseStrongSuperCallExpression(
Consume(Token::SUPER);
int pos = position();
Scanner::Location super_loc = scanner()->location();
- ExpressionT expr = this->SuperReference(scope_, factory());
+ ExpressionT expr = this->SuperCallReference(scope_, factory(), pos);
if (peek() != Token::LPAREN) {
- ReportMessage("strong_constructor_super");
+ ReportMessage(MessageTemplate::kStrongConstructorSuper);
*ok = false;
return this->EmptyExpression();
}
@@ -3340,20 +3413,22 @@ ParserBase<Traits>::ParseStrongSuperCallExpression(
// TODO(rossberg): This doesn't work with arrow functions yet.
if (!IsSubclassConstructor(function_state_->kind())) {
- ReportMessage("unexpected_super");
+ ReportMessage(MessageTemplate::kUnexpectedSuper);
*ok = false;
return this->EmptyExpression();
} else if (function_state_->super_location().IsValid()) {
- ReportMessageAt(scanner()->location(), "strong_super_call_duplicate");
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrongSuperCallDuplicate);
*ok = false;
return this->EmptyExpression();
} else if (function_state_->this_location().IsValid()) {
- ReportMessageAt(scanner()->location(), "strong_super_call_misplaced");
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrongSuperCallMisplaced);
*ok = false;
return this->EmptyExpression();
} else if (function_state_->return_location().IsValid()) {
ReportMessageAt(function_state_->return_location(),
- "strong_constructor_return_misplaced");
+ MessageTemplate::kStrongConstructorReturnMisplaced);
*ok = false;
return this->EmptyExpression();
}
@@ -3373,37 +3448,41 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseSuperExpression(bool is_new,
ExpressionClassifier* classifier,
bool* ok) {
+ int pos = position();
Expect(Token::SUPER, CHECK_OK);
- // TODO(wingo): Does this actually work with lazily compiled arrows?
- FunctionState* function_state = function_state_;
- while (IsArrowFunction(function_state->kind())) {
- function_state = function_state->outer();
+ Scope* scope = scope_->DeclarationScope();
+ while (scope->is_eval_scope() || scope->is_arrow_scope()) {
+ scope = scope->outer_scope();
+ DCHECK_NOT_NULL(scope);
+ scope = scope->DeclarationScope();
}
- // TODO(arv): Handle eval scopes similarly.
- FunctionKind kind = function_state->kind();
+ FunctionKind kind = scope->function_kind();
if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
i::IsConstructor(kind)) {
if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
- scope_->RecordSuperPropertyUsage();
- return this->SuperReference(scope_, factory());
+ scope->RecordSuperPropertyUsage();
+ return this->SuperPropertyReference(scope_, factory(), pos);
}
// new super() is never allowed.
// super() is only allowed in derived constructor
if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
if (is_strong(language_mode())) {
// Super calls in strong mode are parsed separately.
- ReportMessageAt(scanner()->location(), "strong_constructor_super");
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kStrongConstructorSuper);
*ok = false;
return this->EmptyExpression();
}
- function_state->set_super_location(scanner()->location());
- return this->SuperReference(scope_, factory());
+ // TODO(rossberg): This might not be the correct FunctionState for the
+ // method here.
+ function_state_->set_super_location(scanner()->location());
+ return this->SuperCallReference(scope_, factory(), pos);
}
}
- ReportMessageAt(scanner()->location(), "unexpected_super");
+ ReportMessageAt(scanner()->location(), MessageTemplate::kUnexpectedSuper);
*ok = false;
return this->EmptyExpression();
}
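
Both the rewritten ParseSuperExpression above and the new ParseNewTargetExpression below resolve their home scope the same way: climb to the current declaration scope, then keep hopping outward while that scope is an eval or arrow scope, since both are transparent to 'super' and 'new.target'. A standalone sketch under illustrative scope kinds:

    #include <cassert>

    enum ScopeKind { kFunction, kMethod, kArrow, kEval, kBlock };

    struct Scope {
      ScopeKind kind;
      Scope* outer;
      bool is_declaration_scope() const { return kind != kBlock; }
      Scope* DeclarationScope() {
        Scope* s = this;
        while (!s->is_declaration_scope()) s = s->outer;
        return s;
      }
    };

    // 'super' and 'new.target' bind to the nearest enclosing non-arrow,
    // non-eval declaration scope, so hop outward past both kinds.
    Scope* HomeScope(Scope* scope) {
      Scope* s = scope->DeclarationScope();
      while (s->kind == kEval || s->kind == kArrow) {
        s = s->outer;
        assert(s != nullptr);
        s = s->DeclarationScope();
      }
      return s;
    }

    int main() {
      Scope method{kMethod, nullptr};
      Scope arrow{kArrow, &method};
      Scope block{kBlock, &arrow};
      // 'super' used inside an arrow inside a method sees the method.
      assert(HomeScope(&block) == &method);
    }
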
@@ -3411,6 +3490,31 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new,
template <class Traits>
typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseNewTargetExpression(bool* ok) {
+ int pos = position();
+ Consume(Token::PERIOD);
+ ExpectContextualKeyword(CStrVector("target"), CHECK_OK);
+
+ Scope* scope = scope_->DeclarationScope();
+ while (scope->is_eval_scope() || scope->is_arrow_scope()) {
+ scope = scope->outer_scope();
+ DCHECK_NOT_NULL(scope);
+ scope = scope->DeclarationScope();
+ }
+
+ if (!scope->is_function_scope()) {
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kUnexpectedNewTarget);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ return this->NewTargetExpression(scope_, factory(), pos);
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseMemberExpressionContinuation(
ExpressionT expression, ExpressionClassifier* classifier, bool* ok) {
// Parses this part of MemberExpression:
@@ -3472,36 +3576,42 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
template <class Traits>
void ParserBase<Traits>::ParseFormalParameter(
- FormalParameterScopeT* scope, FormalParameterErrorLocations* locs,
- bool is_rest, bool* ok) {
+ bool is_rest, FormalParameterParsingStateT* parsing_state,
+ ExpressionClassifier* classifier, bool* ok) {
// FormalParameter[Yield,GeneratorParameter] :
// BindingElement[?Yield, ?GeneratorParameter]
- bool is_strict_reserved;
- IdentifierT name =
- ParseIdentifierOrStrictReservedWord(&is_strict_reserved, ok);
+
+ Token::Value next = peek();
+ ExpressionT pattern = ParsePrimaryExpression(classifier, ok);
if (!*ok) return;
- // Store locations for possible future error reports.
- if (!locs->eval_or_arguments.IsValid() && this->IsEvalOrArguments(name)) {
- locs->eval_or_arguments = scanner()->location();
- }
- if (!locs->undefined.IsValid() && this->IsUndefined(name)) {
- locs->undefined = scanner()->location();
+ ValidateBindingPattern(classifier, ok);
+ if (!*ok) return;
+
+ if (!allow_harmony_destructuring() && !Traits::IsIdentifier(pattern)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return;
}
- if (!locs->reserved.IsValid() && is_strict_reserved) {
- locs->reserved = scanner()->location();
+
+ if (parsing_state->is_simple_parameter_list) {
+ parsing_state->is_simple_parameter_list =
+ !is_rest && Traits::IsIdentifier(pattern);
}
- bool was_declared = Traits::DeclareFormalParameter(scope, name, is_rest);
- if (!locs->duplicate.IsValid() && was_declared) {
- locs->duplicate = scanner()->location();
+ parsing_state->has_rest = is_rest;
+ if (is_rest && !Traits::IsIdentifier(pattern)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return;
}
+ Traits::DeclareFormalParameter(parsing_state, pattern, classifier, is_rest);
}
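
The is_simple_parameter_list bookkeeping above has a compact invariant: a list stays "simple" only while every parameter is a plain identifier and no rest parameter has been seen. A sketch with illustrative types:

    #include <cassert>
    #include <vector>

    struct Param {
      bool is_identifier;
      bool is_rest;
    };

    bool IsSimpleParameterList(const std::vector<Param>& params) {
      for (const Param& p : params) {
        if (p.is_rest || !p.is_identifier) return false;
      }
      return true;
    }

    int main() {
      assert(IsSimpleParameterList({{true, false}, {true, false}}));  // (a, b)
      assert(!IsSimpleParameterList({{true, false}, {true, true}}));  // (a, ...b)
      assert(!IsSimpleParameterList({{false, false}}));               // ([a])
    }
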
template <class Traits>
int ParserBase<Traits>::ParseFormalParameterList(
- FormalParameterScopeT* scope, FormalParameterErrorLocations* locs,
- bool* is_rest, bool* ok) {
+ FormalParameterParsingStateT* parsing_state,
+ ExpressionClassifier* classifier, bool* ok) {
// FormalParameters[Yield,GeneratorParameter] :
// [empty]
// FormalParameterList[?Yield, ?GeneratorParameter]
@@ -3521,17 +3631,18 @@ int ParserBase<Traits>::ParseFormalParameterList(
if (peek() != Token::RPAREN) {
do {
if (++parameter_count > Code::kMaxArguments) {
- ReportMessage("too_many_parameters");
+ ReportMessage(MessageTemplate::kTooManyParameters);
*ok = false;
return -1;
}
- *is_rest = allow_harmony_rest_params() && Check(Token::ELLIPSIS);
- ParseFormalParameter(scope, locs, *is_rest, ok);
+ bool is_rest = allow_harmony_rest_params() && Check(Token::ELLIPSIS);
+ ParseFormalParameter(is_rest, parsing_state, classifier, ok);
if (!*ok) return -1;
- } while (!*is_rest && Check(Token::COMMA));
+ } while (!parsing_state->has_rest && Check(Token::COMMA));
- if (*is_rest && peek() == Token::COMMA) {
- ReportMessageAt(scanner()->peek_location(), "param_after_rest");
+ if (parsing_state->has_rest && peek() == Token::COMMA) {
+ ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kParamAfterRest);
*ok = false;
return -1;
}
@@ -3544,19 +3655,24 @@ int ParserBase<Traits>::ParseFormalParameterList(
template <class Traits>
void ParserBase<Traits>::CheckArityRestrictions(
int param_count, FunctionLiteral::ArityRestriction arity_restriction,
- int formals_start_pos, int formals_end_pos, bool* ok) {
+ bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok) {
switch (arity_restriction) {
case FunctionLiteral::GETTER_ARITY:
if (param_count != 0) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- "bad_getter_arity");
+ MessageTemplate::kBadGetterArity);
*ok = false;
}
break;
case FunctionLiteral::SETTER_ARITY:
if (param_count != 1) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- "bad_setter_arity");
+ MessageTemplate::kBadSetterArity);
+ *ok = false;
+ }
+ if (has_rest) {
+ ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadSetterRestParameter);
*ok = false;
}
break;
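
The setter branch above now also rejects a rest parameter, so the whole accessor arity contract fits in two predicates. A sketch under illustrative names:

    #include <cassert>

    bool GetterArityOk(int param_count) { return param_count == 0; }

    bool SetterArityOk(int param_count, bool has_rest) {
      return param_count == 1 && !has_rest;  // rest parameter now rejected
    }

    int main() {
      assert(GetterArityOk(0));          // get p() {}
      assert(!GetterArityOk(1));         // get p(x) {} is an error
      assert(SetterArityOk(1, false));   // set p(v) {}
      assert(!SetterArityOk(1, true));   // set p(...v) {} is an error
      assert(!SetterArityOk(2, false));  // set p(a, b) {} is an error
    }
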
@@ -3569,8 +3685,8 @@ void ParserBase<Traits>::CheckArityRestrictions(
template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseArrowFunctionLiteral(
- Scope* scope, const FormalParameterErrorLocations& error_locs,
- bool has_rest, ExpressionClassifier* classifier, bool* ok) {
+ const FormalParameterParsingStateT& formal_parameters,
+ const ExpressionClassifier& formals_classifier, bool* ok) {
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
// `=> ...` is never a valid expression, so report as syntax error.
@@ -3581,20 +3697,22 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
}
typename Traits::Type::StatementList body;
- int num_parameters = scope->num_parameters();
+ int num_parameters = formal_parameters.scope->num_parameters();
int materialized_literal_count = -1;
int expected_property_count = -1;
- int handler_count = 0;
Scanner::Location super_loc;
{
typename Traits::Type::Factory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_, scope,
- kArrowFunction, &function_factory);
+ FunctionState function_state(&function_state_, &scope_,
+ formal_parameters.scope, kArrowFunction,
+ &function_factory);
+
+ function_state.SkipMaterializedLiterals(
+ formal_parameters.materialized_literals_count);
+
+ this->ReindexLiterals(formal_parameters);
- if (peek() == Token::ARROW) {
- BindingPatternUnexpectedToken(classifier);
- }
Expect(Token::ARROW, CHECK_OK);
if (peek() == Token::LBRACE) {
@@ -3608,54 +3726,57 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
&expected_property_count, CHECK_OK);
} else {
body = this->ParseEagerFunctionBody(
- this->EmptyIdentifier(), RelocInfo::kNoPosition, NULL,
- Token::INIT_VAR, kArrowFunction, CHECK_OK);
+ this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
+ NULL, Token::INIT_VAR, kArrowFunction, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
- handler_count = function_state.handler_count();
}
} else {
// Single-expression body
int pos = position();
parenthesized_function_ = false;
+ ExpressionClassifier classifier;
ExpressionT expression =
- ParseAssignmentExpression(true, classifier, CHECK_OK);
+ ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ ValidateExpression(&classifier, CHECK_OK);
body = this->NewStatementList(1, zone());
+ this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
body->Add(factory()->NewReturnStatement(expression, pos), zone());
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
- handler_count = function_state.handler_count();
}
super_loc = function_state.super_location();
- scope->set_end_position(scanner()->location().end_pos);
+ formal_parameters.scope->set_end_position(scanner()->location().end_pos);
// Arrow function formal parameters are parsed as StrictFormalParameterList,
// which is not the same as "parameters of a strict function"; it only means
// that duplicates are not allowed. Of course, the arrow function may
// itself be strict as well.
- const bool use_strict_params = true;
- this->CheckFunctionParameterNames(language_mode(), use_strict_params,
- error_locs, CHECK_OK);
+ const bool allow_duplicate_parameters = false;
+ this->ValidateFormalParameters(&formals_classifier, language_mode(),
+ allow_duplicate_parameters, CHECK_OK);
// Validate strict mode.
if (is_strict(language_mode())) {
- CheckStrictOctalLiteral(scope->start_position(),
+ CheckStrictOctalLiteral(formal_parameters.scope->start_position(),
scanner()->location().end_pos, CHECK_OK);
- this->CheckConflictingVarDeclarations(scope, CHECK_OK);
+ this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
}
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
- this->EmptyIdentifierString(), ast_value_factory(), scope, body,
- materialized_literal_count, expected_property_count, handler_count,
- num_parameters, FunctionLiteral::kNoDuplicateParameters,
+ this->EmptyIdentifierString(), ast_value_factory(),
+ formal_parameters.scope, body, materialized_literal_count,
+ expected_property_count, num_parameters,
+ FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
FunctionLiteral::kShouldLazyCompile, FunctionKind::kArrowFunction,
- scope->start_position());
+ formal_parameters.scope->start_position());
- function_literal->set_function_token_position(scope->start_position());
+ function_literal->set_function_token_position(
+ formal_parameters.scope->start_position());
if (super_loc.IsValid()) function_state_->set_super_location(super_loc);
if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
@@ -3708,13 +3829,13 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
next = peek();
if (next == Token::EOS) {
ReportMessageAt(Scanner::Location(start, peek_position()),
- "unterminated_template");
+ MessageTemplate::kUnterminatedTemplate);
*ok = false;
return Traits::EmptyExpression();
} else if (next == Token::ILLEGAL) {
Traits::ReportMessageAt(
Scanner::Location(position() + 1, peek_position()),
- "unexpected_token", "ILLEGAL", kSyntaxError);
+ MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
*ok = false;
return Traits::EmptyExpression();
}
@@ -3725,7 +3846,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
if (peek() != Token::RBRACE) {
ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
- "unterminated_template_expr");
+ MessageTemplate::kUnterminatedTemplateExpr);
*ok = false;
return Traits::EmptyExpression();
}
@@ -3737,13 +3858,14 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
pos = position();
if (next == Token::EOS) {
- ReportMessageAt(Scanner::Location(start, pos), "unterminated_template");
+ ReportMessageAt(Scanner::Location(start, pos),
+ MessageTemplate::kUnterminatedTemplate);
*ok = false;
return Traits::EmptyExpression();
} else if (next == Token::ILLEGAL) {
Traits::ReportMessageAt(
Scanner::Location(position() + 1, peek_position()),
- "unexpected_token", "ILLEGAL", kSyntaxError);
+ MessageTemplate::kUnexpectedToken, "ILLEGAL", kSyntaxError);
*ok = false;
return Traits::EmptyExpression();
}
@@ -3759,20 +3881,22 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
template <typename Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<
- Traits>::CheckAndRewriteReferenceExpression(ExpressionT expression,
- Scanner::Location location,
- const char* message, bool* ok) {
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::CheckAndRewriteReferenceExpression(
+ ExpressionT expression, Scanner::Location location,
+ MessageTemplate::Template message, bool* ok) {
if (this->IsIdentifier(expression)) {
if (is_strict(language_mode()) &&
this->IsEvalOrArguments(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, "strict_eval_arguments", kSyntaxError);
+ this->ReportMessageAt(location, MessageTemplate::kStrictEvalArguments,
+ kSyntaxError);
*ok = false;
return this->EmptyExpression();
}
if (is_strong(language_mode()) &&
this->IsUndefined(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, "strong_undefined", kSyntaxError);
+ this->ReportMessageAt(location, MessageTemplate::kStrongUndefined,
+ kSyntaxError);
*ok = false;
return this->EmptyExpression();
}
@@ -3808,7 +3932,7 @@ void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
if (type == kValueProperty && IsProto()) {
if (has_seen_proto_) {
- this->parser()->ReportMessage("duplicate_proto");
+ this->parser()->ReportMessage(MessageTemplate::kDuplicateProto);
*ok = false;
return;
}
@@ -3828,20 +3952,21 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
if (is_static) {
if (IsPrototype()) {
- this->parser()->ReportMessage("static_prototype");
+ this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
*ok = false;
return;
}
} else if (IsConstructor()) {
if (is_generator || type == kAccessorProperty) {
- const char* msg =
- is_generator ? "constructor_is_generator" : "constructor_is_accessor";
+ MessageTemplate::Template msg =
+ is_generator ? MessageTemplate::kConstructorIsGenerator
+ : MessageTemplate::kConstructorIsAccessor;
this->parser()->ReportMessage(msg);
*ok = false;
return;
}
if (has_seen_constructor_) {
- this->parser()->ReportMessage("duplicate_constructor");
+ this->parser()->ReportMessage(MessageTemplate::kDuplicateConstructor);
*ok = false;
return;
}
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 3a47657571..6a7718a323 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -364,7 +364,10 @@ void CallPrinter::VisitSpread(Spread* node) {
void CallPrinter::VisitThisFunction(ThisFunction* node) {}
-void CallPrinter::VisitSuperReference(SuperReference* node) {}
+void CallPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {}
+
+
+void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {}
void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
@@ -430,10 +433,10 @@ PrettyPrinter::~PrettyPrinter() {
void PrettyPrinter::VisitBlock(Block* node) {
- if (!node->is_initializer_block()) Print("{ ");
+ if (!node->ignore_completion_value()) Print("{ ");
PrintStatements(node->statements());
if (node->statements()->length() > 0) Print(" ");
- if (!node->is_initializer_block()) Print("}");
+ if (!node->ignore_completion_value()) Print("}");
}
@@ -716,6 +719,7 @@ void PrettyPrinter::PrintObjectLiteralProperty(
void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
Print("[ ");
+ Print(" literal_index = %d", node->literal_index());
for (int i = 0; i < node->values()->length(); i++) {
if (i != 0) Print(",");
Visit(node->values()->at(i));
@@ -835,8 +839,13 @@ void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
}
-void PrettyPrinter::VisitSuperReference(SuperReference* node) {
- Print("<super-reference>");
+void PrettyPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {
+ Print("<super-property-reference>");
+}
+
+
+void PrettyPrinter::VisitSuperCallReference(SuperCallReference* node) {
+ Print("<super-call-reference>");
}
@@ -1138,7 +1147,8 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
void AstPrinter::VisitBlock(Block* node) {
- const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
+ const char* block_txt =
+ node->ignore_completion_value() ? "BLOCK NOCOMPLETIONS" : "BLOCK";
IndentedScope indent(this, block_txt);
PrintStatements(node->statements());
}
@@ -1321,7 +1331,48 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
IndentedScope indent(this, "CLASS LITERAL");
- PrintLiteralIndented("NAME", node->name(), false);
+ if (node->raw_name() != nullptr) {
+ PrintLiteralIndented("NAME", node->name(), false);
+ }
+ if (node->extends() != nullptr) {
+ PrintIndentedVisit("EXTENDS", node->extends());
+ }
+ PrintProperties(node->properties());
+}
+
+
+void AstPrinter::PrintProperties(
+ ZoneList<ObjectLiteral::Property*>* properties) {
+ for (int i = 0; i < properties->length(); i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ const char* prop_kind = nullptr;
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ prop_kind = "CONSTANT";
+ break;
+ case ObjectLiteral::Property::COMPUTED:
+ prop_kind = "COMPUTED";
+ break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ prop_kind = "MATERIALIZED_LITERAL";
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ prop_kind = "PROTOTYPE";
+ break;
+ case ObjectLiteral::Property::GETTER:
+ prop_kind = "GETTER";
+ break;
+ case ObjectLiteral::Property::SETTER:
+ prop_kind = "SETTER";
+ break;
+ }
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "PROPERTY%s - %s", property->is_static() ? " - STATIC" : "",
+ prop_kind);
+ IndentedScope prop(this, buf.start());
+ PrintIndentedVisit("KEY", properties->at(i)->key());
+ PrintIndentedVisit("VALUE", properties->at(i)->value());
+ }
}
@@ -1347,6 +1398,9 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
IndentedScope indent(this, "REGEXP LITERAL");
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+ PrintIndented(buf.start());
PrintLiteralIndented("PATTERN", node->pattern(), false);
PrintLiteralIndented("FLAGS", node->flags(), false);
}
@@ -1354,39 +1408,19 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
IndentedScope indent(this, "OBJ LITERAL");
- for (int i = 0; i < node->properties()->length(); i++) {
- const char* prop_kind = NULL;
- switch (node->properties()->at(i)->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- prop_kind = "PROPERTY - CONSTANT";
- break;
- case ObjectLiteral::Property::COMPUTED:
- prop_kind = "PROPERTY - COMPUTED";
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- prop_kind = "PROPERTY - MATERIALIZED_LITERAL";
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- prop_kind = "PROPERTY - PROTOTYPE";
- break;
- case ObjectLiteral::Property::GETTER:
- prop_kind = "PROPERTY - GETTER";
- break;
- case ObjectLiteral::Property::SETTER:
- prop_kind = "PROPERTY - SETTER";
- break;
- default:
- UNREACHABLE();
- }
- IndentedScope prop(this, prop_kind);
- PrintIndentedVisit("KEY", node->properties()->at(i)->key());
- PrintIndentedVisit("VALUE", node->properties()->at(i)->value());
- }
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+ PrintIndented(buf.start());
+ PrintProperties(node->properties());
}
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
IndentedScope indent(this, "ARRAY LITERAL");
+
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+ PrintIndented(buf.start());
if (node->values()->length() > 0) {
IndentedScope indent(this, "VALUES");
for (int i = 0; i < node->values()->length(); i++) {
@@ -1402,18 +1436,21 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
EmbeddedVector<char, 128> buf;
int pos = SNPrintF(buf, "VAR PROXY");
switch (var->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::UNALLOCATED:
break;
- case Variable::PARAMETER:
+ case VariableLocation::PARAMETER:
SNPrintF(buf + pos, " parameter[%d]", var->index());
break;
- case Variable::LOCAL:
+ case VariableLocation::LOCAL:
SNPrintF(buf + pos, " local[%d]", var->index());
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
SNPrintF(buf + pos, " context[%d]", var->index());
break;
- case Variable::LOOKUP:
+ case VariableLocation::GLOBAL:
+ SNPrintF(buf + pos, " global[%d]", var->index());
+ break;
+ case VariableLocation::LOOKUP:
SNPrintF(buf + pos, " lookup");
break;
}
@@ -1513,10 +1550,17 @@ void AstPrinter::VisitThisFunction(ThisFunction* node) {
}
-void AstPrinter::VisitSuperReference(SuperReference* node) {
- IndentedScope indent(this, "SUPER-REFERENCE");
+void AstPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {
+ IndentedScope indent(this, "SUPER-PROPERTY-REFERENCE");
+}
+
+
+void AstPrinter::VisitSuperCallReference(SuperCallReference* node) {
+ IndentedScope indent(this, "SUPER-CALL-REFERENCE");
}
+
#endif // DEBUG
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index a05b61796a..1971cfe839 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -123,6 +123,7 @@ class AstPrinter: public PrettyPrinter {
Variable* var,
Handle<Object> value);
void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
+ void PrintProperties(ZoneList<ObjectLiteral::Property*>* properties);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 30d36eb9e2..070a9eee2b 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -696,4 +696,5 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/prologue.js b/deps/v8/src/prologue.js
new file mode 100644
index 0000000000..e7ad29e358
--- /dev/null
+++ b/deps/v8/src/prologue.js
@@ -0,0 +1,232 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -----------------------------------------------------------------------
+// Utils
+
+var imports = UNDEFINED;
+var exports = UNDEFINED;
+var imports_from_experimental = UNDEFINED;
+
+
+// Export to other scripts.
+// In normal natives, this exports functions to other normal natives.
+// In experimental natives, this exports to other experimental natives and
+// to normal natives that import using utils.ImportFromExperimental.
+function Export(f) {
+ f.next = exports;
+ exports = f;
+};
+
+
+// Import from other scripts.
+// In normal natives, this imports from other normal natives.
+// In experimental natives, this imports from other experimental natives and
+// whitelisted exports from normal natives.
+function Import(f) {
+ f.next = imports;
+ imports = f;
+};
+
+
+// In normal natives, import from experimental natives.
+// Not callable from experimental natives.
+function ImportFromExperimental(f) {
+ f.next = imports_from_experimental;
+ imports_from_experimental = f;
+};
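
To make the registration pattern concrete, here is roughly how a pair of native scripts uses these helpers (an illustrative sketch, not part of this commit; MathMax is taken from the whitelist further down, the surrounding bodies are hypothetical):

    // In the exporting script:
    utils.Export(function(to) {
      to.MathMax = MathMax;
    });

    // In an importing script:
    var MathMax;
    utils.Import(function(from) {
      MathMax = from.MathMax;
    });

Both calls only queue closures on the exports/imports linked lists; they run when the bootstrapper invokes PostNatives (or PostExperimentals) below.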
+
+
+function SetFunctionName(f, name, prefix) {
+ if (IS_SYMBOL(name)) {
+ name = "[" + %SymbolDescription(name) + "]";
+ }
+ if (IS_UNDEFINED(prefix)) {
+ %FunctionSetName(f, name);
+ } else {
+ %FunctionSetName(f, prefix + " " + name);
+ }
+}
+
+
+function InstallConstants(object, constants) {
+ %CheckIsBootstrapping();
+ %OptimizeObjectForAddingMultipleProperties(object, constants.length >> 1);
+ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
+ for (var i = 0; i < constants.length; i += 2) {
+ var name = constants[i];
+ var k = constants[i + 1];
+ %AddNamedProperty(object, name, k, attributes);
+ }
+ %ToFastProperties(object);
+}
+
+
+function InstallFunctions(object, attributes, functions) {
+ %CheckIsBootstrapping();
+ %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
+ for (var i = 0; i < functions.length; i += 2) {
+ var key = functions[i];
+ var f = functions[i + 1];
+ SetFunctionName(f, key);
+ %FunctionRemovePrototype(f);
+ %AddNamedProperty(object, key, f, attributes);
+ %SetNativeFlag(f);
+ }
+ %ToFastProperties(object);
+}
+
+
+// Helper function to install a getter-only accessor property.
+function InstallGetter(object, name, getter, attributes) {
+ %CheckIsBootstrapping();
+ if (typeof attributes == "undefined") {
+ attributes = DONT_ENUM;
+ }
+ SetFunctionName(getter, name, "get");
+ %FunctionRemovePrototype(getter);
+ %DefineAccessorPropertyUnchecked(object, name, getter, null, attributes);
+ %SetNativeFlag(getter);
+}
+
+
+// Helper function to install a getter/setter accessor property.
+function InstallGetterSetter(object, name, getter, setter) {
+ %CheckIsBootstrapping();
+ SetFunctionName(getter, name, "get");
+ SetFunctionName(setter, name, "set");
+ %FunctionRemovePrototype(getter);
+ %FunctionRemovePrototype(setter);
+ %DefineAccessorPropertyUnchecked(object, name, getter, setter, DONT_ENUM);
+ %SetNativeFlag(getter);
+ %SetNativeFlag(setter);
+}
+
+
+// Prevents changes to the prototype of a built-in function.
+// The "prototype" property of the function object is made non-configurable,
+// and the prototype object is made non-extensible. The latter prevents
+// changing the __proto__ property.
+function SetUpLockedPrototype(
+ constructor, fields, methods) {
+ %CheckIsBootstrapping();
+ var prototype = constructor.prototype;
+ // Install functions first, because this function is used to initialize
+ // PropertyDescriptor itself.
+ var property_count = (methods.length >> 1) + (fields ? fields.length : 0);
+ if (property_count >= 4) {
+ %OptimizeObjectForAddingMultipleProperties(prototype, property_count);
+ }
+ if (fields) {
+ for (var i = 0; i < fields.length; i++) {
+ %AddNamedProperty(prototype, fields[i],
+ UNDEFINED, DONT_ENUM | DONT_DELETE);
+ }
+ }
+ for (var i = 0; i < methods.length; i += 2) {
+ var key = methods[i];
+ var f = methods[i + 1];
+ %AddNamedProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetNativeFlag(f);
+ }
+ %InternalSetPrototype(prototype, null);
+ %ToFastProperties(prototype);
+}
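
A hypothetical caller, to make the calling convention concrete (the Example names are invented; per the loops above, fields become DONT_ENUM | DONT_DELETE slots and methods are additionally READ_ONLY):

    function Example() {}
    utils.SetUpLockedPrototype(Example,
        ["field_a", "field_b"],                   // per-instance field names
        ["compute", function() { return 42; }]);  // flat [name, function] pairs
    // Example.prototype now has a null [[Prototype]] and fast properties.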
+
+// -----------------------------------------------------------------------
+// To be called by bootstrapper
+
+var experimental_exports = UNDEFINED;
+
+function PostNatives(utils) {
+ %CheckIsBootstrapping();
+
+ var container = {};
+ for ( ; !IS_UNDEFINED(exports); exports = exports.next) exports(container);
+ for ( ; !IS_UNDEFINED(imports); imports = imports.next) imports(container);
+
+ // Whitelist of exports from normal natives to experimental natives.
+ var expose_to_experimental = [
+ "ArrayToString",
+ "GetIterator",
+ "GetMethod",
+ "InnerArrayEvery",
+ "InnerArrayFilter",
+ "InnerArrayForEach",
+ "InnerArrayIndexOf",
+ "InnerArrayJoin",
+ "InnerArrayLastIndexOf",
+ "InnerArrayMap",
+ "InnerArrayReduce",
+ "InnerArrayReduceRight",
+ "InnerArrayReverse",
+ "InnerArraySome",
+ "InnerArraySort",
+ "InnerArrayToLocaleString",
+ "IsNaN",
+ "MathMax",
+ "MathMin",
+ "ObjectIsFrozen",
+ "ObjectDefineProperty",
+ "OwnPropertyKeys",
+ "ToNameArray",
+ ];
+ experimental_exports = {};
+ %OptimizeObjectForAddingMultipleProperties(
+ experimental_exports, expose_to_experimental.length);
+ for (var key of expose_to_experimental) {
+ experimental_exports[key] = container[key];
+ }
+ %ToFastProperties(experimental_exports);
+ container = UNDEFINED;
+
+ utils.PostNatives = UNDEFINED;
+ utils.ImportFromExperimental = UNDEFINED;
+};
+
+
+function PostExperimentals(utils) {
+ %CheckIsBootstrapping();
+
+ for ( ; !IS_UNDEFINED(exports); exports = exports.next) {
+ exports(experimental_exports);
+ }
+ for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
+ imports(experimental_exports);
+ }
+ for ( ; !IS_UNDEFINED(imports_from_experimental);
+ imports_from_experimental = imports_from_experimental.next) {
+ imports_from_experimental(experimental_exports);
+ }
+
+ experimental_exports = UNDEFINED;
+
+ utils.PostExperimentals = UNDEFINED;
+ utils.Import = UNDEFINED;
+ utils.Export = UNDEFINED;
+};
+
+// -----------------------------------------------------------------------
+
+InstallFunctions(utils, NONE, [
+ "Import", Import,
+ "Export", Export,
+ "ImportFromExperimental", ImportFromExperimental,
+ "SetFunctionName", SetFunctionName,
+ "InstallConstants", InstallConstants,
+ "InstallFunctions", InstallFunctions,
+ "InstallGetter", InstallGetter,
+ "InstallGetterSetter", InstallGetterSetter,
+ "SetUpLockedPrototype", SetUpLockedPrototype,
+ "PostNatives", PostNatives,
+ "PostExperimentals", PostExperimentals,
+]);
+
+})
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index b0d1aa088b..0fd4b89c51 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -12,13 +12,18 @@ var $promiseHasUserDefinedRejectHandler;
var $promiseStatus;
var $promiseValue;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
// -------------------------------------------------------------------
+// Imports
+
+var InternalArray = utils.InternalArray;
+
+// -------------------------------------------------------------------
// Status values: 0 = pending, +1 = resolved, -1 = rejected
var promiseStatus = GLOBAL_PRIVATE("Promise#status");
@@ -184,11 +189,11 @@ function PromiseDeferred() {
reject: function(r) { PromiseReject(promise, r) }
};
} else {
- var result = {};
+ var result = {promise: UNDEFINED, reject: UNDEFINED, resolve: UNDEFINED};
result.promise = new this(function(resolve, reject) {
result.resolve = resolve;
result.reject = reject;
- })
+ });
return result;
}
}
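
The rewritten branch declares promise, reject and resolve up front, presumably so the deferred record keeps a single hidden class instead of transitioning shape as the executor fills the fields in later. In plain JS the same pattern looks like:

    var result = {promise: undefined, reject: undefined, resolve: undefined};
    result.promise = new Promise(function(resolve, reject) {
      result.resolve = resolve;  // writes to pre-declared slots; no shape change
      result.reject = reject;
    });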
@@ -366,7 +371,7 @@ function PromiseHasUserDefinedRejectHandler() {
%AddNamedProperty(GlobalPromise.prototype, symbolToStringTag, "Promise",
DONT_ENUM | READ_ONLY);
-$installFunctions(GlobalPromise, DONT_ENUM, [
+utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
"defer", PromiseDeferred,
"accept", PromiseResolved,
"reject", PromiseRejected,
@@ -375,7 +380,7 @@ $installFunctions(GlobalPromise, DONT_ENUM, [
"resolve", PromiseCast
]);
-$installFunctions(GlobalPromise.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
"chain", PromiseChain,
"then", PromiseThen,
"catch", PromiseCatch
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 3b4ddafd82..93ba43cb92 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -82,4 +82,5 @@ std::ostream& operator<<(std::ostream& os, const Descriptor& d) {
return os;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 60a7073da6..782035b6f0 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -2,24 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $proxyDelegateCallAndConstruct;
var $proxyDerivedGetTrap;
var $proxyDerivedHasTrap;
-var $proxyDerivedHasOwnTrap;
-var $proxyDerivedKeysTrap;
var $proxyDerivedSetTrap;
var $proxyEnumerate;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// ----------------------------------------------------------------------------
+// Imports
+
var GlobalFunction = global.Function;
var GlobalObject = global.Object;
-// -------------------------------------------------------------------
+var ToNameArray;
+
+utils.Import(function(from) {
+ ToNameArray = from.ToNameArray;
+});
+
+//----------------------------------------------------------------------------
function ProxyCreate(handler, proto) {
if (!IS_SPEC_OBJECT(handler))
@@ -175,7 +181,7 @@ function ProxyEnumerate(proxy) {
if (IS_UNDEFINED(handler.enumerate)) {
return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
} else {
- return $toNameArray(handler.enumerate(), "enumerate", false)
+ return ToNameArray(handler.enumerate(), "enumerate", false)
}
}
@@ -185,17 +191,23 @@ var Proxy = new GlobalObject();
%AddNamedProperty(global, "Proxy", Proxy, DONT_ENUM);
// Set up non-enumerable properties of the Proxy object.
-$installFunctions(Proxy, DONT_ENUM, [
+utils.InstallFunctions(Proxy, DONT_ENUM, [
"create", ProxyCreate,
"createFunction", ProxyCreateFunction
])
-$proxyDelegateCallAndConstruct = DelegateCallAndConstruct;
+// -------------------------------------------------------------------
+// Exports
+
$proxyDerivedGetTrap = DerivedGetTrap;
$proxyDerivedHasTrap = DerivedHasTrap;
-$proxyDerivedHasOwnTrap = DerivedHasOwnTrap;
-$proxyDerivedKeysTrap = DerivedKeysTrap;
$proxyDerivedSetTrap = DerivedSetTrap;
$proxyEnumerate = ProxyEnumerate;
+utils.Export(function(to) {
+ to.ProxyDelegateCallAndConstruct = DelegateCallAndConstruct;
+ to.ProxyDerivedHasOwnTrap = DerivedHasOwnTrap;
+ to.ProxyDerivedKeysTrap = DerivedKeysTrap;
+});
+
})
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 5574730a57..7bd3b56b0f 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -453,4 +453,5 @@ void RegExpMacroAssemblerIrregexp::Expand() {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index 71d0b9b0f7..19fae2f9ac 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -26,6 +26,12 @@ RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
}
+void RegExpMacroAssemblerTracer::AbortedCodeGeneration() {
+ PrintF(" AbortedCodeGeneration\n");
+ assembler_->AbortedCodeGeneration();
+}
+
+
// This is used for printing out debugging information. It makes an integer
// that is closely related to the address of an object.
static int LabelToInt(Label* label) {
@@ -409,4 +415,5 @@ Handle<HeapObject> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
return assembler_->GetCode(source);
}
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index 67b1710e45..8b8d80a15a 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -13,6 +13,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerTracer(Isolate* isolate, RegExpMacroAssembler* assembler);
virtual ~RegExpMacroAssemblerTracer();
+ virtual void AbortedCodeGeneration();
virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
virtual void AdvanceCurrentPosition(int by); // Signed cp change.
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 6228ce4b53..48cbbf3ed1 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -307,4 +307,5 @@ Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index c0a8d0027e..df244249b6 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -45,6 +45,9 @@ class RegExpMacroAssembler {
RegExpMacroAssembler(Isolate* isolate, Zone* zone);
virtual ~RegExpMacroAssembler();
+ // This function is called when code generation is aborted, so that
+  // the assembler can clean up internal data structures.
+ virtual void AbortedCodeGeneration() {}
// The maximal number of pushes between stack checks. Users must supply
// kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
// at least once for every stack_limit() pushes that are executed.
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index f114ae4424..0ef4942048 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -84,4 +84,5 @@ Address RegExpStack::EnsureCapacity(size_t size) {
}
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 3ac5987202..bf75ca1b01 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -2,26 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $regexpExec;
-var $regexpExecNoTests;
-var $regexpLastMatchInfo;
var $regexpLastMatchInfoOverride;
var harmony_regexps = false;
var harmony_unicode_regexps = false;
-(function(global, shared, exports) {
+(function(global, utils) {
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalRegExp = global.RegExp;
+var InternalPackedArray = utils.InternalPackedArray;
+
+// -------------------------------------------------------------------
// Property of the builtins object for recording the result of the last
-// regexp match. The property $regexpLastMatchInfo includes the matchIndices
+// regexp match. The property RegExpLastMatchInfo includes the matchIndices
// array of the last successful regexp match (an array of start/end index
// pairs for the match and all the captured substrings); the invariant is
// that there are at least two capture indices. The array also contains
// the subject string for the last successful match.
-$regexpLastMatchInfo = new InternalPackedArray(
+var RegExpLastMatchInfo = new InternalPackedArray(
2, // REGEXP_NUMBER_OF_CAPTURES
"", // Last subject.
UNDEFINED, // Last input - settable with RegExpSetInput.
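
The NUMBER_OF_CAPTURES, LAST_SUBJECT, LAST_INPUT and CAPTURE(i) macros used throughout this file address fixed slots of this array; a sketch of the layout, per the slot constants in macros.py:

    // RegExpLastMatchInfo layout (sketch):
    //   [0]   number of capture slots (2 * (capture count + 1))
    //   [1]   subject string of the last successful match
    //   [2]   last input, settable via RegExpSetInput
    //   [3]   CAPTURE0: start index of the full match
    //   [4]   CAPTURE1: end index of the full match
    //   [5..] start/end index pairs for the capturing groups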
@@ -42,9 +45,7 @@ $regexpLastMatchInfoOverride = null;
function DoConstructRegExp(object, pattern, flags) {
// RegExp : Called as constructor; see ECMA-262, section 15.10.4.
if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) {
- throw MakeTypeError('regexp_flags', []);
- }
+ if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
flags = (pattern.global ? 'g' : '')
+ (pattern.ignoreCase ? 'i' : '')
+ (pattern.multiline ? 'm' : '');
@@ -100,7 +101,7 @@ function RegExpCompileJS(pattern, flags) {
function DoRegExpExec(regexp, string, index) {
- var result = %_RegExpExec(regexp, string, index, $regexpLastMatchInfo);
+ var result = %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
if (result !== null) $regexpLastMatchInfoOverride = null;
return result;
}
@@ -134,7 +135,7 @@ endmacro
function RegExpExecNoTests(regexp, string, start) {
// Must be called with RegExp, string and positive integer as arguments.
- var matchInfo = %_RegExpExec(regexp, string, start, $regexpLastMatchInfo);
+ var matchInfo = %_RegExpExec(regexp, string, start, RegExpLastMatchInfo);
if (matchInfo !== null) {
$regexpLastMatchInfoOverride = null;
RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
@@ -167,8 +168,8 @@ function RegExpExecJS(string) {
i = 0;
}
- // matchIndices is either null or the $regexpLastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, $regexpLastMatchInfo);
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
@@ -178,7 +179,7 @@ function RegExpExecJS(string) {
// Successful match.
$regexpLastMatchInfoOverride = null;
if (updateLastIndex) {
- this.lastIndex = $regexpLastMatchInfo[CAPTURE1];
+ this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
}
RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
}
@@ -210,14 +211,14 @@ function RegExpTest(string) {
this.lastIndex = 0;
return false;
}
- // matchIndices is either null or the $regexpLastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, $regexpLastMatchInfo);
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
$regexpLastMatchInfoOverride = null;
- this.lastIndex = $regexpLastMatchInfo[CAPTURE1];
+ this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
return true;
} else {
// Non-global, non-sticky regexp.
@@ -231,8 +232,8 @@ function RegExpTest(string) {
%_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
regexp = TrimRegExp(regexp);
}
- // matchIndices is either null or the $regexpLastMatchInfo array.
- var matchIndices = %_RegExpExec(regexp, string, 0, $regexpLastMatchInfo);
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(regexp, string, 0, RegExpLastMatchInfo);
if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
@@ -277,10 +278,10 @@ function RegExpGetLastMatch() {
if ($regexpLastMatchInfoOverride !== null) {
return OVERRIDE_MATCH($regexpLastMatchInfoOverride);
}
- var regExpSubject = LAST_SUBJECT($regexpLastMatchInfo);
+ var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
return %_SubString(regExpSubject,
- $regexpLastMatchInfo[CAPTURE0],
- $regexpLastMatchInfo[CAPTURE1]);
+ RegExpLastMatchInfo[CAPTURE0],
+ RegExpLastMatchInfo[CAPTURE1]);
}
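
These getters back the long-standing legacy RegExp statics, e.g.:

    /(\w+) (\w+)/.exec("foo bar");
    RegExp.lastMatch;     // "foo bar"  (RegExpGetLastMatch)
    RegExp.$1;            // "foo"      (a capture getter)
    RegExp.lastParen;     // "bar"      (RegExpGetLastParen)
    RegExp.leftContext;   // ""         (RegExpGetLeftContext)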
@@ -290,14 +291,14 @@ function RegExpGetLastParen() {
if (override.length <= 3) return '';
return override[override.length - 3];
}
- var length = NUMBER_OF_CAPTURES($regexpLastMatchInfo);
+ var length = NUMBER_OF_CAPTURES(RegExpLastMatchInfo);
if (length <= 2) return ''; // There were no captures.
// We match the SpiderMonkey behavior: return the substring defined by the
// last pair (after the first pair) of elements of the capture array even if
// it is empty.
- var regExpSubject = LAST_SUBJECT($regexpLastMatchInfo);
- var start = $regexpLastMatchInfo[CAPTURE(length - 2)];
- var end = $regexpLastMatchInfo[CAPTURE(length - 1)];
+ var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
+ var start = RegExpLastMatchInfo[CAPTURE(length - 2)];
+ var end = RegExpLastMatchInfo[CAPTURE(length - 1)];
if (start != -1 && end != -1) {
return %_SubString(regExpSubject, start, end);
}
@@ -309,8 +310,8 @@ function RegExpGetLeftContext() {
var start_index;
var subject;
if (!$regexpLastMatchInfoOverride) {
- start_index = $regexpLastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT($regexpLastMatchInfo);
+ start_index = RegExpLastMatchInfo[CAPTURE0];
+ subject = LAST_SUBJECT(RegExpLastMatchInfo);
} else {
var override = $regexpLastMatchInfoOverride;
start_index = OVERRIDE_POS(override);
@@ -324,8 +325,8 @@ function RegExpGetRightContext() {
var start_index;
var subject;
if (!$regexpLastMatchInfoOverride) {
- start_index = $regexpLastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT($regexpLastMatchInfo);
+ start_index = RegExpLastMatchInfo[CAPTURE1];
+ subject = LAST_SUBJECT(RegExpLastMatchInfo);
} else {
var override = $regexpLastMatchInfoOverride;
subject = OVERRIDE_SUBJECT(override);
@@ -348,11 +349,11 @@ function RegExpMakeCaptureGetter(n) {
return '';
}
var index = n * 2;
- if (index >= NUMBER_OF_CAPTURES($regexpLastMatchInfo)) return '';
- var matchStart = $regexpLastMatchInfo[CAPTURE(index)];
- var matchEnd = $regexpLastMatchInfo[CAPTURE(index + 1)];
+ if (index >= NUMBER_OF_CAPTURES(RegExpLastMatchInfo)) return '';
+ var matchStart = RegExpLastMatchInfo[CAPTURE(index)];
+ var matchEnd = RegExpLastMatchInfo[CAPTURE(index + 1)];
if (matchStart == -1 || matchEnd == -1) return '';
- return %_SubString(LAST_SUBJECT($regexpLastMatchInfo), matchStart, matchEnd);
+ return %_SubString(LAST_SUBJECT(RegExpLastMatchInfo), matchStart, matchEnd);
};
}
@@ -363,7 +364,7 @@ function RegExpMakeCaptureGetter(n) {
GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
%SetCode(GlobalRegExp, RegExpConstructor);
-$installFunctions(GlobalRegExp.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
"exec", RegExpExecJS,
"test", RegExpTest,
"toString", RegExpToString,
@@ -377,11 +378,11 @@ $installFunctions(GlobalRegExp.prototype, DONT_ENUM, [
// value is set the value it is set to is coerced to a string.
// Getter and setter for the input.
var RegExpGetInput = function() {
- var regExpInput = LAST_INPUT($regexpLastMatchInfo);
+ var regExpInput = LAST_INPUT(RegExpLastMatchInfo);
return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
};
var RegExpSetInput = function(string) {
- LAST_INPUT($regexpLastMatchInfo) = $toString(string);
+ LAST_INPUT(RegExpLastMatchInfo) = $toString(string);
};
%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
@@ -439,7 +440,14 @@ for (var i = 1; i < 10; ++i) {
}
%ToFastProperties(GlobalRegExp);
-$regexpExecNoTests = RegExpExecNoTests;
-$regexpExec = DoRegExpExec;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.RegExpExec = DoRegExpExec;
+ to.RegExpExecNoTests = RegExpExecNoTests;
+ to.RegExpLastMatchInfo = RegExpLastMatchInfo;
+ to.RegExpTest = RegExpTest;
+});
})
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 4780538e1b..c901653a2b 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -83,7 +83,7 @@ void Processor::VisitBlock(Block* node) {
// with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
// returns 'undefined'. To obtain the same behavior with v8, we need
// to prevent rewriting in that case.
- if (!node->is_initializer_block()) Process(node->statements());
+ if (!node->ignore_completion_value()) Process(node->statements());
}
@@ -248,4 +248,5 @@ bool Rewriter::Rewrite(ParseInfo* info) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 5e5b57ced2..351bca7b6a 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -88,8 +88,6 @@ static void GetICCounts(SharedFunctionInfo* shared,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
- DCHECK(function->IsOptimizable());
-
if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
PrintF("[marking ");
function->ShortPrint();
@@ -117,7 +115,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
}
// If the code is not optimizable, don't try OSR.
- if (!shared->code()->optimizable()) return;
+ if (shared->optimization_disabled()) return;
// We are not prepared to do OSR for a function that already has an
// allocated arguments object. The optimized code would bypass it for
@@ -217,7 +215,7 @@ void RuntimeProfiler::OptimizeNow() {
}
continue;
}
- if (!function->IsOptimizable()) continue;
+ if (function->IsOptimized()) continue;
int ticks = shared_code->profiler_ticks();
@@ -262,4 +260,5 @@ void RuntimeProfiler::OptimizeNow() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 5e7e956c22..fa71432883 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -19,6 +19,7 @@
var EQUALS;
var STRICT_EQUALS;
var COMPARE;
+var COMPARE_STRONG;
var ADD;
var ADD_STRONG;
var STRING_ADD_LEFT;
@@ -48,11 +49,11 @@ var SHR_STRONG;
var DELETE;
var IN;
var INSTANCE_OF;
-var FILTER_KEY;
var CALL_NON_FUNCTION;
var CALL_NON_FUNCTION_AS_CONSTRUCTOR;
var CALL_FUNCTION_PROXY;
var CALL_FUNCTION_PROXY_AS_CONSTRUCTOR;
+var CONCAT_ITERABLE_TO_ARRAY;
var APPLY_PREPARE;
var REFLECT_APPLY_PREPARE;
var REFLECT_CONSTRUCT_PREPARE;
@@ -62,7 +63,9 @@ var TO_NUMBER;
var TO_STRING;
var TO_NAME;
-var STRING_LENGTH_STUB;
+var StringLengthTFStub;
+var StringAddTFStub;
+var MathFloorStub;
var $defaultNumber;
var $defaultString;
@@ -83,7 +86,7 @@ var $toPrimitive;
var $toString;
var $toUint32;
-(function(global, shared, exports) {
+(function(global, utils) {
%CheckIsBootstrapping();
@@ -203,6 +206,14 @@ COMPARE = function COMPARE(x, ncr) {
}
}
+// Strong mode COMPARE throws if an implicit conversion would be performed
+COMPARE_STRONG = function COMPARE_STRONG(x, ncr) {
+ if (IS_STRING(this) && IS_STRING(x)) return %_StringCompare(this, x);
+ if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
+
+ throw %MakeTypeError(kStrongImplicitConversion);
+}
+
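A sketch of the intended strong-mode semantics (inside a "use strong" scope with --strong_mode; ordinary sloppy and strict code is unaffected):

    1 < 2;      // fine: both operands are numbers
    "a" < "b";  // fine: both operands are strings
    1 < "2";    // TypeError (kStrongImplicitConversion): mixed types
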
/* -----------------------------------
@@ -235,7 +246,7 @@ ADD_STRONG = function ADD_STRONG(x) {
if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -259,7 +270,7 @@ STRING_ADD_LEFT_STRONG = function STRING_ADD_LEFT_STRONG(y) {
if (IS_STRING(y)) {
return %_StringAdd(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -284,7 +295,7 @@ STRING_ADD_RIGHT_STRONG = function STRING_ADD_RIGHT_STRONG(y) {
if (IS_STRING(this)) {
return %_StringAdd(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -301,7 +312,7 @@ SUB_STRONG = function SUB_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberSub(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -318,7 +329,7 @@ MUL_STRONG = function MUL_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberMul(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -335,7 +346,7 @@ DIV_STRONG = function DIV_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberDiv(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -352,7 +363,7 @@ MOD_STRONG = function MOD_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberMod(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -374,7 +385,7 @@ BIT_OR_STRONG = function BIT_OR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberOr(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -405,7 +416,7 @@ BIT_AND_STRONG = function BIT_AND_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberAnd(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -422,7 +433,7 @@ BIT_XOR_STRONG = function BIT_XOR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberXor(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -439,7 +450,7 @@ SHL_STRONG = function SHL_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberShl(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -470,7 +481,7 @@ SAR_STRONG = function SAR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberSar(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -487,7 +498,7 @@ SHR_STRONG = function SHR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberShr(this, y);
}
- throw %MakeTypeError('strong_implicit_cast');
+ throw %MakeTypeError(kStrongImplicitConversion);
}
@@ -498,7 +509,7 @@ SHR_STRONG = function SHR_STRONG(y) {
// ECMA-262, section 11.4.1, page 46.
DELETE = function DELETE(key, language_mode) {
- return %DeleteProperty(%$toObject(this), %$toName(key), language_mode);
+ return %DeleteProperty(%$toObject(this), key, language_mode);
}
@@ -549,16 +560,6 @@ INSTANCE_OF = function INSTANCE_OF(F) {
}
-// Filter a given key against an object by checking if the object
-// has a property with the given key; return the key as a string if
-// it has. Otherwise returns 0 (smi). Used in for-in statements.
-FILTER_KEY = function FILTER_KEY(key) {
- var string = %$toName(key);
- if (%HasProperty(this, string)) return string;
- return 0;
-}
-
-
CALL_NON_FUNCTION = function CALL_NON_FUNCTION() {
var delegate = %GetFunctionDelegate(this);
if (!IS_FUNCTION(delegate)) {
@@ -716,6 +717,11 @@ REFLECT_CONSTRUCT_PREPARE = function REFLECT_CONSTRUCT_PREPARE(
}
+CONCAT_ITERABLE_TO_ARRAY = function CONCAT_ITERABLE_TO_ARRAY(iterable) {
+ return %$concatIterableToArray(this, iterable);
+};
+
+
STACK_OVERFLOW = function STACK_OVERFLOW(length) {
throw %MakeRangeError(kStackOverflow);
}
@@ -750,9 +756,44 @@ TO_NAME = function TO_NAME() {
-----------------------------------------------
*/
-STRING_LENGTH_STUB = function STRING_LENGTH_STUB(name) {
- var receiver = this; // implicit first parameter
- return %_StringGetLength(%_JSValueGetValue(receiver));
+StringLengthTFStub = function StringLengthTFStub(call_conv, minor_key) {
+ var stub = function(receiver, name, i, v) {
+    // i and v are dummy parameters mandated by the InterfaceDescriptor
+    // (LoadWithVectorDescriptor).
+ return %_StringGetLength(%_JSValueGetValue(receiver));
+ }
+ return stub;
+}
+
+StringAddTFStub = function StringAddTFStub(call_conv, minor_key) {
+ var stub = function(left, right) {
+ return %StringAdd(left, right);
+ }
+ return stub;
+}
+
+MathFloorStub = function MathFloorStub(call_conv, minor_key) {
+ var stub = function(f, i, v) {
+    // |f| is the calling function's JSFunction
+ // |i| is TypeFeedbackVector slot # of callee's CallIC for Math.floor call
+ // |v| is the value to floor
+ var r = %_MathFloor(+v);
+ if (%_IsMinusZero(r)) {
+ // Collect type feedback when the result of the floor is -0. This is
+ // accomplished by storing a sentinel in the second, "extra"
+ // TypeFeedbackVector slot corresponding to the Math.floor CallIC call in
+ // the caller's TypeVector.
+ %_FixedArraySet(%_GetTypeFeedbackVector(f), ((i|0)+1)|0, 1);
+ return -0;
+ }
+ // Return integers in smi range as smis.
+ var trunc = r|0;
+ if (trunc === r) {
+ return trunc;
+ }
+ return r;
+ }
+ return stub;
}
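
A quick illustration of the -0 case the comment above describes: a floor result of -0 cannot be represented as a smi, and === cannot distinguish it from +0, so the stub records a sentinel in the feedback vector before returning it:

    Object.is(Math.floor(-0.5), -0);  // true: the result really is -0
    Math.floor(-0.5) === 0;           // also true: === cannot see the sign
    Math.floor(2.3);                  // 2: integers in smi range return as smis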
@@ -900,6 +941,14 @@ function SameValueZero(x, y) {
return x === y;
}
+function ConcatIterableToArray(target, iterable) {
+ var index = target.length;
+ for (var element of iterable) {
+ %AddElement(target, index++, element);
+ }
+ return target;
+}
+
/* ---------------------------------
- - - U t i l i t i e s - - -
@@ -978,6 +1027,7 @@ function ToPositiveInteger(x, rangeErrorIndex) {
//----------------------------------------------------------------------------
+$concatIterableToArray = ConcatIterableToArray;
$defaultNumber = DefaultNumber;
$defaultString = DefaultString;
$NaN = %GetRootNaN();
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 4147dc953e..d00df71576 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/arguments.h"
+#include "src/elements.h"
#include "src/messages.h"
#include "src/runtime/runtime-utils.h"
@@ -54,6 +55,26 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
}
+RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(FixedArray, object, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ return object->get(index);
+}
+
+
+RUNTIME_FUNCTION(Runtime_FixedArraySet) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_CHECKED(FixedArray, object, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_ARG_CHECKED(Object, value, 2);
+ object->set(index, value);
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 2);
@@ -81,7 +102,8 @@ RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
// Strict not needed. Used for cycle detection in Array join implementation.
RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetFastElement(array, length, element, SLOPPY, true));
+ isolate, JSObject::AddDataElement(array, length, element, NONE));
+ JSObject::ValidateElements(array);
return isolate->heap()->true_value();
}
@@ -273,7 +295,8 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS:
@@ -414,7 +437,8 @@ static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
if (length == range) return; // All indices accounted for already.
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS: {
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
MaybeHandle<Object> length_obj =
Object::GetProperty(object, isolate->factory()->length_string());
double length_num = length_obj.ToHandleChecked()->Number();
@@ -684,17 +708,15 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
isolate, receiver, false, false, visitor);
break;
}
- case SLOPPY_ARGUMENTS_ELEMENTS: {
- ElementsAccessor* accessor = receiver->GetElementsAccessor();
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
for (uint32_t index = 0; index < length; index++) {
HandleScope loop_scope(isolate);
- if (accessor->HasElement(receiver, index)) {
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, accessor->Get(receiver, receiver, index),
- false);
- visitor->visit(index, element);
- }
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element, Object::GetElement(isolate, receiver, index),
+ false);
+ visitor->visit(index, element);
}
break;
}
@@ -707,17 +729,18 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
static bool IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
HandleScope handle_scope(isolate);
if (!obj->IsSpecObject()) return false;
- if (obj->IsJSArray()) return true;
- if (FLAG_harmony_arrays) {
+ if (FLAG_harmony_concat_spreadable) {
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
Handle<Object> value;
MaybeHandle<Object> maybeValue =
i::Runtime::GetObjectProperty(isolate, obj, key);
if (maybeValue.ToHandle(&value)) {
- return value->BooleanValue();
+ if (!value->IsUndefined()) {
+ return value->BooleanValue();
+ }
}
}
- return false;
+ return obj->IsJSArray();
}
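
In JS terms the new lookup order behaves roughly like this (a sketch; spreading non-arrays requires --harmony_concat_spreadable):

    var arrayLike = {length: 2, 0: "a", 1: "b"};
    [].concat(arrayLike);   // [arrayLike]: not spreadable by default

    arrayLike[Symbol.isConcatSpreadable] = true;
    [].concat(arrayLike);   // ["a", "b"]: a defined symbol wins

    var arr = [1, 2];
    arr[Symbol.isConcatSpreadable] = false;
    [].concat(arr);         // [arr]: the symbol also overrides for real arrays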
@@ -1050,8 +1073,8 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
Handle<Object> argument_one = caller_args->at<Object>(0);
if (argument_one->IsSmi()) {
int value = Handle<Smi>::cast(argument_one)->value();
- if (value < 0 || JSArray::SetElementsLengthWouldNormalize(isolate->heap(),
- argument_one)) {
+ if (value < 0 ||
+ JSArray::SetLengthWouldNormalize(isolate->heap(), value)) {
// the array is a dictionary in this case.
can_use_type_feedback = false;
} else if (value != 0) {
@@ -1221,7 +1244,7 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
// GrowArrayElements returns a sentinel Smi if the object was normalized.
RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(int, key, Int32, args[1]);
@@ -1240,16 +1263,7 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
}
uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
- ElementsKind kind = object->GetElementsKind();
- if (IsFastDoubleElementsKind(kind)) {
- JSObject::SetFastDoubleElementsCapacity(object, new_capacity);
- } else {
- JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
- object->HasFastSmiElements() ? JSObject::kAllowSmiElements
- : JSObject::kDontAllowSmiElements;
- JSObject::SetFastElementsCapacity(object, new_capacity,
- set_capacity_mode);
- }
+ object->GetElementsAccessor()->GrowCapacityAndConvert(object, new_capacity);
}
// On success, return the fixed array elements.
@@ -1273,8 +1287,7 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
return isolate->heap()->true_value();
}
if (!current->HasDictionaryElements()) continue;
- if (current->element_dictionary()
- ->HasComplexElements<DictionaryEntryType::kObjects>()) {
+ if (current->element_dictionary()->HasComplexElements()) {
return isolate->heap()->true_value();
}
}
@@ -1282,92 +1295,6 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
}
-// TODO(dcarney): remove this function when TurboFan supports it.
-// Takes the object to be iterated over and the result of GetPropertyNamesFast
-// Returns pair (cache_array, cache_type).
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInInit) {
- SealHandleScope scope(isolate);
- DCHECK(args.length() == 2);
- // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
- // Not worth creating a macro atm as this function should be removed.
- if (!args[0]->IsJSReceiver() || !args[1]->IsObject()) {
- Object* error = isolate->ThrowIllegalOperation();
- return MakePair(error, isolate->heap()->undefined_value());
- }
- Handle<JSReceiver> object = args.at<JSReceiver>(0);
- Handle<Object> cache_type = args.at<Object>(1);
- if (cache_type->IsMap()) {
- // Enum cache case.
- if (Map::EnumLengthBits::decode(Map::cast(*cache_type)->bit_field3()) ==
- 0) {
- // 0 length enum.
- // Can't handle this case in the graph builder,
- // so transform it into the empty fixed array case.
- return MakePair(isolate->heap()->empty_fixed_array(), Smi::FromInt(1));
- }
- return MakePair(object->map()->instance_descriptors()->GetEnumCache(),
- *cache_type);
- } else {
- // FixedArray case.
- Smi* new_cache_type = Smi::FromInt(object->IsJSProxy() ? 0 : 1);
- return MakePair(*Handle<FixedArray>::cast(cache_type), new_cache_type);
- }
-}
-
-
-// TODO(dcarney): remove this function when TurboFan supports it.
-RUNTIME_FUNCTION(Runtime_ForInCacheArrayLength) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 0);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, array, 1);
- int length = 0;
- if (cache_type->IsMap()) {
- length = Map::cast(*cache_type)->EnumLength();
- } else {
- DCHECK(cache_type->IsSmi());
- length = array->length();
- }
- return Smi::FromInt(length);
-}
-
-
-// TODO(dcarney): remove this function when TurboFan supports it.
-// Takes (the object to be iterated over,
-// cache_array from ForInInit,
-// cache_type from ForInInit,
-// the current index)
-// Returns pair (array[index], needs_filtering).
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) {
- SealHandleScope scope(isolate);
- DCHECK(args.length() == 4);
- int32_t index;
- // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
- // Not worth creating a macro atm as this function should be removed.
- if (!args[0]->IsJSReceiver() || !args[1]->IsFixedArray() ||
- !args[2]->IsObject() || !args[3]->ToInt32(&index)) {
- Object* error = isolate->ThrowIllegalOperation();
- return MakePair(error, isolate->heap()->undefined_value());
- }
- Handle<JSReceiver> object = args.at<JSReceiver>(0);
- Handle<FixedArray> array = args.at<FixedArray>(1);
- Handle<Object> cache_type = args.at<Object>(2);
- // Figure out first if a slow check is needed for this object.
- bool slow_check_needed = false;
- if (cache_type->IsMap()) {
- if (object->map() != Map::cast(*cache_type)) {
- // Object transitioned. Need slow check.
- slow_check_needed = true;
- }
- } else {
- // No slow check needed for proxies.
- slow_check_needed = Smi::cast(*cache_type)->value() == 1;
- }
- return MakePair(array->get(index),
- isolate->heap()->ToBoolean(slow_check_needed));
-}
-
-
RUNTIME_FUNCTION(Runtime_IsArray) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -1398,5 +1325,5 @@ RUNTIME_FUNCTION(Runtime_FastOneByteArrayJoin) {
// to a slow path.
return isolate->heap()->undefined_value();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
new file mode 100644
index 0000000000..c9b78769cd
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -0,0 +1,824 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/conversions.h"
+#include "src/runtime/runtime-utils.h"
+
+// Implement Atomic accesses to SharedArrayBuffers as defined in the
+// SharedArrayBuffer draft spec, found here
+// https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+#if V8_CC_GNU
+
+template <typename T>
+inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
+ (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ return oldval;
+}
+
+template <typename T>
+inline T LoadSeqCst(T* p) {
+ T result;
+ __atomic_load(p, &result, __ATOMIC_SEQ_CST);
+ return result;
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+ __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T AddSeqCst(T* p, T value) {
+ return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T SubSeqCst(T* p, T value) {
+ return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T AndSeqCst(T* p, T value) {
+ return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T OrSeqCst(T* p, T value) {
+ return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T XorSeqCst(T* p, T value) {
+ return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T ExchangeSeqCst(T* p, T value) {
+ return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
+}
+
+#if ATOMICS_REQUIRE_LOCK_64_BIT
+
+// We only need to implement the following functions, because the rest of the
+// atomic operations only work on integer types, and the only 64-bit type is
+// float64. Similarly, because the values are being bit_cast from double ->
+// uint64_t, we don't need to implement these functions for int64_t either.
+
+static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER;
+
+inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval,
+ uint64_t newval) {
+ base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
+ uint64_t result = *p;
+ if (result == oldval) *p = newval;
+ return result;
+}
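
The mutex-based fallback above also spells out the contract shared by every CompareExchangeSeqCst overload: replace *p with newval only if it still equals oldval, and always return the previous value. A single-threaded JS sketch of the same semantics (real callers get atomicity and sequential consistency from the primitives above):

    function compareExchange(cell, oldval, newval) {
      var previous = cell.value;
      if (previous === oldval) cell.value = newval;
      return previous;  // caller checks previous === oldval to detect success
    }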
+
+
+inline uint64_t LoadSeqCst(uint64_t* p) {
+ base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
+ return *p;
+}
+
+
+inline void StoreSeqCst(uint64_t* p, uint64_t value) {
+ base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
+ *p = value;
+}
+
+#endif // ATOMICS_REQUIRE_LOCK_64_BIT
+
+#elif V8_CC_MSVC
+
+#define _InterlockedCompareExchange32 _InterlockedCompareExchange
+#define _InterlockedExchange32 _InterlockedExchange
+#define _InterlockedExchangeAdd32 _InterlockedExchangeAdd
+#define _InterlockedAnd32 _InterlockedAnd
+#define _InterlockedOr32 _InterlockedOr
+#define _InterlockedXor32 _InterlockedXor
+
+#define INTEGER_TYPES(V) \
+ V(int8_t, 8, char) \
+ V(uint8_t, 8, char) \
+ V(int16_t, 16, short) /* NOLINT(runtime/int) */ \
+ V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \
+ V(int32_t, 32, long) /* NOLINT(runtime/int) */ \
+ V(uint32_t, 32, long) /* NOLINT(runtime/int) */ \
+ V(int64_t, 64, LONGLONG) \
+ V(uint64_t, 64, LONGLONG)
+
+#define ATOMIC_OPS(type, suffix, vctype) \
+ inline type CompareExchangeSeqCst(volatile type* p, type oldval, \
+ type newval) { \
+ return _InterlockedCompareExchange##suffix( \
+ reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval), \
+ bit_cast<vctype>(oldval)); \
+ } \
+ inline type LoadSeqCst(volatile type* p) { return *p; } \
+ inline void StoreSeqCst(volatile type* p, type value) { \
+ _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type AddSeqCst(volatile type* p, type value) { \
+ return _InterlockedExchangeAdd##suffix( \
+ reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value)); \
+ } \
+ inline type SubSeqCst(volatile type* p, type value) { \
+ return _InterlockedExchangeAdd##suffix( \
+ reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value)); \
+ } \
+ inline type AndSeqCst(volatile type* p, type value) { \
+ return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type OrSeqCst(volatile type* p, type value) { \
+ return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type XorSeqCst(volatile type* p, type value) { \
+ return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type ExchangeSeqCst(volatile type* p, type value) { \
+ return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ }
+INTEGER_TYPES(ATOMIC_OPS)
+#undef ATOMIC_OPS
+
+#undef INTEGER_TYPES
+#undef _InterlockedCompareExchange32
+#undef _InterlockedExchange32
+#undef _InterlockedExchangeAdd32
+#undef _InterlockedAnd32
+#undef _InterlockedOr32
+#undef _InterlockedXor32
+
+#else
+
+#error Unsupported platform!
+
+#endif
+
+template <typename T>
+T FromObject(Handle<Object> number);
+
+template <>
+inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
+ return NumberToUint32(*number);
+}
+
+template <>
+inline int32_t FromObject<int32_t>(Handle<Object> number) {
+ return NumberToInt32(*number);
+}
+
+template <>
+inline float FromObject<float>(Handle<Object> number) {
+ return static_cast<float>(number->Number());
+}
+
+template <>
+inline double FromObject<double>(Handle<Object> number) {
+ return number->Number();
+}
+
+template <typename T, typename F>
+inline T ToAtomic(F from) {
+ return static_cast<T>(from);
+}
+
+template <>
+inline uint32_t ToAtomic<uint32_t, float>(float from) {
+ return bit_cast<uint32_t, float>(from);
+}
+
+template <>
+inline uint64_t ToAtomic<uint64_t, double>(double from) {
+ return bit_cast<uint64_t, double>(from);
+}
+
+template <typename T, typename F>
+inline T FromAtomic(F from) {
+ return static_cast<T>(from);
+}
+
+template <>
+inline float FromAtomic<float, uint32_t>(uint32_t from) {
+ return bit_cast<float, uint32_t>(from);
+}
+
+template <>
+inline double FromAtomic<double, uint64_t>(uint64_t from) {
+ return bit_cast<double, uint64_t>(from);
+}
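
The float and double specializations reinterpret bits rather than convert values, which matters for payloads like the sign of -0 or NaN bits. The effect is observable from JS with overlapping typed-array views (a sketch; the hex value assumes little-endian storage):

    var buf = new ArrayBuffer(8);
    var f64 = new Float64Array(buf);
    var u32 = new Uint32Array(buf);
    f64[0] = -0;              // numerically equal to 0...
    u32[1].toString(16);      // "80000000": ...but the sign bit survives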
+
+template <typename T>
+inline Object* ToObject(Isolate* isolate, T t);
+
+template <>
+inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
+ return Smi::FromInt(t);
+}
+
+template <>
+inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
+ return Smi::FromInt(t);
+}
+
+template <>
+inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
+ return Smi::FromInt(t);
+}
+
+template <>
+inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
+ return Smi::FromInt(t);
+}
+
+template <>
+inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
+ return *isolate->factory()->NewNumber(t);
+}
+
+template <>
+inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
+ return *isolate->factory()->NewNumber(t);
+}
+
+template <>
+inline Object* ToObject<float>(Isolate* isolate, float t) {
+ return *isolate->factory()->NewNumber(t);
+}
+
+template <>
+inline Object* ToObject<double>(Isolate* isolate, double t) {
+ return *isolate->factory()->NewNumber(t);
+}
+
+template <typename T>
+struct FromObjectTraits {};
+
+template <>
+struct FromObjectTraits<int8_t> {
+ typedef int32_t convert_type;
+ typedef int8_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<uint8_t> {
+ typedef uint32_t convert_type;
+ typedef uint8_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<int16_t> {
+ typedef int32_t convert_type;
+ typedef int16_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<uint16_t> {
+ typedef uint32_t convert_type;
+ typedef uint16_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<int32_t> {
+ typedef int32_t convert_type;
+ typedef int32_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<uint32_t> {
+ typedef uint32_t convert_type;
+ typedef uint32_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<float> {
+ typedef float convert_type;
+ typedef uint32_t atomic_type;
+};
+
+template <>
+struct FromObjectTraits<double> {
+ typedef double convert_type;
+ typedef uint64_t atomic_type;
+};
+
+
+template <typename T>
+inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> oldobj, Handle<Object> newobj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
+ atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
+ atomic_type result = CompareExchangeSeqCst(
+ static_cast<atomic_type*>(buffer) + index, oldval, newval);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return *obj;
+}
+
+
+template <typename T>
+inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ atomic_type result =
+ AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ atomic_type result =
+ SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ atomic_type result =
+ AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ atomic_type result =
+ OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ atomic_type result =
+ XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+template <typename T>
+inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef typename FromObjectTraits<T>::atomic_type atomic_type;
+ typedef typename FromObjectTraits<T>::convert_type convert_type;
+ atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
+ atomic_type result =
+ ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return ToObject<T>(isolate, FromAtomic<T>(result));
+}
+
+
+// Uint8Clamped functions
+
+uint8_t ClampToUint8(int32_t value) {
+ if (value < 0) return 0;
+ if (value > 255) return 255;
+ return value;
+}
+
+
+inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
+ size_t index,
+ Handle<Object> oldobj,
+ Handle<Object> newobj) {
+ typedef int32_t convert_type;
+ typedef uint8_t atomic_type;
+ atomic_type oldval = ClampToUint8(FromObject<convert_type>(oldobj));
+ atomic_type newval = ClampToUint8(FromObject<convert_type>(newobj));
+ atomic_type result = CompareExchangeSeqCst(
+ static_cast<atomic_type*>(buffer) + index, oldval, newval);
+ return ToObject<uint8_t>(isolate, FromAtomic<uint8_t>(result));
+}
+
+
+inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
+ Handle<Object> obj) {
+ typedef int32_t convert_type;
+ typedef uint8_t atomic_type;
+ atomic_type value = ClampToUint8(FromObject<convert_type>(obj));
+ StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ return *obj;
+}
+
+
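+// The clamped read-modify-write ops have no matching hardware primitive, so
+// they are emulated with a compare-exchange loop: read the current byte,
+// compute the clamped result, and retry until no other thread has written
+// the cell in between. The pre-update value is returned.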
+#define DO_UINT8_CLAMPED_OP(name, op) \
+ inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
+ size_t index, Handle<Object> obj) { \
+ typedef int32_t convert_type; \
+ typedef uint8_t atomic_type; \
+ atomic_type* p = static_cast<atomic_type*>(buffer) + index; \
+ convert_type operand = FromObject<convert_type>(obj); \
+ atomic_type expected; \
+ atomic_type result; \
+ do { \
+ expected = *p; \
+ result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
+ } while (CompareExchangeSeqCst(p, expected, result) != expected); \
+ return ToObject<uint8_t>(isolate, expected); \
+ }
+
+DO_UINT8_CLAMPED_OP(Add, +)
+DO_UINT8_CLAMPED_OP(Sub, -)
+DO_UINT8_CLAMPED_OP(And, &)
+DO_UINT8_CLAMPED_OP(Or, | )
+DO_UINT8_CLAMPED_OP(Xor, ^)
+
+#undef DO_UINT8_CLAMPED_OP
+
+
+inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
+ size_t index, Handle<Object> obj) {
+ typedef int32_t convert_type;
+ typedef uint8_t atomic_type;
+ atomic_type* p = static_cast<atomic_type*>(buffer) + index;
+ atomic_type result = ClampToUint8(FromObject<convert_type>(obj));
+ atomic_type expected;
+ do {
+ expected = *p;
+ } while (CompareExchangeSeqCst(p, expected, result) != expected);
+ return ToObject<uint8_t>(isolate, expected);
+}
+
+
+} // anonymous namespace
+
+// Duplicated from objects.h
+// V has parameters (Type, type, TYPE, C type, element_size)
+#define INTEGER_TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4)
+
+
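+// The runtime entry points below first check that the typed array is backed
+// by a shared buffer and that the index is in bounds, then dispatch on the
+// element type to the matching Do* helper; element kinds a given operation
+// does not support fall through to UNREACHABLE().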
+RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalFloat32Array:
+ return DoCompareExchange<float>(isolate, buffer, index, oldobj, newobj);
+
+ case kExternalFloat64Array:
+ return DoCompareExchange<double>(isolate, buffer, index, oldobj, newobj);
+
+ case kExternalUint8ClampedArray:
+ return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
+ newobj);
+
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoLoad<ctype>(isolate, buffer, index);
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsStore) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoStore<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalFloat32Array:
+ return DoStore<float>(isolate, buffer, index, value);
+
+ case kExternalFloat64Array:
+ return DoStore<double>(isolate, buffer, index, value);
+
+ case kExternalUint8ClampedArray:
+ return DoStoreUint8Clamped(isolate, buffer, index, value);
+
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoAdd<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalUint8ClampedArray:
+ return DoAddUint8Clamped(isolate, buffer, index, value);
+
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsSub) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoSub<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalUint8ClampedArray:
+ return DoSubUint8Clamped(isolate, buffer, index, value);
+
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoAnd<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalUint8ClampedArray:
+ return DoAndUint8Clamped(isolate, buffer, index, value);
+
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsOr) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoOr<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalUint8ClampedArray:
+ return DoOrUint8Clamped(isolate, buffer, index, value);
+
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsXor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoXor<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalUint8ClampedArray:
+ return DoXorUint8Clamped(isolate, buffer, index, value);
+
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+
+ void* buffer = sta->GetBuffer()->backing_store();
+
+ switch (sta->type()) {
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return DoExchange<ctype>(isolate, buffer, index, value);
+
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case kExternalUint8ClampedArray:
+ return DoExchangeUint8Clamped(isolate, buffer, index, value);
+
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+ return isolate->heap()->undefined_value();
+}
+
+
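+// Atomics.isLockFree(n): reports whether atomic operations on n-byte
+// elements can be performed without falling back to a lock on this platform.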
+RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
+ uint32_t usize = NumberToUint32(*size);
+
+ return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 3dc3a244db..97a19c1bc7 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -52,7 +52,7 @@ RUNTIME_FUNCTION(Runtime_ThrowArrayNotSubclassableError) {
static Object* ThrowStaticPrototypeError(Isolate* isolate) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError("static_prototype", HandleVector<Object>(NULL, 0)));
+ isolate, NewTypeError(MessageTemplate::kStaticPrototype));
}
@@ -113,28 +113,26 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
prototype_parent = isolate->factory()->null_value();
} else if (super_class->IsSpecFunction()) {
if (Handle<JSFunction>::cast(super_class)->shared()->is_generator()) {
- Handle<Object> args[1] = {super_class};
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
- NewTypeError("extends_value_generator", HandleVector(args, 1)));
+ NewTypeError(MessageTemplate::kExtendsValueGenerator, super_class));
}
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, prototype_parent,
Runtime::GetObjectProperty(isolate, super_class,
- isolate->factory()->prototype_string()));
+ isolate->factory()->prototype_string(),
+ SLOPPY));
if (!prototype_parent->IsNull() && !prototype_parent->IsSpecObject()) {
- Handle<Object> args[1] = {prototype_parent};
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError("prototype_parent_not_an_object",
- HandleVector(args, 1)));
+ isolate, NewTypeError(MessageTemplate::kPrototypeParentNotAnObject,
+ prototype_parent));
}
constructor_parent = super_class;
} else {
// TODO(arv): Should be IsConstructor.
- Handle<Object> args[1] = {super_class};
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
- NewTypeError("extends_value_not_a_function", HandleVector(args, 1)));
+ NewTypeError(MessageTemplate::kExtendsValueNotFunction, super_class));
}
}
@@ -203,16 +201,9 @@ RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetOwnElement(object, index, function, DONT_ENUM, STRICT));
- } else {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(object, name,
- function, DONT_ENUM));
- }
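+ // DefinePropertyOrElementIgnoreAttributes handles array-index and named
+ // keys alike.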
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::DefinePropertyOrElementIgnoreAttributes(
+ object, name, function, DONT_ENUM));
return isolate->heap()->undefined_value();
}
@@ -254,74 +245,105 @@ RUNTIME_FUNCTION(Runtime_ClassGetSourceCode) {
}
-static Object* LoadFromSuper(Isolate* isolate, Handle<Object> receiver,
- Handle<JSObject> home_object, Handle<Name> name) {
+static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<JSObject> home_object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
isolate->ReportFailedAccessCheck(home_object);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
PrototypeIterator iter(isolate, home_object);
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
+ if (!proto->IsJSReceiver()) {
+ return Object::ReadAbsentProperty(isolate, proto, name, language_mode);
+ }
LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
- return *result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ Object::GetProperty(&it, language_mode), Object);
+ return result;
}
-static Object* LoadElementFromSuper(Isolate* isolate, Handle<Object> receiver,
- Handle<JSObject> home_object,
- uint32_t index) {
+static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<JSObject> home_object,
+ uint32_t index,
+ LanguageMode language_mode) {
if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
isolate->ReportFailedAccessCheck(home_object);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
PrototypeIterator iter(isolate, home_object);
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
+ if (!proto->IsJSReceiver()) {
+ Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
+ return Object::ReadAbsentProperty(isolate, proto, name, language_mode);
+ }
+ LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::GetElementWithReceiver(isolate, proto, receiver, index));
- return *result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ Object::GetProperty(&it, language_mode), Object);
+ return result;
}
+// TODO(conradw): It would be more efficient to have a separate runtime function
+// for strong mode.
RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
- return LoadFromSuper(isolate, receiver, home_object, name);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ LoadFromSuper(isolate, receiver, home_object, name, language_mode));
+ return *result;
}
RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
+
+ uint32_t index = 0;
+ Handle<Object> result;
- uint32_t index;
if (key->ToArrayIndex(&index)) {
- return LoadElementFromSuper(isolate, receiver, home_object, index);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, LoadElementFromSuper(isolate, receiver, home_object,
+ index, language_mode));
+ return *result;
}
Handle<Name> name;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
Runtime::ToName(isolate, key));
+ // TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
- return LoadElementFromSuper(isolate, receiver, home_object, index);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, LoadElementFromSuper(isolate, receiver, home_object,
+ index, language_mode));
+ return *result;
}
- return LoadFromSuper(isolate, receiver, home_object, name);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ LoadFromSuper(isolate, receiver, home_object, name, language_mode));
+ return *result;
}
@@ -361,11 +383,12 @@ static Object* StoreElementToSuper(Isolate* isolate,
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
+ LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Object::SetElementWithReceiver(isolate, proto, receiver, index, value,
- language_mode));
+ Object::SetSuperProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED));
return *result;
}
@@ -398,7 +421,7 @@ static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Object> key,
Handle<Object> value,
LanguageMode language_mode) {
- uint32_t index;
+ uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
return StoreElementToSuper(isolate, home_object, receiver, index, value,
@@ -407,6 +430,7 @@ static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Name> name;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
Runtime::ToName(isolate, key));
+ // TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
return StoreElementToSuper(isolate, home_object, receiver, index, value,
language_mode);
@@ -446,9 +470,7 @@ RUNTIME_FUNCTION(Runtime_HandleStepInForDerivedConstructors) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Debug* debug = isolate->debug();
// Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
- }
+ if (debug->StepInActive()) debug->HandleStepIn(function, true);
return *isolate->factory()->undefined_value();
}
@@ -463,5 +485,5 @@ RUNTIME_FUNCTION(Runtime_CallSuperWithSpread) {
UNIMPLEMENTED();
return nullptr;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index e5f3ba1cfb..1ba1e34356 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -27,26 +27,6 @@ RUNTIME_FUNCTION(Runtime_TheHole) {
}
-RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(FixedArray, object, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- return object->get(index);
-}
-
-
-RUNTIME_FUNCTION(Runtime_FixedArraySet) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, object, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_ARG_CHECKED(Object, value, 2);
- object->set(index, value);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_JSCollectionGetTable) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -65,12 +45,17 @@ RUNTIME_FUNCTION(Runtime_GenericHash) {
}
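+// JSSetInitialize/JSSetClear (and the JSMap equivalents below) mirror the
+// corresponding runtime functions and can also be called directly from C++.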
+void Runtime::JSSetInitialize(Isolate* isolate, Handle<JSSet> set) {
+ Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
+ set->set_table(*table);
+}
+
+
RUNTIME_FUNCTION(Runtime_SetInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
- holder->set_table(*table);
+ Runtime::JSSetInitialize(isolate, holder);
return *holder;
}
@@ -97,13 +82,18 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
}
+void Runtime::JSSetClear(Isolate* isolate, Handle<JSSet> set) {
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+ table = OrderedHashSet::Clear(table);
+ set->set_table(*table);
+}
+
+
RUNTIME_FUNCTION(Runtime_SetClear) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
- table = OrderedHashSet::Clear(table);
- holder->set_table(*table);
+ Runtime::JSSetClear(isolate, holder);
return isolate->heap()->undefined_value();
}
@@ -163,12 +153,17 @@ RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
}
+void Runtime::JSMapInitialize(Isolate* isolate, Handle<JSMap> map) {
+ Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
+ map->set_table(*table);
+}
+
+
RUNTIME_FUNCTION(Runtime_MapInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
- holder->set_table(*table);
+ Runtime::JSMapInitialize(isolate, holder);
return *holder;
}
@@ -184,13 +179,18 @@ RUNTIME_FUNCTION(Runtime_MapShrink) {
}
+void Runtime::JSMapClear(Isolate* isolate, Handle<JSMap> map) {
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+ table = OrderedHashMap::Clear(table);
+ map->set_table(*table);
+}
+
+
RUNTIME_FUNCTION(Runtime_MapClear) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
- table = OrderedHashMap::Clear(table);
- holder->set_table(*table);
+ Runtime::JSMapClear(isolate, holder);
return isolate->heap()->undefined_value();
}
@@ -316,41 +316,51 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_SMI_ARG_CHECKED(hash, 2)
RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- Handle<Object> lookup(table->Lookup(key), isolate);
+ Handle<Object> lookup(table->Lookup(key, hash), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_SMI_ARG_CHECKED(hash, 2)
RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- Handle<Object> lookup(table->Lookup(key), isolate);
+ Handle<Object> lookup(table->Lookup(key, hash), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
bool Runtime::WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
Handle<Object> key) {
+ int32_t hash =
+ Object::GetOrCreateHash(weak_collection->GetIsolate(), key)->value();
+ return WeakCollectionDelete(weak_collection, key, hash);
+}
+
+
+bool Runtime::WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, int32_t hash) {
DCHECK(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
DCHECK(table->IsKey(*key));
bool was_present = false;
Handle<ObjectHashTable> new_table =
- ObjectHashTable::Remove(table, key, &was_present);
+ ObjectHashTable::Remove(table, key, &was_present, hash);
weak_collection->set_table(*new_table);
if (*table != *new_table) {
// Zap the old table since we didn't record slots for its elements.
@@ -362,25 +372,28 @@ bool Runtime::WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_SMI_ARG_CHECKED(hash, 2)
RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- bool was_present = Runtime::WeakCollectionDelete(weak_collection, key);
+ bool was_present = Runtime::WeakCollectionDelete(weak_collection, key, hash);
return isolate->heap()->ToBoolean(was_present);
}
void Runtime::WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key, Handle<Object> value) {
+ Handle<Object> key, Handle<Object> value,
+ int32_t hash) {
DCHECK(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
DCHECK(table->IsKey(*key));
- Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
+ Handle<ObjectHashTable> new_table =
+ ObjectHashTable::Put(table, key, value, hash);
weak_collection->set_table(*new_table);
if (*table != *new_table) {
// Zap the old table since we didn't record slots for its elements.
@@ -391,15 +404,16 @@ void Runtime::WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_SMI_ARG_CHECKED(hash, 3)
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- Runtime::WeakCollectionSet(weak_collection, key, value);
+ Runtime::WeakCollectionSet(weak_collection, key, value, hash);
return *weak_collection;
}
@@ -440,5 +454,5 @@ RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
Runtime::WeakCollectionInitialize(isolate, weakmap);
return *weakmap;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 3b37bda125..4cfa84ac44 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -151,11 +151,11 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
PrintF("]\n");
}
function->ReplaceCode(function->shared()->code());
- // Evict optimized code for this function from the cache so that it
- // doesn't get used for new closures.
- function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
- "notify deoptimized");
}
+ // Evict optimized code for this function from the cache so that it
+ // doesn't get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
// unconditionally if the code is not already marked for deoptimization.
@@ -168,10 +168,9 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Code> current_code) {
+ Handle<JSFunction> function) {
// Keep track of whether we've succeeded in optimizing.
- if (!current_code->optimizable()) return false;
+ if (function->shared()->optimization_disabled()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
@@ -220,11 +219,12 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
DCHECK(!ast_id.IsNone());
- Compiler::ConcurrencyMode mode =
- isolate->concurrent_osr_enabled() &&
- (function->shared()->ast_node_count() > 512)
- ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
+ // Disable concurrent OSR for asm.js, to enable frame specialization.
+ Compiler::ConcurrencyMode mode = (isolate->concurrent_osr_enabled() &&
+ !function->shared()->asm_function() &&
+ function->shared()->ast_node_count() > 512)
+ ? Compiler::CONCURRENT
+ : Compiler::NOT_CONCURRENT;
Handle<Code> result = Handle<Code>::null();
OptimizedCompileJob* job = NULL;
@@ -253,14 +253,15 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
result = Compiler::GetConcurrentlyOptimizedCode(job);
- } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
+ } else if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
PrintF("[OSR - Compiling: ");
function->PrintName();
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- MaybeHandle<Code> maybe_result =
- Compiler::GetOptimizedCode(function, caller_code, mode, ast_id);
+ MaybeHandle<Code> maybe_result = Compiler::GetOptimizedCode(
+ function, caller_code, mode, ast_id,
+ (mode == Compiler::NOT_CONCURRENT) ? frame : nullptr);
if (maybe_result.ToHandle(&result) &&
result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
// Optimization is queued. Return to check later.
@@ -382,11 +383,10 @@ RUNTIME_FUNCTION(Runtime_CompileString) {
}
-static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- Handle<Object> receiver,
- LanguageMode language_mode,
- int scope_position) {
+static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ LanguageMode language_mode,
+ int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> native_context = Handle<Context>(context->native_context());
@@ -400,7 +400,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
MaybeHandle<Object> maybe_error = isolate->factory()->NewEvalError(
MessageTemplate::kCodeGenFromStrings, error_message);
if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
- return MakePair(isolate->heap()->exception(), NULL);
+ return isolate->heap()->exception();
}
// Deal with a normal eval call with a string argument. Compile it
@@ -411,14 +411,14 @@ static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
isolate, compiled,
Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
restriction, scope_position),
- MakePair(isolate->heap()->exception(), NULL));
- return MakePair(*compiled, *receiver);
+ isolate->heap()->exception());
+ return *compiled;
}
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
+RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
- DCHECK(args.length() == 6);
+ DCHECK(args.length() == 5);
Handle<Object> callee = args.at<Object>(0);
@@ -429,17 +429,17 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
// the first argument without doing anything).
if (*callee != isolate->native_context()->global_eval_fun() ||
!args[1]->IsString()) {
- return MakePair(*callee, isolate->heap()->undefined_value());
+ return *callee;
}
+ DCHECK(args[3]->IsSmi());
+ DCHECK(is_valid_language_mode(args.smi_at(3)));
+ LanguageMode language_mode = static_cast<LanguageMode>(args.smi_at(3));
DCHECK(args[4]->IsSmi());
- DCHECK(is_valid_language_mode(args.smi_at(4)));
- LanguageMode language_mode = static_cast<LanguageMode>(args.smi_at(4));
- DCHECK(args[5]->IsSmi());
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
- args.at<Object>(3), language_mode, args.smi_at(5));
-}
+ language_mode, args.smi_at(4));
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 844ca25fd5..c47b158564 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -7,6 +7,7 @@
#include "src/arguments.h"
#include "src/date.h"
#include "src/dateparser-inl.h"
+#include "src/messages.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -59,11 +60,19 @@ RUNTIME_FUNCTION(Runtime_DateSetValue) {
}
+RUNTIME_FUNCTION(Runtime_IsDate) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSDate());
+}
+
+
RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError("not_date_object", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(MessageTemplate::kNotDateObject));
}
@@ -173,18 +182,13 @@ RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
RUNTIME_FUNCTION(Runtime_DateField) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(JSDate, date, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
- if (!obj->IsJSDate()) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError("not_date_object", HandleVector<Object>(NULL, 0)));
- }
- JSDate* date = JSDate::cast(obj);
+ DCHECK_LE(0, index);
if (index == 0) return date->value();
return JSDate::GetField(date, Smi::FromInt(index));
}
-}
-} // namespace v8::internal
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 239f2b2e38..e7aaed1f6f 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -80,9 +80,8 @@ static Handle<Object> DebugGetProperty(LookupIterator* it,
if (!accessors->IsAccessorInfo()) {
return it->isolate()->factory()->undefined_value();
}
- MaybeHandle<Object> maybe_result = JSObject::GetPropertyWithAccessor(
- it->GetReceiver(), it->name(), it->GetHolder<JSObject>(),
- accessors);
+ MaybeHandle<Object> maybe_result =
+ JSObject::GetPropertyWithAccessor(it, SLOPPY);
Handle<Object> result;
if (!maybe_result.ToHandle(&result)) {
result = handle(it->isolate()->pending_exception(), it->isolate());
@@ -101,6 +100,183 @@ static Handle<Object> DebugGetProperty(LookupIterator* it,
}
+static Handle<Object> DebugGetProperty(Handle<Object> object,
+ Handle<Name> name) {
+ LookupIterator it(object, name);
+ return DebugGetProperty(&it);
+}
+
+
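+// Builds the [[IteratorHasMore]] / [[IteratorIndex]] / [[IteratorKind]]
+// name/value pairs that are exposed as internal properties of Map and Set
+// iterators.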
+template <class IteratorType>
+static MaybeHandle<JSArray> GetIteratorInternalProperties(
+ Isolate* isolate, Handle<IteratorType> object) {
+ Factory* factory = isolate->factory();
+ Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
+ RUNTIME_ASSERT_HANDLIFIED(iterator->kind()->IsSmi(), JSArray);
+ const char* kind = NULL;
+ switch (Smi::cast(iterator->kind())->value()) {
+ case IteratorType::kKindKeys:
+ kind = "keys";
+ break;
+ case IteratorType::kKindValues:
+ kind = "values";
+ break;
+ case IteratorType::kKindEntries:
+ kind = "entries";
+ break;
+ default:
+ RUNTIME_ASSERT_HANDLIFIED(false, JSArray);
+ }
+
+ Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
+ Handle<String> has_more =
+ factory->NewStringFromAsciiChecked("[[IteratorHasMore]]");
+ result->set(0, *has_more);
+ result->set(1, isolate->heap()->ToBoolean(iterator->HasMore()));
+
+ Handle<String> index =
+ factory->NewStringFromAsciiChecked("[[IteratorIndex]]");
+ result->set(2, *index);
+ result->set(3, iterator->index());
+
+ Handle<String> iterator_kind =
+ factory->NewStringFromAsciiChecked("[[IteratorKind]]");
+ result->set(4, *iterator_kind);
+ Handle<String> kind_str = factory->NewStringFromAsciiChecked(kind);
+ result->set(5, *kind_str);
+ return factory->NewJSArrayWithElements(result);
+}
+
+
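+// Returns a flat name/value FixedArray (wrapped in a JSArray) describing
+// engine-internal state for bound functions, collection iterators,
+// generators, promises and wrapper objects, or an empty array for any other
+// object.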
+MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
+ Handle<Object> object) {
+ Factory* factory = isolate->factory();
+ if (object->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+ if (function->shared()->bound()) {
+ RUNTIME_ASSERT_HANDLIFIED(function->function_bindings()->IsFixedArray(),
+ JSArray);
+
+ Handle<FixedArray> bindings(function->function_bindings());
+
+ Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
+ Handle<String> target =
+ factory->NewStringFromAsciiChecked("[[TargetFunction]]");
+ result->set(0, *target);
+ result->set(1, bindings->get(JSFunction::kBoundFunctionIndex));
+
+ Handle<String> bound_this =
+ factory->NewStringFromAsciiChecked("[[BoundThis]]");
+ result->set(2, *bound_this);
+ result->set(3, bindings->get(JSFunction::kBoundThisIndex));
+
+ Handle<FixedArray> arguments = factory->NewFixedArray(
+ bindings->length() - JSFunction::kBoundArgumentsStartIndex);
+ bindings->CopyTo(
+ JSFunction::kBoundArgumentsStartIndex, *arguments, 0,
+ bindings->length() - JSFunction::kBoundArgumentsStartIndex);
+ Handle<String> bound_args =
+ factory->NewStringFromAsciiChecked("[[BoundArgs]]");
+ result->set(4, *bound_args);
+ Handle<JSArray> arguments_array =
+ factory->NewJSArrayWithElements(arguments);
+ result->set(5, *arguments_array);
+ return factory->NewJSArrayWithElements(result);
+ }
+ } else if (object->IsJSMapIterator()) {
+ Handle<JSMapIterator> iterator = Handle<JSMapIterator>::cast(object);
+ return GetIteratorInternalProperties(isolate, iterator);
+ } else if (object->IsJSSetIterator()) {
+ Handle<JSSetIterator> iterator = Handle<JSSetIterator>::cast(object);
+ return GetIteratorInternalProperties(isolate, iterator);
+ } else if (object->IsJSGeneratorObject()) {
+ Handle<JSGeneratorObject> generator =
+ Handle<JSGeneratorObject>::cast(object);
+
+ const char* status = "suspended";
+ if (generator->is_closed()) {
+ status = "closed";
+ } else if (generator->is_executing()) {
+ status = "running";
+ } else {
+ DCHECK(generator->is_suspended());
+ }
+
+ Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
+ Handle<String> generator_status =
+ factory->NewStringFromAsciiChecked("[[GeneratorStatus]]");
+ result->set(0, *generator_status);
+ Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
+ result->set(1, *status_str);
+
+ Handle<String> function =
+ factory->NewStringFromAsciiChecked("[[GeneratorFunction]]");
+ result->set(2, *function);
+ result->set(3, generator->function());
+
+ Handle<String> receiver =
+ factory->NewStringFromAsciiChecked("[[GeneratorReceiver]]");
+ result->set(4, *receiver);
+ result->set(5, generator->receiver());
+ return factory->NewJSArrayWithElements(result);
+ } else if (Object::IsPromise(object)) {
+ Handle<JSObject> promise = Handle<JSObject>::cast(object);
+
+ Handle<Object> status_obj =
+ DebugGetProperty(promise, isolate->promise_status());
+ RUNTIME_ASSERT_HANDLIFIED(status_obj->IsSmi(), JSArray);
+ const char* status = "rejected";
+ int status_val = Handle<Smi>::cast(status_obj)->value();
+ switch (status_val) {
+ case +1:
+ status = "resolved";
+ break;
+ case 0:
+ status = "pending";
+ break;
+ default:
+ DCHECK_EQ(-1, status_val);
+ }
+
+ Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
+ Handle<String> promise_status =
+ factory->NewStringFromAsciiChecked("[[PromiseStatus]]");
+ result->set(0, *promise_status);
+ Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
+ result->set(1, *status_str);
+
+ Handle<Object> value_obj =
+ DebugGetProperty(promise, isolate->promise_value());
+ Handle<String> promise_value =
+ factory->NewStringFromAsciiChecked("[[PromiseValue]]");
+ result->set(2, *promise_value);
+ result->set(3, *value_obj);
+ return factory->NewJSArrayWithElements(result);
+ } else if (object->IsJSValue()) {
+ Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+
+ Handle<FixedArray> result = factory->NewFixedArray(2);
+ Handle<String> primitive_value =
+ factory->NewStringFromAsciiChecked("[[PrimitiveValue]]");
+ result->set(0, *primitive_value);
+ result->set(1, js_value->value());
+ return factory->NewJSArrayWithElements(result);
+ }
+ return factory->NewJSArray(0);
+}
+
+
+RUNTIME_FUNCTION(Runtime_DebugGetInternalProperties) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+ Handle<JSArray> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Runtime::GetInternalProperties(isolate, obj));
+ return *result;
+}
+
+
// Get debugger related details for an object property, in the following format:
// 0: Property value
// 1: Property details
@@ -130,6 +306,8 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
// Check if the name is trivially convertible to an index and get the element
// if so.
uint32_t index;
+ // TODO(verwaest): Make sure DebugGetProperty can handle arrays, and remove
+ // this special case.
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
Handle<Object> element_or_char;
@@ -246,9 +424,8 @@ RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::GetElementWithInterceptor(obj, obj, index, true));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::GetElement(isolate, obj, index));
return *result;
}
@@ -280,8 +457,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
it.frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
- // Omit functions from native scripts.
- if (!frames[i].function()->IsFromNativeScript()) n++;
+ // Omit functions from native and extension scripts.
+ if (frames[i].function()->IsSubjectToDebugging()) n++;
}
}
return Smi::FromInt(n);
@@ -299,7 +476,9 @@ class FrameInspector {
// Calculate the deoptimized frame.
if (frame->is_optimized()) {
// TODO(turbofan): Revisit once we support deoptimization.
- if (frame->LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ if (frame->LookupCode()->is_turbofanned() &&
+ frame->function()->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization) {
is_optimized_ = false;
return;
}
@@ -331,7 +510,9 @@ class FrameInspector {
}
Object* GetExpression(int index) {
// TODO(turbofan): Revisit once we support deoptimization.
- if (frame_->LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ if (frame_->LookupCode()->is_turbofanned() &&
+ frame_->function()->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization) {
return isolate_->heap()->undefined_value();
}
return is_optimized_ ? deoptimized_frame_->GetExpression(index)
@@ -402,8 +583,8 @@ int Runtime::FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index) {
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
it->frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
- // Omit functions from native scripts.
- if (frames[i].function()->IsFromNativeScript()) continue;
+ // Omit functions from native and extension scripts.
+ if (!frames[i].function()->IsSubjectToDebugging()) continue;
if (++count == index) return i;
}
}
@@ -500,11 +681,13 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->LocalName(i));
VariableMode mode;
+ VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
locals->set(local * 2, *name);
int context_slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
+ scope_info, name, &mode, &location, &init_flag, &maybe_assigned_flag);
+ DCHECK(VariableLocation::CONTEXT == location);
Object* value = context->get(context_slot_index);
locals->set(local * 2 + 1, value);
local++;
@@ -675,17 +858,58 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
Handle<String> parameter_name) {
VariableMode mode;
+ VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag,
- &maybe_assigned_flag) != -1;
+ return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &location,
+ &init_flag, &maybe_assigned_flag) != -1;
+}
+
+
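+// Materializes a "this" binding for the frame by chaining a synthetic catch
+// context named "this" in front of |target|, unless the function context
+// already holds "this" or the scope kind has no receiver to expose.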
+static Handle<Context> MaterializeReceiver(Isolate* isolate,
+ Handle<Context> target,
+ Handle<JSFunction> function,
+ JavaScriptFrame* frame) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+ Handle<Object> receiver;
+ switch (scope_info->scope_type()) {
+ case FUNCTION_SCOPE: {
+ VariableMode mode;
+ VariableLocation location;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+
+ // Don't bother creating a fake context node if "this" is in the context
+ // already.
+ if (ScopeInfo::ContextSlotIndex(
+ scope_info, isolate->factory()->this_string(), &mode, &location,
+ &init_flag, &maybe_assigned_flag) >= 0) {
+ return target;
+ }
+ receiver = handle(frame->receiver(), isolate);
+ break;
+ }
+ case MODULE_SCOPE:
+ receiver = isolate->factory()->undefined_value();
+ break;
+ case SCRIPT_SCOPE:
+ receiver = handle(function->global_proxy(), isolate);
+ break;
+ default:
+ // For eval code, arrow functions, and the like, there's no "this" binding
+ // to materialize.
+ return target;
+ }
+
+ return isolate->factory()->NewCatchContext(
+ function, target, isolate->factory()->this_string(), receiver);
}
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-MUST_USE_RESULT
-static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
+static void MaterializeStackLocalsWithFrameInspector(
Isolate* isolate, Handle<JSObject> target, Handle<ScopeInfo> scope_info,
FrameInspector* frame_inspector) {
// First fill all parameters.
@@ -703,9 +927,7 @@ static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
isolate);
DCHECK(!value->IsTheHole());
- RETURN_ON_EXCEPTION(isolate, Runtime::SetObjectProperty(
- isolate, target, name, value, SLOPPY),
- JSObject);
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
// Second fill all stack locals.
@@ -719,23 +941,18 @@ static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
value = isolate->factory()->undefined_value();
}
- RETURN_ON_EXCEPTION(isolate, Runtime::SetObjectProperty(
- isolate, target, name, value, SLOPPY),
- JSObject);
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
-
- return target;
}
-MUST_USE_RESULT
-static MaybeHandle<JSObject> MaterializeStackLocalsWithFrameInspector(
+static void MaterializeStackLocalsWithFrameInspector(
Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function,
FrameInspector* frame_inspector) {
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- return MaterializeStackLocalsWithFrameInspector(isolate, target, scope_info,
- frame_inspector);
+ MaterializeStackLocalsWithFrameInspector(isolate, target, scope_info,
+ frame_inspector);
}
@@ -788,10 +1005,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, function_context,
- target)) {
- return MaybeHandle<JSObject>();
- }
+ ScopeInfo::CopyContextLocalsToScopeObject(scope_info, function_context,
+ target);
// Finally copy any properties from the function context extension.
// These will be variables introduced by eval.
@@ -836,10 +1051,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScriptScope(
Handle<Context> context =
ScriptContextTable::GetContext(script_contexts, context_index);
Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
- if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
- script_scope)) {
- return MaybeHandle<JSObject>();
- }
+ ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
+ script_scope);
}
return script_scope;
}
@@ -852,11 +1065,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
Handle<JSObject> local_scope =
isolate->factory()->NewJSObject(isolate->object_function());
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, local_scope,
- MaterializeStackLocalsWithFrameInspector(isolate, local_scope, function,
- &frame_inspector),
- JSObject);
+ MaterializeStackLocalsWithFrameInspector(isolate, local_scope, function,
+ &frame_inspector);
return MaterializeLocalContext(isolate, local_scope, function, frame);
}
@@ -871,10 +1081,12 @@ static bool SetContextLocalValue(Isolate* isolate, Handle<ScopeInfo> scope_info,
Handle<String> next_name(scope_info->ContextLocalName(i));
if (String::Equals(variable_name, next_name)) {
VariableMode mode;
+ VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int context_index = ScopeInfo::ContextSlotIndex(
- scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
+ int context_index =
+ ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &location,
+ &init_flag, &maybe_assigned_flag);
context->set(context_index, *new_value);
return true;
}
@@ -976,8 +1188,8 @@ static bool SetBlockVariableValue(Isolate* isolate,
// Create a plain JSObject which materializes the closure content for the
// context.
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
- Isolate* isolate, Handle<Context> context) {
+static Handle<JSObject> MaterializeClosure(Isolate* isolate,
+ Handle<Context> context) {
DCHECK(context->IsFunctionContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
@@ -989,31 +1201,24 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
- closure_scope)) {
- return MaybeHandle<JSObject>();
- }
+ ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context, closure_scope);
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
- JSObject);
+ DCHECK(ext->IsJSContextExtensionObject());
+ Handle<FixedArray> keys =
+ JSReceiver::GetKeys(ext, JSReceiver::OWN_ONLY).ToHandleChecked();
for (int i = 0; i < keys->length(); i++) {
HandleScope scope(isolate);
// Names of variables introduced by eval are strings.
DCHECK(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
- RETURN_ON_EXCEPTION(isolate, Runtime::DefineObjectProperty(
- closure_scope, key, value, NONE),
- JSObject);
+ Handle<Object> value = Object::GetProperty(ext, key).ToHandleChecked();
+ JSObject::SetOwnPropertyIgnoreAttributes(closure_scope, key, value, NONE)
+ .Check();
}
}
@@ -1040,12 +1245,13 @@ static bool SetClosureVariableValue(Isolate* isolate, Handle<Context> context,
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
+ DCHECK(ext->IsJSContextExtensionObject());
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
DCHECK(maybe.IsJust());
if (maybe.FromJust()) {
// We don't expect this to do anything except replace the property value.
- Runtime::DefineObjectProperty(ext, variable_name, new_value, NONE)
- .Assert();
+ JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
+ NONE).Check();
return true;
}
}
@@ -1074,17 +1280,16 @@ static bool SetScriptVariableValue(Handle<Context> context,
// Create a plain JSObject which materializes the scope for the specified
// catch context.
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeCatchScope(
- Isolate* isolate, Handle<Context> context) {
+static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
+ Handle<Context> context) {
DCHECK(context->IsCatchContext());
Handle<String> name(String::cast(context->extension()));
Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
isolate);
Handle<JSObject> catch_scope =
isolate->factory()->NewJSObject(isolate->object_function());
- RETURN_ON_EXCEPTION(isolate, Runtime::DefineObjectProperty(
- catch_scope, name, thrown_object, NONE),
- JSObject);
+ JSObject::SetOwnPropertyIgnoreAttributes(catch_scope, name, thrown_object,
+ NONE).Check();
return catch_scope;
}
@@ -1104,28 +1309,26 @@ static bool SetCatchVariableValue(Isolate* isolate, Handle<Context> context,
// Create a plain JSObject which materializes the block scope for the specified
// block context.
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope(
- Isolate* isolate, Handle<ScopeInfo> scope_info, Handle<Context> context,
- JavaScriptFrame* frame, int inlined_jsframe_index) {
+static Handle<JSObject> MaterializeBlockScope(Isolate* isolate,
+ Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index) {
Handle<JSObject> block_scope =
isolate->factory()->NewJSObject(isolate->object_function());
if (frame != nullptr) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- RETURN_ON_EXCEPTION(isolate,
- MaterializeStackLocalsWithFrameInspector(
- isolate, block_scope, scope_info, &frame_inspector),
- JSObject);
+ MaterializeStackLocalsWithFrameInspector(isolate, block_scope, scope_info,
+ &frame_inspector);
}
if (!context.is_null()) {
Handle<ScopeInfo> scope_info_from_context(
ScopeInfo::cast(context->extension()));
// Fill all context locals.
- if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info_from_context,
- context, block_scope)) {
- return MaybeHandle<JSObject>();
- }
+ ScopeInfo::CopyContextLocalsToScopeObject(scope_info_from_context, context,
+ block_scope);
}
return block_scope;
@@ -1145,10 +1348,7 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
- module_scope)) {
- return MaybeHandle<JSObject>();
- }
+ ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context, module_scope);
return module_scope;
}
@@ -1298,16 +1498,21 @@ class ScopeIterator {
context_ = Handle<Context>();
return;
}
- if (scope_type == ScopeTypeScript) seen_script_scope_ = true;
- if (nested_scope_chain_.is_empty()) {
- if (scope_type == ScopeTypeScript) {
- if (context_->IsScriptContext()) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- CHECK(context_->IsNativeContext());
- } else {
+ if (scope_type == ScopeTypeScript) {
+ seen_script_scope_ = true;
+ if (context_->IsScriptContext()) {
context_ = Handle<Context>(context_->previous(), isolate_);
}
+ if (!nested_scope_chain_.is_empty()) {
+ DCHECK_EQ(nested_scope_chain_.last()->scope_type(), SCRIPT_SCOPE);
+ nested_scope_chain_.RemoveLast();
+ DCHECK(nested_scope_chain_.is_empty());
+ }
+ CHECK(context_->IsNativeContext());
+ return;
+ }
+ if (nested_scope_chain_.is_empty()) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
} else {
if (nested_scope_chain_.last()->HasContext()) {
DCHECK(context_->previous() != NULL);
@@ -1660,9 +1865,9 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
if (location.IsStepInLocation()) {
Smi* position_value = Smi::FromInt(location.position());
RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetElement(
- array, index, Handle<Object>(position_value, isolate),
- NONE, SLOPPY));
+ isolate,
+ JSObject::SetElement(array, index, handle(position_value, isolate),
+ SLOPPY));
index++;
}
}
@@ -1984,7 +2189,7 @@ static bool IsPositionAlignmentCodeCorrect(int alignment) {
RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
-
+ RUNTIME_ASSERT(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
@@ -2012,6 +2217,7 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= function->shared()->start_position() &&
@@ -2036,6 +2242,7 @@ RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
@@ -2067,6 +2274,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
// Clear break point.
@@ -2164,6 +2372,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
RUNTIME_FUNCTION(Runtime_ClearStepping) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
isolate->debug()->ClearStepping();
return isolate->heap()->undefined_value();
}
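
Several debug runtime entries in this file gain the same first line, RUNTIME_ASSERT(isolate->debug()->is_active()), rejecting calls made while no debugger is attached. A rough standalone model of that guard (Result and Debug are invented stand-ins for the runtime's failure sentinel and debug state):

#include <iostream>

struct Debug {
  bool active;
  bool is_active() const { return active; }
};

enum class Result { kOk, kIllegalAccess };

Result ClearStepping(Debug* debug) {
  // RUNTIME_ASSERT analog: refuse to run without an active debugger.
  if (!debug->is_active()) return Result::kIllegalAccess;
  // ... clearing of one-shot break points would happen here ...
  return Result::kOk;
}

int main() {
  Debug debug{false};
  std::cout << (ClearStepping(&debug) == Result::kIllegalAccess) << "\n";  // 1
  debug.active = true;
  std::cout << (ClearStepping(&debug) == Result::kOk) << "\n";             // 1
}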
@@ -2171,24 +2380,23 @@ RUNTIME_FUNCTION(Runtime_ClearStepping) {
// Helper function to find or create the arguments object for
// Runtime_DebugEvaluate.
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeArgumentsObject(
- Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function) {
+static void MaterializeArgumentsObject(Isolate* isolate,
+ Handle<JSObject> target,
+ Handle<JSFunction> function) {
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
- if (!function->shared()->is_function()) return target;
+ if (!function->shared()->is_function()) return;
Maybe<bool> maybe = JSReceiver::HasOwnProperty(
target, isolate->factory()->arguments_string());
- if (!maybe.IsJust()) return MaybeHandle<JSObject>();
- if (maybe.FromJust()) return target;
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) return;
// FunctionGetArguments can't throw an exception.
Handle<JSObject> arguments =
Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
Handle<String> arguments_str = isolate->factory()->arguments_string();
- RETURN_ON_EXCEPTION(isolate, Runtime::DefineObjectProperty(
- target, arguments_str, arguments, NONE),
- JSObject);
- return target;
+ JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
+ NONE).Check();
}
@@ -2225,8 +2433,6 @@ static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
result = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
}
- // Clear the oneshot breakpoints so that the debugger does not step further.
- isolate->debug()->ClearStepping();
return result;
}
@@ -2282,21 +2488,22 @@ class EvaluationContextBuilder {
ScopeIterator::ScopeType scope_type = it.Type();
if (scope_type == ScopeIterator::ScopeTypeLocal) {
+ Handle<Context> parent_context =
+ it.HasContext() ? it.CurrentContext() : outer_context;
+
+ // The "this" binding, if any, can't be bound via "with". If we need
+ // to, add another node onto the outer context to bind "this".
+ parent_context =
+ MaterializeReceiver(isolate, parent_context, function, frame);
+
Handle<JSObject> materialized_function =
NewJSObjectWithNullProto(isolate);
- if (!MaterializeStackLocalsWithFrameInspector(
- isolate, materialized_function, function, &frame_inspector)
- .ToHandle(&materialized_function))
- return;
+ MaterializeStackLocalsWithFrameInspector(isolate, materialized_function,
+ function, &frame_inspector);
- if (!MaterializeArgumentsObject(isolate, materialized_function,
- function)
- .ToHandle(&materialized_function))
- return;
+ MaterializeArgumentsObject(isolate, materialized_function, function);
- Handle<Context> parent_context =
- it.HasContext() ? it.CurrentContext() : outer_context;
Handle<Context> with_context = isolate->factory()->NewWithContext(
function, parent_context, materialized_function);
@@ -2323,10 +2530,9 @@ class EvaluationContextBuilder {
} else if (scope_type == ScopeIterator::ScopeTypeBlock) {
Handle<JSObject> materialized_object =
NewJSObjectWithNullProto(isolate);
- if (!MaterializeStackLocalsWithFrameInspector(
- isolate, materialized_object, it.CurrentScopeInfo(),
- &frame_inspector).ToHandle(&materialized_object))
- return;
+ MaterializeStackLocalsWithFrameInspector(isolate, materialized_object,
+ it.CurrentScopeInfo(),
+ &frame_inspector);
if (it.HasContext()) {
Handle<Context> cloned_context =
Handle<Context>::cast(FixedArray::CopySize(
@@ -2471,6 +2677,14 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
}
+static inline bool IsDebugContext(Isolate* isolate, Context* context) {
+ // Try to unwrap the script context if it exists.
+ if (context->IsScriptContext()) context = context->previous();
+ DCHECK_NOT_NULL(context);
+ return context == *isolate->debug()->debug_context();
+}
+
+
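+// IsDebugContext compares context identity only after unwrapping an optional
+// script-context layer. A toy model of the walk, with an invented Context
+// struct in place of V8's:
+//
+//   #include <cassert>
+//
+//   struct Context {
+//     bool is_script_context;
+//     Context* previous;
+//   };
+//
+//   bool IsDebugContext(Context* context, Context* debug_context) {
+//     if (context->is_script_context) context = context->previous;
+//     assert(context != nullptr);
+//     return context == debug_context;
+//   }
+//
+//   int main() {
+//     Context debug{false, nullptr};
+//     Context wrapper{true, &debug};
+//     assert(IsDebugContext(&wrapper, &debug));
+//     assert(IsDebugContext(&debug, &debug));
+//   }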
RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
HandleScope scope(isolate);
@@ -2490,7 +2704,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
SaveContext* top = &save;
- while (top != NULL && *top->context() == *isolate->debug()->debug_context()) {
+ while (top != NULL && IsDebugContext(isolate, *top->context())) {
top = top->prev();
}
if (top != NULL) {
@@ -2513,10 +2727,15 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
Handle<FixedArray> instances;
{
DebugScope debug_scope(isolate->debug());
+ if (debug_scope.failed()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
// Fill the script objects.
instances = isolate->debug()->GetLoadedScripts();
}
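
This is the first of several hunks adding the same three-line check after constructing a DebugScope: if entering the debugger fails, the pending exception is propagated instead of proceeding. Sketched shape, assuming an RAII-style scope with a failed() query (names illustrative):

#include <iostream>

class DebugScope {
 public:
  explicit DebugScope(bool can_enter) : failed_(!can_enter) {}
  bool failed() const { return failed_; }
 private:
  bool failed_;
};

const char* GetLoadedScripts(bool debugger_ok) {
  DebugScope debug_scope(debugger_ok);
  // Mirrors the added check: bail out with the pending exception.
  if (debug_scope.failed()) return "exception";
  return "scripts";
}

int main() {
  std::cout << GetLoadedScripts(false) << "\n";  // exception
  std::cout << GetLoadedScripts(true) << "\n";   // scripts
}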
@@ -2776,6 +2995,7 @@ RUNTIME_FUNCTION(Runtime_GetFunctionCodePositionFromSource) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
DCHECK(args.length() == 2);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -2816,6 +3036,11 @@ RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
DebugScope debug_scope(isolate->debug());
+ if (debug_scope.failed()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2828,7 +3053,15 @@ RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
RUNTIME_FUNCTION(Runtime_GetDebugContext) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- Handle<Context> context = isolate->debug()->GetDebugContext();
+ Handle<Context> context;
+ {
+ DebugScope debug_scope(isolate->debug());
+ if (debug_scope.failed()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+ context = isolate->debug()->GetDebugContext();
+ }
if (context.is_null()) return isolate->heap()->undefined_value();
context->set_security_token(isolate->native_context()->security_token());
return context->global_proxy();
@@ -2913,6 +3146,8 @@ RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
// built-in function such as Array.forEach to enable stepping into the callback.
RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
DCHECK(args.length() == 1);
+ RUNTIME_ASSERT(isolate->debug()->is_active());
+
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
@@ -2977,9 +3212,20 @@ RUNTIME_FUNCTION(Runtime_DebugIsActive) {
}
+RUNTIME_FUNCTION(Runtime_DebugHandleStepIntoAccessor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Debug* debug = isolate->debug();
+ // Handle stepping into constructors if step into is active.
+ if (debug->StepInActive()) debug->HandleStepIn(function, false);
+ return *isolate->factory()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
return NULL;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
new file mode 100644
index 0000000000..c793e88b92
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/arguments.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_ForInDone) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+ CONVERT_SMI_ARG_CHECKED(length, 1);
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, length);
+ return isolate->heap()->ToBoolean(index == length);
+}
+
+
+RUNTIME_FUNCTION(Runtime_ForInFilter) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ // TODO(turbofan): Fast case for array indices.
+ Handle<Name> name;
+ if (!Runtime::ToName(isolate, key).ToHandle(&name)) {
+ return isolate->heap()->exception();
+ }
+ Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.FromJust()) return *name;
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_ForInNext) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, cache_array, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 2);
+ CONVERT_SMI_ARG_CHECKED(index, 3);
+ Handle<Object> key = handle(cache_array->get(index), isolate);
+ // No filtering is needed if the expected map still matches that of the
+ // receiver, and none is needed for proxies.
+ if (receiver->map() == *cache_type || *cache_type == Smi::FromInt(0)) {
+ return *key;
+ }
+ // TODO(turbofan): Fast case for array indices.
+ Handle<Name> name;
+ if (!Runtime::ToName(isolate, key).ToHandle(&name)) {
+ return isolate->heap()->exception();
+ }
+ Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ if (result.FromJust()) return *name;
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_ForInStep) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, Smi::kMaxValue);
+ return Smi::FromInt(index + 1);
+}
+
+} // namespace internal
+} // namespace v8
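
The new file splits for-in iteration into small primitives that generated code can call individually. A self-contained sketch of the loop a compiler would emit against them, with a vector standing in for the enum-cache FixedArray and a set for the receiver's current keys (ForInNext's map check is folded into the filter here):

#include <iostream>
#include <set>
#include <string>
#include <vector>

bool ForInDone(int index, int length) { return index == length; }
int ForInStep(int index) { return index + 1; }

// Yield the key only if the receiver still has it, so properties deleted
// mid-iteration are skipped.
bool ForInFilter(const std::set<std::string>& receiver, const std::string& key) {
  return receiver.count(key) != 0;
}

int main() {
  std::set<std::string> receiver = {"a", "c"};
  std::vector<std::string> cache = {"a", "b", "c"};  // enum cache snapshot
  for (int i = 0; !ForInDone(i, static_cast<int>(cache.size()));
       i = ForInStep(i)) {
    if (!ForInFilter(receiver, cache[i])) continue;  // "b" was deleted
    std::cout << cache[i] << "\n";                   // prints a, c
  }
}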
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 0bcbf9c53c..749e16b9ed 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -10,6 +10,7 @@
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
+#include "src/messages.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -231,7 +232,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
target_shared->set_feedback_vector(source_shared->feedback_vector());
target_shared->set_internal_formal_parameter_count(
source_shared->internal_formal_parameter_count());
- target_shared->set_script(source_shared->script());
target_shared->set_start_position_and_type(
source_shared->start_position_and_type());
target_shared->set_end_position(source_shared->end_position());
@@ -241,6 +241,8 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
source_shared->opt_count_and_bailout_reason());
target_shared->set_native(was_native);
target_shared->set_profiler_ticks(source_shared->profiler_ticks());
+ SharedFunctionInfo::SetScript(
+ target_shared, Handle<Object>(source_shared->script(), isolate));
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
@@ -310,14 +312,14 @@ RUNTIME_FUNCTION(Runtime_IsConstructor) {
}
-RUNTIME_FUNCTION(Runtime_SetInlineBuiltinFlag) {
+RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(*object);
- func->shared()->set_inline_builtin(true);
+ func->shared()->set_force_inline(true);
}
return isolate->heap()->undefined_value();
}
@@ -336,22 +338,36 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(Isolate* isolate,
frame->GetFunctions(&functions);
if (functions.length() > 1) {
int inlined_jsframe_index = functions.length() - 1;
- JSFunction* inlined_function = functions[inlined_jsframe_index];
- SlotRefValueBuilder slot_refs(
- frame, inlined_jsframe_index,
- inlined_function->shared()->internal_formal_parameter_count());
+ TranslatedState translated_values(frame);
+ translated_values.Prepare(false, frame->fp());
- int args_count = slot_refs.args_length();
+ int argument_count = 0;
+ TranslatedFrame* translated_frame =
+ translated_values.GetArgumentsInfoFromJSFrameIndex(
+ inlined_jsframe_index, &argument_count);
+ TranslatedFrame::iterator iter = translated_frame->begin();
- *total_argc = prefix_argc + args_count;
+ // Skip the function.
+ iter++;
+
+ // Skip the receiver.
+ iter++;
+ argument_count--;
+
+ *total_argc = prefix_argc + argument_count;
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
- slot_refs.Prepare(isolate);
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = slot_refs.GetNext(isolate, 0);
- param_data[prefix_argc + i] = val;
+ bool should_deoptimize = false;
+ for (int i = 0; i < argument_count; i++) {
+ should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
+ Handle<Object> value = iter->GetValue();
+ param_data[prefix_argc + i] = value;
+ iter++;
+ }
+
+ if (should_deoptimize) {
+ translated_values.StoreMaterializedValuesAndDeopt();
}
- slot_refs.Finish(isolate);
return param_data;
} else {
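
The rewritten branch above walks the TranslatedFrame's values directly: slot 0 is the closure, slot 1 the receiver, the rest are arguments, and encountering any materialized object forces StoreMaterializedValuesAndDeopt. Simplified shape, with a plain vector standing in for the translated frame:

#include <cassert>
#include <vector>

struct TranslatedValue {
  int value;
  bool materialized;
};

std::vector<int> CollectArguments(const std::vector<TranslatedValue>& frame,
                                  bool* should_deoptimize) {
  std::vector<int> args;
  *should_deoptimize = false;
  for (size_t i = 2; i < frame.size(); ++i) {  // skip function and receiver
    *should_deoptimize = *should_deoptimize || frame[i].materialized;
    args.push_back(frame[i].value);
  }
  return args;
}

int main() {
  bool deopt = false;
  std::vector<int> args =
      CollectArguments({{0, false}, {1, false}, {7, false}, {8, true}}, &deopt);
  assert(args.size() == 2 && deopt);  // materialized arg forces a deopt
}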
@@ -381,6 +397,7 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
// TODO(lrn): Create bound function in C++ code from premade shared info.
bound_function->shared()->set_bound(true);
+ bound_function->shared()->set_optimized_code_map(Smi::FromInt(0));
bound_function->shared()->set_inferred_name(isolate->heap()->empty_string());
// Get all arguments of calling function (Function.prototype.bind).
int argc = 0;
@@ -430,8 +447,11 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
isolate->native_context()->bound_function_map());
JSObject::MigrateToMap(bound_function, bound_function_map);
Handle<String> length_string = isolate->factory()->length_string();
+ // These attributes must be kept in sync with how the bootstrapper
+ // configures the bound_function_map retrieved above.
+ // We use ...IgnoreAttributes() here because the length property is
+ // read-only.
PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::SetOwnPropertyIgnoreAttributes(
bound_function, length_string, new_length, attr));
@@ -584,6 +604,16 @@ RUNTIME_FUNCTION(Runtime_GetConstructorDelegate) {
}
+RUNTIME_FUNCTION(Runtime_GetOriginalConstructor) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ return frame->IsConstructor() ? frame->GetOriginalConstructor()
+ : isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_CallFunction) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_Call(args, isolate);
@@ -605,5 +635,13 @@ RUNTIME_FUNCTION(Runtime_IsFunction) {
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSFunction());
}
+
+
+RUNTIME_FUNCTION(Runtime_ThrowStrongModeTooFewArguments) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(MessageTemplate::kStrongArity));
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index d8b084431b..ed86c4dd74 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -111,7 +111,7 @@ RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
int offset = generator_object->continuation();
DCHECK(offset > 0);
frame->set_pc(pc + offset);
- if (FLAG_enable_ool_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool) {
frame->set_constant_pool(
generator_object->function()->code()->constant_pool());
}
@@ -226,5 +226,5 @@ RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
UNREACHABLE(); // Optimization disabled in SetUpGenerators().
return NULL;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 346e773f86..3da71a98ef 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -9,6 +9,7 @@
#include "src/api-natives.h"
#include "src/arguments.h"
#include "src/i18n.h"
+#include "src/messages.h"
#include "src/runtime/runtime-utils.h"
#include "unicode/brkiter.h"
@@ -234,7 +235,7 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
Handle<JSObject> obj = Handle<JSObject>::cast(input);
Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- Handle<Object> tag = JSObject::GetDataProperty(obj, marker);
+ Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
return isolate->heap()->ToBoolean(!tag->IsUndefined());
}
@@ -251,7 +252,7 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
Handle<JSObject> obj = Handle<JSObject>::cast(input);
Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
- Handle<Object> tag = JSObject::GetDataProperty(obj, marker);
+ Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
return isolate->heap()->ToBoolean(tag->IsString() &&
String::cast(*tag)->Equals(*expected_type));
}
@@ -281,23 +282,21 @@ RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
if (!input->IsJSObject()) {
- Vector<Handle<Object> > arguments = HandleVector(&input, 1);
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError("not_intl_object", arguments));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotIntlObject, input));
}
Handle<JSObject> obj = Handle<JSObject>::cast(input);
Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
- Handle<Object> impl = JSObject::GetDataProperty(obj, marker);
+ Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
if (impl->IsTheHole()) {
- Vector<Handle<Object> > arguments = HandleVector(&obj, 1);
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError("not_intl_object", arguments));
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
}
return *impl;
}
@@ -746,7 +745,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
return *isolate->factory()->NewStringFromStaticChars("unknown");
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_I18N_SUPPORT
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 1c32045460..290d7af2fa 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -60,6 +60,39 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
}
+RUNTIME_FUNCTION(Runtime_NewTypeError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_INT32_ARG_CHECKED(template_index, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ auto message_template =
+ static_cast<MessageTemplate::Template>(template_index);
+ return *isolate->factory()->NewTypeError(message_template, arg0);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewReferenceError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_INT32_ARG_CHECKED(template_index, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ auto message_template =
+ static_cast<MessageTemplate::Template>(template_index);
+ return *isolate->factory()->NewReferenceError(message_template, arg0);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_INT32_ARG_CHECKED(template_index, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
+ auto message_template =
+ static_cast<MessageTemplate::Template>(template_index);
+ return *isolate->factory()->NewSyntaxError(message_template, arg0);
+}
+
+
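+// Runtime_NewTypeError, Runtime_NewReferenceError and Runtime_NewSyntaxError
+// all share one calling convention: the message template arrives as a Smi
+// index and is cast back to the enum. A standalone illustration of that round
+// trip (the enum values and messages below are invented):
+//
+//   #include <cassert>
+//   #include <cstring>
+//
+//   namespace MessageTemplate {
+//   enum Template { kNonObjectPropertyLoad, kNotIntlObject, kStrongArity };
+//   }
+//
+//   const char* NewTypeError(int template_index) {
+//     auto message_template =
+//         static_cast<MessageTemplate::Template>(template_index);
+//     switch (message_template) {
+//       case MessageTemplate::kStrongArity: return "too few arguments";
+//       default: return "type error";
+//     }
+//   }
+//
+//   int main() {
+//     // Generated code would pass Smi::FromInt(kStrongArity), i.e. 2 here.
+//     assert(std::strcmp(NewTypeError(MessageTemplate::kStrongArity),
+//                        "too few arguments") == 0);
+//   }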
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -70,6 +103,14 @@ RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
}
+RUNTIME_FUNCTION(Runtime_ThrowStrongModeImplicitConversion) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kStrongImplicitConversion));
+}
+
+
RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
DCHECK(args.length() == 3);
HandleScope scope(isolate);
@@ -79,7 +120,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
if (debug_event) isolate->debug()->OnPromiseReject(promise, value);
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
// Do not report if we actually have a handler.
- if (JSObject::GetDataProperty(promise, key)->IsUndefined()) {
+ if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
isolate->ReportPromiseReject(promise, value,
v8::kPromiseRejectWithNoHandler);
}
@@ -93,7 +134,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
// At this point, no revocation has been issued yet.
- RUNTIME_ASSERT(JSObject::GetDataProperty(promise, key)->IsUndefined());
+ RUNTIME_ASSERT(JSReceiver::GetDataProperty(promise, key)->IsUndefined());
isolate->ReportPromiseReject(promise, Handle<Object>(),
v8::kPromiseHandlerAddedAfterReject);
return isolate->heap()->undefined_value();
@@ -408,5 +449,23 @@ RUNTIME_FUNCTION(Runtime_HarmonyToString) {
// TODO(caitp): Delete this runtime method when removing --harmony-tostring
return isolate->heap()->ToBoolean(FLAG_harmony_tostring);
}
+
+
+RUNTIME_FUNCTION(Runtime_GetTypeFeedbackVector) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ return function->shared()->feedback_vector();
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetCallerJSFunction) {
+ SealHandleScope shs(isolate);
+ StackFrameIterator it(isolate);
+ RUNTIME_ASSERT(it.frame()->type() == StackFrame::STUB);
+ it.Advance();
+ RUNTIME_ASSERT(it.frame()->type() == StackFrame::JAVA_SCRIPT);
+ return JavaScriptFrame::cast(it.frame())->function();
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
index 647f48b03e..68f76c56a8 100644
--- a/deps/v8/src/runtime/runtime-json.cc
+++ b/deps/v8/src/runtime/runtime-json.cc
@@ -49,5 +49,5 @@ RUNTIME_FUNCTION(Runtime_ParseJson) {
: JsonParser<false>::Parse(source));
return *result;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 76226d68f5..80af962e3c 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -16,7 +16,7 @@ namespace internal {
static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context, Handle<FixedArray> constant_properties,
- bool* is_result_from_cache) {
+ bool is_strong, bool* is_result_from_cache) {
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
@@ -30,18 +30,18 @@ static Handle<Map> ComputeObjectLiteralMap(
}
Isolate* isolate = context->GetIsolate();
return isolate->factory()->ObjectLiteralMapFromCache(
- context, number_of_properties, is_result_from_cache);
+ context, number_of_properties, is_strong, is_result_from_cache);
}
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
- Handle<FixedArray> constant_properties);
+ Handle<FixedArray> constant_properties, bool is_strong);
MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> constant_properties, bool should_have_fast_elements,
- bool has_function_literal) {
+ bool has_function_literal, bool is_strong) {
Handle<Context> context = isolate->native_context();
// In case we have function literals, we want the object to be in
@@ -50,9 +50,11 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// not the same (which is the common case).
bool is_result_from_cache = false;
Handle<Map> map = has_function_literal
- ? Handle<Map>(context->object_function()->initial_map())
- : ComputeObjectLiteralMap(context, constant_properties,
- &is_result_from_cache);
+ ? Handle<Map>(is_strong
+ ? context->js_object_strong_map()
+ : context->object_function()->initial_map())
+ : ComputeObjectLiteralMap(context, constant_properties, is_strong,
+ &is_result_from_cache);
PretenureFlag pretenure_flag =
isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
@@ -82,7 +84,8 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
// simple object or array literal.
Handle<FixedArray> array = Handle<FixedArray>::cast(value);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, CreateLiteralBoilerplate(isolate, literals, array),
+ isolate, value,
+ CreateLiteralBoilerplate(isolate, literals, array, is_strong),
Object);
}
MaybeHandle<Object> maybe_result;
@@ -91,8 +94,8 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
- maybe_result =
- JSObject::SetOwnElement(boilerplate, element_index, value, SLOPPY);
+ maybe_result = JSObject::SetOwnElementIgnoreAttributes(
+ boilerplate, element_index, value, NONE);
} else {
Handle<String> name(String::cast(*key));
DCHECK(!name->AsArrayIndex(&element_index));
@@ -102,8 +105,8 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
- maybe_result =
- JSObject::SetOwnElement(boilerplate, element_index, value, SLOPPY);
+ maybe_result = JSObject::SetOwnElementIgnoreAttributes(
+ boilerplate, element_index, value, NONE);
} else {
// Non-uint32 number.
DCHECK(key->IsNumber());
@@ -137,7 +140,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
- Handle<FixedArray> elements) {
+ Handle<FixedArray> elements, bool is_strong) {
// Create the JSArray.
Handle<JSFunction> constructor = isolate->array_function();
@@ -156,7 +159,9 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
DisallowHeapAllocation no_gc;
DCHECK(IsFastElementsKind(constant_elements_kind));
Context* native_context = isolate->context()->native_context();
- Object* maps_array = native_context->js_array_maps();
+ Object* maps_array = is_strong
+ ? native_context->js_array_strong_maps()
+ : native_context->js_array_maps();
DCHECK(!maps_array->IsUndefined());
Object* map = FixedArray::cast(maps_array)->get(constant_elements_kind);
object->set_map(Map::cast(map));
@@ -186,13 +191,15 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
isolate->factory()->CopyFixedArray(fixed_array_values);
copied_elements_values = fixed_array_values_copy;
for (int i = 0; i < fixed_array_values->length(); i++) {
+ HandleScope scope(isolate);
if (fixed_array_values->get(i)->IsFixedArray()) {
// The value contains the constant_properties of a
// simple object or array literal.
Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, CreateLiteralBoilerplate(isolate, literals, fa),
+ isolate, result,
+ CreateLiteralBoilerplate(isolate, literals, fa, is_strong),
Object);
fixed_array_values_copy->set(i, *result);
}
@@ -208,19 +215,20 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> array) {
+ Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> array,
+ bool is_strong) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
const bool kHasNoFunctionLiteral = false;
switch (CompileTimeValue::GetLiteralType(array)) {
case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
return CreateObjectLiteralBoilerplate(isolate, literals, elements, true,
- kHasNoFunctionLiteral);
+ kHasNoFunctionLiteral, is_strong);
case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
return CreateObjectLiteralBoilerplate(isolate, literals, elements, false,
- kHasNoFunctionLiteral);
+ kHasNoFunctionLiteral, is_strong);
case CompileTimeValue::ARRAY_LITERAL:
return Runtime::CreateArrayLiteralBoilerplate(isolate, literals,
- elements);
+ elements, is_strong);
default:
UNREACHABLE();
return MaybeHandle<Object>();
@@ -238,6 +246,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
+ bool is_strong = (flags & ObjectLiteral::kIsStrong) != 0;
RUNTIME_ASSERT(literals_index >= 0 && literals_index < literals->length());
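
Runtime_CreateObjectLiteral unpacks several independent booleans, now including is_strong, from a single flags Smi. A minimal model of the mask-and-test decoding (the bit positions are invented for illustration):

#include <cassert>

// Only the mask-and-test shape matches the runtime function above.
enum ObjectLiteralFlags {
  kFastElements = 1 << 0,
  kHasFunction = 1 << 1,
  kDisableMementos = 1 << 2,
  kIsStrong = 1 << 3
};

int main() {
  int flags = kFastElements | kIsStrong;
  bool should_have_fast_elements = (flags & kFastElements) != 0;
  bool has_function_literal = (flags & kHasFunction) != 0;
  bool enable_mementos = (flags & kDisableMementos) == 0;  // inverted bit
  bool is_strong = (flags & kIsStrong) != 0;
  assert(should_have_fast_elements && !has_function_literal);
  assert(enable_mementos && is_strong);
}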
@@ -251,7 +260,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
isolate, raw_boilerplate,
CreateObjectLiteralBoilerplate(isolate, literals, constant_properties,
should_have_fast_elements,
- has_function_literal));
+ has_function_literal, is_strong));
boilerplate = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate);
@@ -281,7 +290,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
Isolate* isolate, Handle<FixedArray> literals, int literals_index,
- Handle<FixedArray> elements) {
+ Handle<FixedArray> elements, bool is_strong) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->get(literals_index), isolate);
Handle<AllocationSite> site;
@@ -290,7 +299,8 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
Handle<Object> boilerplate;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, boilerplate,
- Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements),
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements,
+ is_strong),
AllocationSite);
AllocationSiteCreationContext creation_context(isolate);
@@ -318,9 +328,11 @@ static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate,
RUNTIME_ASSERT_HANDLIFIED(
literals_index >= 0 && literals_index < literals->length(), JSObject);
Handle<AllocationSite> site;
+ bool is_strong = (flags & ArrayLiteral::kIsStrong) != 0;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, site,
- GetLiteralAllocationSite(isolate, literals, literals_index, elements),
+ GetLiteralAllocationSite(isolate, literals, literals_index, elements,
+ is_strong),
JSObject);
bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
@@ -424,5 +436,5 @@ RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
}
return *object;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index e4c644e168..555fb6a74b 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -290,5 +290,5 @@ RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
}
return heap->true_value();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 2941b580f5..474b463291 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -252,5 +252,5 @@ RUNTIME_FUNCTION(Runtime_IsMinusZero) {
HeapNumber* number = HeapNumber::cast(obj);
return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 2bc3af1254..3be4cc0f9f 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -586,5 +586,5 @@ RUNTIME_FUNCTION(Runtime_GetRootNaN) {
DCHECK(args.length() == 0);
return isolate->heap()->nan_value();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 79950c6218..da1ec4977b 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -17,40 +17,25 @@ namespace internal {
// Returns a single character string where the first character equals
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
- if (index < static_cast<uint32_t>(string->length())) {
- Factory* factory = string->GetIsolate()->factory();
- return factory->LookupSingleCharacterStringFromCode(
- String::Flatten(string)->Get(index));
- }
- return Execution::CharAt(string, index);
+ DCHECK_LT(index, static_cast<uint32_t>(string->length()));
+ Factory* factory = string->GetIsolate()->factory();
+ return factory->LookupSingleCharacterStringFromCode(
+ String::Flatten(string)->Get(index));
}
MaybeHandle<Object> Runtime::GetElementOrCharAt(Isolate* isolate,
Handle<Object> object,
- uint32_t index) {
+ uint32_t index,
+ LanguageMode language_mode) {
// Handle [] indexing on Strings
- if (object->IsString()) {
+ if (object->IsString() &&
+ index < static_cast<uint32_t>(String::cast(*object)->length())) {
Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
if (!result->IsUndefined()) return result;
}
- // Handle [] indexing on String objects
- if (object->IsStringObjectWithCharacterAt(index)) {
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
- Handle<Object> result =
- GetCharAt(Handle<String>(String::cast(js_value->value())), index);
- if (!result->IsUndefined()) return result;
- }
-
- Handle<Object> result;
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- PrototypeIterator iter(isolate, object);
- return Object::GetElement(isolate, PrototypeIterator::GetCurrent(iter),
- index);
- } else {
- return Object::GetElement(isolate, object, index);
- }
+ return Object::GetElement(isolate, object, index, language_mode);
}
@@ -68,18 +53,19 @@ MaybeHandle<Name> Runtime::ToName(Isolate* isolate, Handle<Object> key) {
MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
- Handle<Object> key) {
+ Handle<Object> key,
+ LanguageMode language_mode) {
if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = {key, object};
- THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_load",
- HandleVector(args, 2)),
- Object);
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kNonObjectPropertyLoad, key, object),
+ Object);
}
// Check if the given key is an array index.
- uint32_t index;
+ uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
+ return GetElementOrCharAt(isolate, object, index, language_mode);
}
// Convert the key to a name - possibly by calling back into JavaScript.
@@ -88,158 +74,140 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
// Check if the name is trivially convertible to an index and get
// the element if so.
+ // TODO(verwaest): Make sure GetProperty(LookupIterator*) can handle this, and
+ // remove the special casing here.
if (name->AsArrayIndex(&index)) {
return GetElementOrCharAt(isolate, object, index);
} else {
- return Object::GetProperty(object, name);
+ return Object::GetProperty(object, name, language_mode);
}
}
-MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- LanguageMode language_mode) {
- if (object->IsUndefined() || object->IsNull()) {
- Handle<Object> args[2] = {key, object};
- THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_store",
- HandleVector(args, 2)),
- Object);
- }
-
- if (object->IsJSProxy()) {
- Handle<Object> name_object;
- if (key->IsSymbol()) {
- name_object = key;
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name_object,
- Execution::ToString(isolate, key), Object);
- }
- Handle<Name> name = Handle<Name>::cast(name_object);
- return Object::SetProperty(Handle<JSProxy>::cast(object), name, value,
- language_mode);
- }
-
- // Check if the given key is an array index.
- uint32_t index;
- if (key->ToArrayIndex(&index)) {
- // TODO(verwaest): Support non-JSObject receivers.
- if (!object->IsJSObject()) return value;
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-
- // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
- // of a string using [] notation. We need to support this too in
- // JavaScript.
- // In the case of a String object we just need to redirect the assignment to
- // the underlying string if the index is in range. Since the underlying
- // string does nothing with the assignment then we can ignore such
- // assignments.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return value;
- }
-
- JSObject::ValidateElements(js_object);
- if (js_object->HasExternalArrayElements() ||
- js_object->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
- Execution::ToNumber(isolate, value), Object);
+MaybeHandle<Object> Runtime::KeyedGetObjectProperty(
+ Isolate* isolate, Handle<Object> receiver_obj, Handle<Object> key_obj,
+ LanguageMode language_mode) {
+ // Fast cases for getting named properties of the receiver JSObject
+ // itself.
+ //
+ // The global proxy object has to be excluded since LookupOwn on
+ // the global proxy object can return a valid result even though the
+ // global proxy object never has properties. This is the case
+ // because the global proxy object forwards everything to its hidden
+ // prototype including own lookups.
+ //
+ // Additionally, we need to make sure that we do not cache results
+ // for objects that require access checks.
+ if (receiver_obj->IsJSObject()) {
+ if (!receiver_obj->IsJSGlobalProxy() &&
+ !receiver_obj->IsAccessCheckNeeded() && key_obj->IsName()) {
+ DisallowHeapAllocation no_allocation;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
+ Handle<Name> key = Handle<Name>::cast(key_obj);
+ if (receiver->IsGlobalObject()) {
+ // Attempt dictionary lookup.
+ GlobalDictionary* dictionary = receiver->global_dictionary();
+ int entry = dictionary->FindEntry(key);
+ if (entry != GlobalDictionary::kNotFound) {
+ DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
+ if (cell->property_details().type() == DATA) {
+ Object* value = cell->value();
+ if (!value->IsTheHole()) return Handle<Object>(value, isolate);
+ // If value is the hole (meaning, absent) do the general lookup.
+ }
+ }
+ } else if (!receiver->HasFastProperties()) {
+ // Attempt dictionary lookup.
+ NameDictionary* dictionary = receiver->property_dictionary();
+ int entry = dictionary->FindEntry(key);
+ if ((entry != NameDictionary::kNotFound) &&
+ (dictionary->DetailsAt(entry).type() == DATA)) {
+ Object* value = dictionary->ValueAt(entry);
+ return Handle<Object>(value, isolate);
+ }
}
- }
-
- MaybeHandle<Object> result = JSObject::SetElement(
- js_object, index, value, NONE, language_mode, true, SET_PROPERTY);
- JSObject::ValidateElements(js_object);
-
- return result.is_null() ? result : value;
- }
-
- if (key->IsName()) {
- Handle<Name> name = Handle<Name>::cast(key);
- if (name->AsArrayIndex(&index)) {
- // TODO(verwaest): Support non-JSObject receivers.
- if (!object->IsJSObject()) return value;
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (js_object->HasExternalArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Execution::ToNumber(isolate, value), Object);
+ } else if (key_obj->IsSmi()) {
+ // JSObject without a name key. If the key is a Smi, check for a
+ // definite out-of-bounds access to elements, which is a strong indicator
+ // that subsequent accesses will also call the runtime. Proactively
+ // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
+ // doubles for those future calls in the case that the elements would
+ // become FAST_DOUBLE_ELEMENTS.
+ Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
+ ElementsKind elements_kind = js_object->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ if (Smi::cast(*key_obj)->value() >= js_object->elements()->length()) {
+ elements_kind = IsFastHoleyElementsKind(elements_kind)
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ JSObject::TransitionElementsKind(js_object, elements_kind);
}
+ } else {
+ DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ !IsFastElementsKind(elements_kind));
}
- return JSObject::SetElement(js_object, index, value, NONE, language_mode,
- true, SET_PROPERTY);
- } else {
- if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
- return Object::SetProperty(object, name, value, language_mode);
+ }
+ } else if (receiver_obj->IsString() && key_obj->IsSmi()) {
+ // Fast case for string indexing using [] with a smi index.
+ Handle<String> str = Handle<String>::cast(receiver_obj);
+ int index = Handle<Smi>::cast(key_obj)->value();
+ if (index >= 0 && index < str->length()) {
+ return GetCharAt(str, index);
}
}
- // Call-back into JavaScript to convert the key to a string.
- Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
- Execution::ToString(isolate, key), Object);
- Handle<String> name = Handle<String>::cast(converted);
-
- if (name->AsArrayIndex(&index)) {
- // TODO(verwaest): Support non-JSObject receivers.
- if (!object->IsJSObject()) return value;
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- return JSObject::SetElement(js_object, index, value, NONE, language_mode,
- true, SET_PROPERTY);
- }
- return Object::SetProperty(object, name, value, language_mode);
+ // Fall back to GetObjectProperty.
+ return GetObjectProperty(isolate, receiver_obj, key_obj, language_mode);
}
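
KeyedGetObjectProperty layers cheap checks, dictionary probes and the Smi-index string path in front of a single generic fallback. A structural sketch of that tiering, with std::map standing in for V8's dictionaries:

#include <iostream>
#include <map>
#include <string>

struct Object {
  std::map<std::string, int> own;
  const Object* prototype;
};

bool FastGet(const Object& o, const std::string& key, int* out) {
  auto it = o.own.find(key);  // dictionary fast path, own keys only
  if (it == o.own.end()) return false;
  *out = it->second;
  return true;
}

int SlowGet(const Object& o, const std::string& key) {
  // Generic fallback: also walks the prototype chain.
  for (const Object* cur = &o; cur != nullptr; cur = cur->prototype) {
    auto it = cur->own.find(key);
    if (it != cur->own.end()) return it->second;
  }
  return -1;  // "undefined"
}

int Get(const Object& o, const std::string& key) {
  int value = 0;
  if (FastGet(o, key, &value)) return value;  // cheap, no proto walk
  return SlowGet(o, key);                     // fall back to the general case
}

int main() {
  Object proto{{{"inherited", 42}}, nullptr};
  Object obj{{{"own", 7}}, &proto};
  std::cout << Get(obj, "own") << " " << Get(obj, "inherited") << "\n";  // 7 42
}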
-MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
+MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attrs) {
- Isolate* isolate = js_object->GetIsolate();
+ LanguageMode language_mode) {
// Check if the given key is an array index.
- uint32_t index;
+ uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
- // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
- // of a string using [] notation. We need to support this too in
- // JavaScript.
- // In the case of a String object we just need to redirect the assignment to
- // the underlying string if the index is in range. Since the underlying
- // string does nothing with the assignment then we can ignore such
- // assignments.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
- return value;
- }
+ return JSReceiver::DeleteElement(receiver, index, language_mode);
+ }
- return JSObject::SetElement(js_object, index, value, attrs, SLOPPY, false,
- DEFINE_PROPERTY);
+ Handle<Name> name;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
+
+ return JSReceiver::DeletePropertyOrElement(receiver, name, language_mode);
+}
+
+
+MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ LanguageMode language_mode) {
+ if (object->IsUndefined() || object->IsNull()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kNonObjectPropertyStore, key, object),
+ Object);
}
- if (key->IsName()) {
- Handle<Name> name = Handle<Name>::cast(key);
- if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attrs, SLOPPY, false,
- DEFINE_PROPERTY);
- } else {
- if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
- return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
- attrs);
- }
+ // Check if the given key is an array index.
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index)) {
+ // TODO(verwaest): Support other objects as well.
+ if (!object->IsJSReceiver()) return value;
+ return JSReceiver::SetElement(Handle<JSReceiver>::cast(object), index,
+ value, language_mode);
}
- // Call-back into JavaScript to convert the key to a string.
- Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
- Execution::ToString(isolate, key), Object);
- Handle<String> name = Handle<String>::cast(converted);
+ Handle<Name> name;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
- if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attrs, SLOPPY, false,
- DEFINE_PROPERTY);
- } else {
- return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
- attrs);
- }
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
+ // TODO(verwaest): Support other objects as well.
+ if (it.IsElement() && !object->IsJSReceiver()) return value;
+ return Object::SetProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED);
}
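
The rewritten SetObjectProperty normalizes the key once via LookupIterator::PropertyOrElement instead of branching separately on array indices, names, and to-string conversions. A rough model of that normalize-then-dispatch shape (Key and Normalize are invented):

#include <cassert>
#include <initializer_list>
#include <map>
#include <string>

struct Key {
  bool is_element;
  unsigned index;    // valid when is_element
  std::string name;  // valid otherwise
};

Key Normalize(const std::string& raw) {
  // ToArrayIndex analog: all-digit strings become element indices.
  if (!raw.empty() && raw.find_first_not_of("0123456789") == std::string::npos)
    return {true, static_cast<unsigned>(std::stoul(raw)), ""};
  return {false, 0, raw};
}

int main() {
  std::map<unsigned, int> elements;
  std::map<std::string, int> properties;
  for (const std::string& raw : {"0", "10", "length"}) {
    Key key = Normalize(raw);
    if (key.is_element) elements[key.index] = 1;  // element store path
    else properties[key.name] = 1;                // named store path
  }
  assert(elements.size() == 2 && properties.size() == 1);
}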
@@ -360,61 +328,35 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
Factory* factory = isolate->factory();
PropertyAttributes attrs;
- uint32_t index = 0;
- Handle<Object> value;
- MaybeHandle<AccessorPair> maybe_accessors;
- // TODO(verwaest): Unify once indexed properties can be handled by the
- // LookupIterator.
- if (name->AsArrayIndex(&index)) {
- // Get attributes.
- Maybe<PropertyAttributes> maybe =
- JSReceiver::GetOwnElementAttribute(obj, index);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- attrs = maybe.FromJust();
- if (attrs == ABSENT) return factory->undefined_value();
-
- // Get AccessorPair if present.
- maybe_accessors = JSObject::GetOwnElementAccessorPair(obj, index);
-
- // Get value if not an AccessorPair.
- if (maybe_accessors.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Runtime::GetElementOrCharAt(isolate, obj, index),
- Object);
- }
- } else {
- // Get attributes.
- LookupIterator it(obj, name, LookupIterator::HIDDEN);
- Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- attrs = maybe.FromJust();
- if (attrs == ABSENT) return factory->undefined_value();
-
- // Get AccessorPair if present.
- if (it.state() == LookupIterator::ACCESSOR &&
- it.GetAccessors()->IsAccessorPair()) {
- maybe_accessors = Handle<AccessorPair>::cast(it.GetAccessors());
- }
+ // Get attributes.
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name,
+ LookupIterator::HIDDEN);
+ Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
+
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ attrs = maybe.FromJust();
+ if (attrs == ABSENT) return factory->undefined_value();
- // Get value if not an AccessorPair.
- if (maybe_accessors.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::GetProperty(&it),
- Object);
- }
- }
DCHECK(!isolate->has_pending_exception());
Handle<FixedArray> elms = factory->NewFixedArray(DESCRIPTOR_SIZE);
elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
- elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(!maybe_accessors.is_null()));
- Handle<AccessorPair> accessors;
- if (maybe_accessors.ToHandle(&accessors)) {
+ bool is_accessor_pair = it.state() == LookupIterator::ACCESSOR &&
+ it.GetAccessors()->IsAccessorPair();
+ elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(is_accessor_pair));
+
+ if (is_accessor_pair) {
+ Handle<AccessorPair> accessors =
+ Handle<AccessorPair>::cast(it.GetAccessors());
Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
elms->set(GETTER_INDEX, *getter);
elms->set(SETTER_INDEX, *setter);
} else {
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::GetProperty(&it),
+ Object);
elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
elms->set(VALUE_INDEX, *value);
}
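
GetOwnProperty now derives the descriptor fields from one LookupIterator pass: enumerable and configurable come straight from the attribute bits, while writable and value are filled only for non-accessor properties. A small sketch of the bit-to-descriptor mapping (the attribute values follow the conventional encoding but are restated here as assumptions):

#include <cassert>

enum PropertyAttributes {
  NONE = 0,
  READ_ONLY = 1,
  DONT_ENUM = 2,
  DONT_DELETE = 4
};

struct Descriptor {
  bool enumerable;
  bool configurable;
  bool writable;  // meaningful for data properties only
};

Descriptor FromAttributes(int attrs) {
  return {(attrs & DONT_ENUM) == 0,    // ENUMERABLE_INDEX
          (attrs & DONT_DELETE) == 0,  // CONFIGURABLE_INDEX
          (attrs & READ_ONLY) == 0};   // WRITABLE_INDEX
}

int main() {
  Descriptor d = FromAttributes(DONT_ENUM | READ_ONLY);
  assert(!d.enumerable && d.configurable && !d.writable);
}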
@@ -457,43 +399,7 @@ RUNTIME_FUNCTION(Runtime_IsExtensible) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
- if (obj->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, obj);
- if (iter.IsAtEnd()) return isolate->heap()->false_value();
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- obj = JSObject::cast(iter.GetCurrent());
- }
- return isolate->heap()->ToBoolean(obj->map()->is_extensible());
-}
-
-
-RUNTIME_FUNCTION(Runtime_DisableAccessChecks) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
- Handle<Map> old_map(object->map());
- bool needs_access_checks = old_map->is_access_check_needed();
- if (needs_access_checks) {
- // Copy map so it won't interfere constructor's initial map.
- Handle<Map> new_map = Map::Copy(old_map, "DisableAccessChecks");
- new_map->set_is_access_check_needed(false);
- JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
- }
- return isolate->heap()->ToBoolean(needs_access_checks);
-}
-
-
-RUNTIME_FUNCTION(Runtime_EnableAccessChecks) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- Handle<Map> old_map(object->map());
- RUNTIME_ASSERT(!old_map->is_access_check_needed());
- // Copy map so it won't interfere constructor's initial map.
- Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
- new_map->set_is_access_check_needed(true);
- JSObject::MigrateToMap(object, new_map);
- return isolate->heap()->undefined_value();
+ return isolate->heap()->ToBoolean(obj->IsExtensible());
}
@@ -548,28 +454,27 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Runtime::GetObjectProperty(isolate, object, key));
+ isolate, result,
+ Runtime::GetObjectProperty(isolate, object, key, SLOPPY));
return *result;
}
-MUST_USE_RESULT static MaybeHandle<Object> TransitionElements(
- Handle<Object> object, ElementsKind to_kind, Isolate* isolate) {
+RUNTIME_FUNCTION(Runtime_GetPropertyStrong) {
HandleScope scope(isolate);
- if (!object->IsJSObject()) {
- isolate->ThrowIllegalOperation();
- return MaybeHandle<Object>();
- }
- ElementsKind from_kind =
- Handle<JSObject>::cast(object)->map()->elements_kind();
- if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
- return object;
- }
- isolate->ThrowIllegalOperation();
- return MaybeHandle<Object>();
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::GetObjectProperty(isolate, object, key, STRONG));
+ return *result;
}
@@ -581,76 +486,25 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
- // Fast cases for getting named properties of the receiver JSObject
- // itself.
- //
- // The global proxy objects has to be excluded since LookupOwn on
- // the global proxy object can return a valid result even though the
- // global proxy object never has properties. This is the case
- // because the global proxy object forwards everything to its hidden
- // prototype including own lookups.
- //
- // Additionally, we need to make sure that we do not cache results
- // for objects that require access checks.
- if (receiver_obj->IsJSObject()) {
- if (!receiver_obj->IsJSGlobalProxy() &&
- !receiver_obj->IsAccessCheckNeeded() && key_obj->IsName()) {
- DisallowHeapAllocation no_allocation;
- Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
- Handle<Name> key = Handle<Name>::cast(key_obj);
- if (!receiver->HasFastProperties()) {
- // Attempt dictionary lookup.
- NameDictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(key);
- if ((entry != NameDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == DATA)) {
- Object* value = dictionary->ValueAt(entry);
- if (!receiver->IsGlobalObject()) return value;
- DCHECK(value->IsPropertyCell());
- value = PropertyCell::cast(value)->value();
- if (!value->IsTheHole()) return value;
- // If value is the hole (meaning, absent) do the general lookup.
- }
- }
- } else if (key_obj->IsSmi()) {
- // JSObject without a name key. If the key is a Smi, check for a
- // definite out-of-bounds access to elements, which is a strong indicator
- // that subsequent accesses will also call the runtime. Proactively
- // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
- // doubles for those future calls in the case that the elements would
- // become FAST_DOUBLE_ELEMENTS.
- Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
- ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- Handle<Smi> key = Handle<Smi>::cast(key_obj);
- if (key->value() >= js_object->elements()->length()) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, TransitionElements(js_object, elements_kind, isolate));
- }
- } else {
- DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
- !IsFastElementsKind(elements_kind));
- }
- }
- } else if (receiver_obj->IsString() && key_obj->IsSmi()) {
- // Fast case for string indexing using [] with a smi index.
- Handle<String> str = Handle<String>::cast(receiver_obj);
- int index = args.smi_at(1);
- if (index >= 0 && index < str->length()) {
- return *GetCharAt(str, index);
- }
- }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::KeyedGetObjectProperty(isolate, receiver_obj, key_obj, SLOPPY));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_KeyedGetPropertyStrong) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
- // Fall back to GetObjectProperty.
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::GetObjectProperty(isolate, receiver_obj, key_obj));
+ Runtime::KeyedGetObjectProperty(isolate, receiver_obj, key_obj, STRONG));
return *result;
}
@@ -660,14 +514,14 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
RUNTIME_ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
#ifdef DEBUG
uint32_t index = 0;
- DCHECK(!key->ToArrayIndex(&index));
- LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ DCHECK(!name->ToArrayIndex(&index));
+ LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
RUNTIME_ASSERT(!it.IsFound());
@@ -676,47 +530,77 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- JSObject::SetOwnPropertyIgnoreAttributes(object, key, value, attrs));
+ JSObject::SetOwnPropertyIgnoreAttributes(object, name, value, attrs));
return *result;
}
-RUNTIME_FUNCTION(Runtime_SetProperty) {
+// Adds an element to an array.
+// This is used to create an indexed data property in an array.
+RUNTIME_FUNCTION(Runtime_AddElement) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 4);
+ RUNTIME_ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode_arg, 3);
- LanguageMode language_mode = language_mode_arg;
+
+ uint32_t index = 0;
+ CHECK(key->ToArrayIndex(&index));
+
+#ifdef DEBUG
+ LookupIterator it(isolate, object, index,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ RUNTIME_ASSERT(!it.IsFound());
+
+ if (object->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ RUNTIME_ASSERT(!JSArray::WouldChangeReadOnlyLength(array, index));
+ }
+#endif
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
+ JSObject::SetOwnElementIgnoreAttributes(object, index, value, NONE));
return *result;
}
-// Adds an element to an array.
-// This is used to create an indexed data property into an array.
-RUNTIME_FUNCTION(Runtime_AddElement) {
+RUNTIME_FUNCTION(Runtime_AppendElement) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+ uint32_t index;
+ CHECK(array->length()->ToArrayIndex(&index));
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::AddDataElement(array, index, value, NONE));
+ JSObject::ValidateElements(array);
+ return *array;
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetProperty) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
-
- uint32_t index = 0;
- key->ToArrayIndex(&index);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode_arg, 3);
+ LanguageMode language_mode = language_mode_arg;
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetElement(object, index, value, attrs, SLOPPY,
- false, DEFINE_PROPERTY));
+ isolate, result,
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
return *result;
}
@@ -724,12 +608,13 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
RUNTIME_FUNCTION(Runtime_DeleteProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 2);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSReceiver::DeleteProperty(object, key, language_mode));
+ isolate, result,
+ Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode));
return *result;
}
@@ -1187,9 +1072,7 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
Debug* debug = isolate->debug();
// Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) {
- debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
- }
+ if (debug->StepInActive()) debug->HandleStepIn(function, true);
if (function->has_initial_map()) {
if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
@@ -1381,36 +1264,22 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- LookupIterator it(js_object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound() && it.state() == LookupIterator::ACCESS_CHECK) {
- if (!isolate->MayAccess(js_object)) {
- return isolate->heap()->undefined_value();
- }
- it.Next();
- }
-
- // Take special care when attributes are different and there is already
- // a property.
- if (it.state() == LookupIterator::ACCESSOR) {
- // Use IgnoreAttributes version since a readonly property may be
- // overridden and SetProperty does not allow this.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetOwnPropertyIgnoreAttributes(
- js_object, name, obj_value, attrs, JSObject::DONT_FORCE_FIELD));
- return *result;
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
+ LookupIterator::OWN);
+ if (it.state() == LookupIterator::ACCESS_CHECK && !it.HasAccess()) {
+ return isolate->heap()->undefined_value();
}
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::DefineObjectProperty(js_object, name, obj_value, attrs));
+ isolate, result, JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value, attrs, JSObject::DONT_FORCE_FIELD));
+
return *result;
}
@@ -1419,9 +1288,9 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
RUNTIME_FUNCTION(Runtime_GetDataProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- return *JSObject::GetDataProperty(object, key);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ return *JSReceiver::GetDataProperty(object, name);
}
@@ -1518,6 +1387,15 @@ RUNTIME_FUNCTION(Runtime_IsSpecObject) {
}
+RUNTIME_FUNCTION(Runtime_IsStrong) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSReceiver() &&
+ JSReceiver::cast(obj)->map()->is_strong());
+}
+
+
RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -1557,5 +1435,5 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
setter, attrs));
return isolate->heap()->undefined_value();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-observe.cc b/deps/v8/src/runtime/runtime-observe.cc
index a942645236..8fc201da0e 100644
--- a/deps/v8/src/runtime/runtime-observe.cc
+++ b/deps/v8/src/runtime/runtime-observe.cc
@@ -58,7 +58,7 @@ RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, argument, 1);
- v8::TryCatch catcher;
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
// We should send a message on an uncaught exception thrown during
// Object.observe delivery while not interrupting further delivery, thus
// we make a call inside a verbose TryCatch.
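As context for this one-line change: `v8::TryCatch` grew an explicit isolate parameter, so call sites inside V8 have to cast the internal `Isolate*` back to the public `v8::Isolate*` first. A minimal sketch of the embedder-side pattern (the function and its arguments are illustrative, not part of the patch):

```cpp
// Sketch: swallow an exception from a callback while still reporting it,
// mirroring the verbose TryCatch used above. Illustrative code only.
void CallAndReport(v8::Isolate* isolate, v8::Local<v8::Function> callback,
                   v8::Local<v8::Object> receiver, v8::Local<v8::Value> arg) {
  v8::TryCatch catcher(isolate);  // the isolate is now a required argument
  catcher.SetVerbose(true);       // uncaught exceptions produce a message
  v8::Local<v8::Value> args[] = {arg};
  v8::Local<v8::Value> result = callback->Call(receiver, 1, args);
  if (result.IsEmpty()) {
    // The callback threw; the verbose TryCatch already reported it,
    // and delivery can continue with the next callback.
  }
}
```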
@@ -157,5 +157,5 @@ RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) {
Handle<Context> context(object_info->GetCreationContext(), isolate);
return context->native_object_notifier_perform_change();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 703d72b062..0f175c0168 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -81,5 +81,5 @@ RUNTIME_FUNCTION(Runtime_Fix) {
JSProxy::Fix(proxy);
return isolate->heap()->undefined_value();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 478573a68d..efa91b8485 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -7,6 +7,7 @@
#include "src/arguments.h"
#include "src/jsregexp-inl.h"
#include "src/jsregexp.h"
+#include "src/messages.h"
#include "src/runtime/runtime-utils.h"
#include "src/string-builder.h"
#include "src/string-search.h"
@@ -637,7 +638,11 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
// freshly allocated page or on an already swept page. Hence, the sweeper
// thread cannot get confused with the filler creation. No synchronization
// needed.
- heap->CreateFillerObjectAt(end_of_string, delta);
+ // TODO(hpayer): We should shrink the large object page if the size
+ // of the object changed significantly.
+ if (!heap->lo_space()->Contains(*answer)) {
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ }
heap->AdjustLiveBytes(answer->address(), -delta, Heap::CONCURRENT_TO_SWEEPER);
return *answer;
}
@@ -912,11 +917,9 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
bool success = false;
JSRegExp::Flags flags = RegExpFlagsFromString(flags_string, &success);
if (!success) {
- Handle<FixedArray> element = factory->NewFixedArray(1);
- element->set(0, *flags_string);
- Handle<JSArray> args = factory->NewJSArrayWithElements(element);
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewSyntaxError("invalid_regexp_flags", args));
+ isolate,
+ NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string));
}
Handle<String> escaped_source;
@@ -1186,5 +1189,5 @@ RUNTIME_FUNCTION(Runtime_IsRegExp) {
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSRegExp());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index cf885ec0d8..700925db62 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -17,17 +17,15 @@ namespace internal {
static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
HandleScope scope(isolate);
- Handle<Object> args[1] = {name};
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError("var_redeclaration", HandleVector(args, 1)));
+ isolate, NewTypeError(MessageTemplate::kVarRedeclaration, name));
}
RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError("const_assign", HandleVector<Object>(NULL, 0)));
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(MessageTemplate::kConstAssign));
}
@@ -250,6 +248,12 @@ RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
JSGlobalObject::cast(context_arg->extension()), isolate);
return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
is_function);
+ } else if (context->IsScriptContext()) {
+ DCHECK(context->global_object()->IsJSGlobalObject());
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(context->global_object()), isolate);
+ return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
+ is_function);
}
if (attributes != ABSENT) {
@@ -325,8 +329,12 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
// meanwhile. If so, re-introduce the variable in the context extension.
if (attributes == ABSENT) {
Handle<Context> declaration_context(context_arg->declaration_context());
- DCHECK(declaration_context->has_extension());
- holder = handle(declaration_context->extension(), isolate);
+ if (declaration_context->IsScriptContext()) {
+ holder = handle(declaration_context->global_object(), isolate);
+ } else {
+ DCHECK(declaration_context->has_extension());
+ holder = handle(declaration_context->extension(), isolate);
+ }
CHECK(holder->IsJSObject());
} else {
// For JSContextExtensionObjects, the initializer can be run multiple times
@@ -375,11 +383,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
Handle<FixedArray> parameter_map =
isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
parameter_map->set_map(isolate->heap()->sloppy_arguments_elements_map());
-
- Handle<Map> map = Map::Copy(handle(result->map()), "NewSloppyArguments");
- map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
-
- result->set_map(*map);
+ result->set_map(isolate->native_context()->fast_aliased_arguments_map());
result->set_elements(*parameter_map);
// Store the context and the arguments array at the beginning of the
@@ -514,10 +518,9 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
}
-static Handle<JSArray> NewRestParam(Isolate* isolate,
- Object** parameters,
- int num_params,
- int rest_index) {
+static Handle<JSArray> NewRestParam(Isolate* isolate, Object** parameters,
+ int num_params, int rest_index,
+ LanguageMode language_mode) {
parameters -= rest_index;
int num_elements = std::max(0, num_params - rest_index);
Handle<FixedArray> elements =
@@ -525,26 +528,29 @@ static Handle<JSArray> NewRestParam(Isolate* isolate,
for (int i = 0; i < num_elements; ++i) {
elements->set(i, *--parameters);
}
- return isolate->factory()->NewJSArrayWithElements(elements, FAST_ELEMENTS,
- num_elements);
+ return isolate->factory()->NewJSArrayWithElements(
+ elements, FAST_ELEMENTS, num_elements, strength(language_mode));
}
RUNTIME_FUNCTION(Runtime_NewRestParam) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 4);
Object** parameters = reinterpret_cast<Object**>(args[0]);
CONVERT_SMI_ARG_CHECKED(num_params, 1);
CONVERT_SMI_ARG_CHECKED(rest_index, 2);
+ CONVERT_SMI_ARG_CHECKED(language_mode, 3);
- return *NewRestParam(isolate, parameters, num_params, rest_index);
+ return *NewRestParam(isolate, parameters, num_params, rest_index,
+ static_cast<LanguageMode>(language_mode));
}
RUNTIME_FUNCTION(Runtime_NewRestParamSlow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(rest_index, 0);
+ CONVERT_SMI_ARG_CHECKED(language_mode, 1);
JavaScriptFrameIterator it(isolate);
@@ -555,7 +561,8 @@ RUNTIME_FUNCTION(Runtime_NewRestParamSlow) {
int argument_count = frame->GetArgumentsLength();
Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
- return *NewRestParam(isolate, parameters, argument_count, rest_index);
+ return *NewRestParam(isolate, parameters, argument_count, rest_index,
+ static_cast<LanguageMode>(language_mode));
}
@@ -625,16 +632,23 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
Handle<ScriptContextTable> script_context_table(
native_context->script_context_table());
- Handle<String> clashed_name;
Object* name_clash_result =
FindNameClash(scope_info, global_object, script_context_table);
if (isolate->has_pending_exception()) return name_clash_result;
+ // Script contexts have a canonical empty function as their closure, not the
+ // anonymous closure containing the global code. See
+ // FullCodeGenerator::PushFunctionArgumentForContextAllocation.
+ Handle<JSFunction> closure(global_object->IsJSBuiltinsObject()
+ ? *function
+ : native_context->closure());
Handle<Context> result =
- isolate->factory()->NewScriptContext(function, scope_info);
+ isolate->factory()->NewScriptContext(closure, scope_info);
+
+ result->InitializeGlobalSlots();
DCHECK(function->context() == isolate->context());
- DCHECK(function->context()->global_object() == result->global_object());
+ DCHECK(*global_object == result->global_object());
Handle<ScriptContextTable> new_script_context_table =
ScriptContextTable::Extend(script_context_table, result);
@@ -1005,8 +1019,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
} else if (is_strict(language_mode)) {
// Setting read only property in strict mode.
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError("strict_cannot_assign", HandleVector(&name, 1)));
+ isolate, NewTypeError(MessageTemplate::kStrictCannotAssign, name));
}
return *value;
}
@@ -1049,7 +1062,7 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
// Try to convert the key to an index. If successful and within
// range, return the argument from the frame.
- uint32_t index;
+ uint32_t index = 0;
if (raw_key->ToArrayIndex(&index) && index < n) {
return frame->GetParameter(index);
}
@@ -1097,8 +1110,7 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
JSFunction* function = frame->function();
if (is_strict(function->shared()->language_mode())) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError("strict_arguments_callee",
- HandleVector<Object>(NULL, 0)));
+ isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
}
return function;
}
@@ -1125,5 +1137,5 @@ RUNTIME_FUNCTION(Runtime_Arguments) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_GetArgumentsProperty(args, isolate);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 8bfde943dd..3b9cfbf969 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -121,11 +121,13 @@ RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
if (isolate->has_pending_exception()) return isolate->heap()->exception();
subject = String::Flatten(subject);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- StringReplaceOneCharWithString(isolate, subject, search, replace, &found,
- kRecursionLimit));
- return *result;
+ if (StringReplaceOneCharWithString(isolate, subject, search, replace, &found,
+ kRecursionLimit).ToHandle(&result)) {
+ return *result;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ // An empty handle with no pending exception indicates a stack overflow.
+ return isolate->StackOverflow();
}
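The rewritten return path leans on the runtime's `MaybeHandle` convention: a failing helper returns an empty handle, and only schedules a pending exception when there is a concrete error to report, so an empty handle with no pending exception can only mean the recursion ran out of stack. A schematic of that convention (the helper itself is illustrative):

```cpp
// Sketch of the failure convention assumed above: an empty MaybeHandle
// with no pending exception signals stack exhaustion, nothing else.
MaybeHandle<String> RecursiveHelper(Isolate* isolate, Handle<String> subject,
                                    int depth) {
  StackLimitCheck check(isolate);
  // Out of stack: fail *without* scheduling an exception; the caller
  // translates this case into isolate->StackOverflow().
  if (check.HasOverflowed()) return MaybeHandle<String>();
  if (depth == 0) return subject;
  return RecursiveHelper(isolate, subject, depth - 1);
}
```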
@@ -137,7 +139,7 @@ RUNTIME_FUNCTION(Runtime_StringIndexOf) {
CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
- uint32_t start_index;
+ uint32_t start_index = 0;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
@@ -188,7 +190,7 @@ RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
- uint32_t start_index;
+ uint32_t start_index = 0;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
uint32_t pat_length = pat->length();
@@ -1349,5 +1351,5 @@ RUNTIME_FUNCTION(Runtime_StringGetLength) {
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
return Smi::FromInt(s->length());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 31b6bed22e..412ee0ae31 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -26,42 +26,26 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
- Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
- if (name->IsString()) symbol->set_name(*name);
- return *symbol;
-}
-
-
-RUNTIME_FUNCTION(Runtime_CreatePrivateOwnSymbol) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
- Handle<Symbol> symbol = isolate->factory()->NewPrivateOwnSymbol();
- if (name->IsString()) symbol->set_name(*name);
- return *symbol;
+ return *isolate->factory()->NewPrivateSymbol(name);
}
-RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateOwnSymbol) {
+RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
Handle<JSObject> registry = isolate->GetSymbolRegistry();
Handle<String> part = isolate->factory()->private_intern_string();
Handle<Object> privates;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, privates, Object::GetPropertyOrElement(registry, part));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, privates,
+ Object::GetProperty(registry, part));
Handle<Object> symbol;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, symbol, Object::GetPropertyOrElement(privates, name));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, symbol,
+ Object::GetProperty(privates, name));
if (!symbol->IsSymbol()) {
DCHECK(symbol->IsUndefined());
- symbol = isolate->factory()->NewPrivateSymbol();
- Handle<Symbol>::cast(symbol)->set_name(*name);
- Handle<Symbol>::cast(symbol)->set_is_own(true);
- JSObject::SetProperty(Handle<JSObject>::cast(privates), name, symbol,
- STRICT).Assert();
+ symbol = isolate->factory()->NewPrivateSymbol(name);
+ JSObject::AddProperty(Handle<JSObject>::cast(privates), name, symbol, NONE);
}
return *symbol;
}
@@ -96,5 +80,5 @@ RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
return isolate->heap()->ToBoolean(symbol->is_private());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index f590f81a2d..1325eeb67b 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -20,7 +20,8 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
// TODO(turbofan): Deoptimization is not supported yet.
- if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ if (function->code()->is_turbofanned() &&
+ function->shared()->asm_function() && !FLAG_turbo_asm_deoptimization) {
return isolate->heap()->undefined_value();
}
@@ -50,7 +51,8 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
// TODO(turbofan): Deoptimization is not supported yet.
- if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ if (function->code()->is_turbofanned() &&
+ function->shared()->asm_function() && !FLAG_turbo_asm_deoptimization) {
return isolate->heap()->undefined_value();
}
@@ -87,7 +89,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
// JSFunction::MarkForOptimization().
RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
(function->code()->kind() == Code::FUNCTION &&
- function->code()->optimizable()));
+ !function->shared()->optimization_disabled()));
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -131,8 +133,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// The following assertion was lifted from the DCHECK inside
// JSFunction::MarkForOptimization().
RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
- (function->code()->kind() == Code::FUNCTION &&
- function->code()->optimizable()));
+ !function->shared()->optimization_disabled());
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -212,6 +213,21 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
}
+RUNTIME_FUNCTION(Runtime_GetUndetectable) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+
+ Local<v8::ObjectTemplate> desc = v8::ObjectTemplate::New(v8_isolate);
+ desc->MarkAsUndetectable();
+ Local<v8::Object> obj;
+ if (!desc->NewInstance(v8_isolate->GetCurrentContext()).ToLocal(&obj)) {
+ return nullptr;
+ }
+ return *Utils::OpenHandle(*obj);
+}
+
+
RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -266,8 +282,9 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
// and print some interesting cpu debugging info.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- os << "fp = " << frame->fp() << ", sp = " << frame->sp()
- << ", caller_sp = " << frame->caller_sp() << ": ";
+ os << "fp = " << static_cast<void*>(frame->fp())
+ << ", sp = " << static_cast<void*>(frame->sp())
+ << ", caller_sp = " << static_cast<void*>(frame->caller_sp()) << ": ";
} else {
os << "DebugPrint: ";
}
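The casts are needed because the frame accessors return `Address`, a byte-pointer typedef; streamed uncast, the pointer hits the character-string overload of `operator<<` instead of being printed as a pointer value. A standalone illustration of the difference:

```cpp
#include <iostream>

int main() {
  unsigned char stack_slot[8] = {0};
  unsigned char* fp = stack_slot;       // like V8's Address (a byte pointer)
  std::cout << fp << "\n";              // streamed as a (garbage) C string
  std::cout << static_cast<void*>(fp);  // streamed as the pointer value
  std::cout << "\n";
  return 0;
}
```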
@@ -360,7 +377,8 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
DCHECK(args.length() == 0);
- return Smi::FromInt(Natives::GetBuiltinsCount());
+ return Smi::FromInt(Natives::GetBuiltinsCount() +
+ ExtraNatives::GetBuiltinsCount());
}
@@ -488,5 +506,5 @@ TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index fed6795509..4d35524703 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -16,7 +16,7 @@ namespace internal {
void Runtime::SetupArrayBuffer(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
bool is_external, void* data,
- size_t allocated_length) {
+ size_t allocated_length, SharedFlag shared) {
DCHECK(array_buffer->GetInternalFieldCount() ==
v8::ArrayBuffer::kInternalFieldCount);
for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
@@ -25,7 +25,8 @@ void Runtime::SetupArrayBuffer(Isolate* isolate,
array_buffer->set_backing_store(data);
array_buffer->set_bit_field(0);
array_buffer->set_is_external(is_external);
- array_buffer->set_is_neuterable(true);
+ array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
+ array_buffer->set_is_shared(shared == SharedFlag::kShared);
if (data && !is_external) {
isolate->heap()->RegisterNewArrayBuffer(
@@ -42,7 +43,8 @@ void Runtime::SetupArrayBuffer(Isolate* isolate,
bool Runtime::SetupArrayBufferAllocatingData(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t allocated_length,
- bool initialize) {
+ bool initialize,
+ SharedFlag shared) {
void* data;
CHECK(isolate->array_buffer_allocator() != NULL);
// Prevent creating array buffers when serializing.
@@ -59,7 +61,8 @@ bool Runtime::SetupArrayBufferAllocatingData(Isolate* isolate,
data = NULL;
}
- SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length);
+ SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length,
+ shared);
return true;
}
@@ -71,9 +74,10 @@ void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_shared, 2);
if (!holder->byte_length()->IsUndefined()) {
// ArrayBuffer is already initialized; probably a fuzz test.
return *holder;
@@ -83,8 +87,9 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- if (!Runtime::SetupArrayBufferAllocatingData(isolate, holder,
- allocated_length)) {
+ if (!Runtime::SetupArrayBufferAllocatingData(
+ isolate, holder, allocated_length, true,
+ is_shared ? SharedFlag::kShared : SharedFlag::kNotShared)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
@@ -139,6 +144,8 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
CHECK(Smi::FromInt(0) == array_buffer->byte_length());
return isolate->heap()->undefined_value();
}
+ // Shared array buffers should never be neutered.
+ RUNTIME_ASSERT(!array_buffer->is_shared());
DCHECK(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
@@ -175,12 +182,13 @@ void Runtime::ArrayIdToTypeAndSize(int arrayId, ExternalArrayType* array_type,
RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
+ DCHECK(args.length() == 6);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset_object, 3);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4);
+ CONVERT_BOOLEAN_ARG_CHECKED(initialize, 5);
RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
arrayId <= Runtime::ARRAY_ID_LAST);
@@ -242,11 +250,12 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
DCHECK(IsExternalArrayElementsKind(holder->map()->elements_kind()));
} else {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBuffer(isolate, buffer, true, NULL, byte_length);
+ Runtime::SetupArrayBuffer(isolate, buffer, true, NULL, byte_length,
+ SharedFlag::kNotShared);
holder->set_buffer(*buffer);
Handle<FixedTypedArrayBase> elements =
isolate->factory()->NewFixedTypedArray(static_cast<int>(length),
- array_type);
+ array_type, initialize);
holder->set_elements(*elements);
}
return isolate->heap()->undefined_value();
@@ -280,12 +289,14 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ size_t length = 0;
if (source->IsJSTypedArray() &&
JSTypedArray::cast(*source)->type() == array_type) {
- length_obj = Handle<Object>(JSTypedArray::cast(*source)->length(), isolate);
+ length_obj = handle(JSTypedArray::cast(*source)->length(), isolate);
+ length = JSTypedArray::cast(*source)->length_value();
+ } else {
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
}
- size_t length = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
(length > (kMaxInt / element_size))) {
@@ -357,7 +368,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
+ DCHECK_EQ(1, args.length()); \
CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \
return holder->accessor(); \
}
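For reference, instantiating the macro as `BUFFER_VIEW_GETTER(DataView, Buffer, buffer)` expands to roughly the following (expansion reconstructed from the macro body above):

```cpp
// Approximate expansion of BUFFER_VIEW_GETTER(DataView, Buffer, buffer).
RUNTIME_FUNCTION(Runtime_DataViewGetBuffer) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
  return holder->buffer();
}
```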
@@ -371,7 +382,7 @@ BUFFER_VIEW_GETTER(DataView, Buffer, buffer)
RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
return *holder->GetBuffer();
}
@@ -411,8 +422,8 @@ RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
size_t offset = 0;
RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset));
- size_t target_length = NumberToSize(isolate, target->length());
- size_t source_length = NumberToSize(isolate, source->length());
+ size_t target_length = target->length_value();
+ size_t source_length = source->length_value();
size_t target_byte_length = NumberToSize(isolate, target->byte_length());
size_t source_byte_length = NumberToSize(isolate, source->byte_length());
if (offset > target_length || offset + source_length > target_length ||
@@ -467,6 +478,29 @@ RUNTIME_FUNCTION(Runtime_IsTypedArray) {
}
+RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ return isolate->heap()->ToBoolean(
+ args[0]->IsJSTypedArray() &&
+ JSTypedArray::cast(args[0])->GetBuffer()->is_shared());
+}
+
+
+RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ if (!args[0]->IsJSTypedArray()) {
+ return isolate->heap()->false_value();
+ }
+
+ Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
+ return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
+ obj->type() != kExternalFloat32Array &&
+ obj->type() != kExternalFloat64Array);
+}
+
+
RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
@@ -720,5 +754,5 @@ DATA_VIEW_SETTER(Float32, float)
DATA_VIEW_SETTER(Float64, double)
#undef DATA_VIEW_SETTER
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-uri.cc b/deps/v8/src/runtime/runtime-uri.cc
index 477071ac78..e0eba4fe4b 100644
--- a/deps/v8/src/runtime/runtime-uri.cc
+++ b/deps/v8/src/runtime/runtime-uri.cc
@@ -284,5 +284,5 @@ RUNTIME_FUNCTION(Runtime_URIUnescape) {
: URIUnescape::Unescape<uc16>(isolate, source));
return *result;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index c44e40208f..4b072b1eb6 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -5,6 +5,7 @@
#ifndef V8_RUNTIME_RUNTIME_UTILS_H_
#define V8_RUNTIME_RUNTIME_UTILS_H_
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -54,6 +55,17 @@ namespace internal {
RUNTIME_ASSERT(args[index]->IsNumber()); \
double name = args.number_at(index);
+
+// Cast the given argument to a size_t and store its value in a variable with
+// the given name. If the argument is not a size_t, call IllegalOperation and
+// return.
+#define CONVERT_SIZE_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ Handle<Object> name##_object = args.at<Object>(index); \
+ size_t name = 0; \
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *name##_object, &name));
+
+
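A hypothetical runtime function using the new macro would look like this (the function is illustrative only, not part of the patch):

```cpp
// Illustrative use of CONVERT_SIZE_ARG_CHECKED: declares a size_t named
// byte_length from args[0], bailing out if the argument is not a number
// that fits in a size_t.
RUNTIME_FUNCTION(Runtime_ExampleWithSizeArg) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_SIZE_ARG_CHECKED(byte_length, 0);
  return *isolate->factory()->NewNumberFromSize(byte_length);
}
```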
// Call the specified converter on the object and store the result in
// a variable of the specified type with the given name. If the
// object is not a Number, call IllegalOperation and return.
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index f87e993cc8..da1ae40ba0 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -29,29 +29,43 @@ namespace internal {
// Entries have the form F(name, number of arguments, number of values):
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 0, 1) \
- F(TransitionElementsKind, 2, 1) \
- F(PushIfAbsent, 2, 1) \
- F(ArrayConcat, 1, 1) \
- F(RemoveArrayHoles, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(ArrayConstructor, -1, 1) \
- F(ArrayConstructorWithSubclassing, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
- F(NormalizeElements, 1, 1) \
- F(GrowArrayElements, 3, 1) \
- F(HasComplexElements, 1, 1) \
- F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */ \
- F(IsArray, 1, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
+#define FOR_EACH_INTRINSIC_ARRAY(F) \
+ F(FinishArrayPrototypeSetup, 1, 1) \
+ F(SpecialArrayFunctions, 0, 1) \
+ F(TransitionElementsKind, 2, 1) \
+ F(PushIfAbsent, 2, 1) \
+ F(ArrayConcat, 1, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(ArrayConstructor, -1, 1) \
+ F(ArrayConstructorWithSubclassing, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1) \
+ F(FixedArrayGet, 2, 1) \
+ F(FixedArraySet, 3, 1) \
F(FastOneByteArrayJoin, 2, 1)
+#define FOR_EACH_INTRINSIC_ATOMICS(F) \
+ F(AtomicsCompareExchange, 4, 1) \
+ F(AtomicsLoad, 2, 1) \
+ F(AtomicsStore, 3, 1) \
+ F(AtomicsAdd, 3, 1) \
+ F(AtomicsSub, 3, 1) \
+ F(AtomicsAnd, 3, 1) \
+ F(AtomicsOr, 3, 1) \
+ F(AtomicsXor, 3, 1) \
+ F(AtomicsExchange, 3, 1) \
+ F(AtomicsIsLockFree, 1, 1)
+
+
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowNonMethodError, 0, 1) \
F(ThrowUnsupportedSuperError, 0, 1) \
@@ -64,22 +78,20 @@ namespace internal {
F(DefineClass, 6, 1) \
F(DefineClassMethod, 3, 1) \
F(ClassGetSourceCode, 1, 1) \
- F(LoadFromSuper, 3, 1) \
- F(LoadKeyedFromSuper, 3, 1) \
+ F(LoadFromSuper, 4, 1) \
+ F(LoadKeyedFromSuper, 4, 1) \
F(StoreToSuper_Strict, 4, 1) \
F(StoreToSuper_Sloppy, 4, 1) \
F(StoreKeyedToSuper_Strict, 4, 1) \
F(StoreKeyedToSuper_Sloppy, 4, 1) \
F(HandleStepInForDerivedConstructors, 1, 1) \
- F(DefaultConstructorCallSuper, 0, 1) \
- F(CallSuperWithSpread, 0, 1)
+ F(DefaultConstructorCallSuper, 2, 1) \
+ F(CallSuperWithSpread, 1, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(StringGetRawHashField, 1, 1) \
F(TheHole, 0, 1) \
- F(FixedArrayGet, 2, 1) \
- F(FixedArraySet, 3, 1) \
F(JSCollectionGetTable, 1, 1) \
F(GenericHash, 1, 1) \
F(SetInitialize, 1, 1) \
@@ -100,10 +112,10 @@ namespace internal {
F(GetWeakMapEntries, 2, 1) \
F(MapIteratorNext, 2, 1) \
F(WeakCollectionInitialize, 1, 1) \
- F(WeakCollectionGet, 2, 1) \
- F(WeakCollectionHas, 2, 1) \
- F(WeakCollectionDelete, 2, 1) \
- F(WeakCollectionSet, 3, 1) \
+ F(WeakCollectionGet, 3, 1) \
+ F(WeakCollectionHas, 3, 1) \
+ F(WeakCollectionDelete, 3, 1) \
+ F(WeakCollectionSet, 4, 1) \
F(GetWeakSetValues, 2, 1) \
F(ObservationWeakMapCreate, 0, 1)
@@ -115,12 +127,14 @@ namespace internal {
F(NotifyDeoptimized, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(TryInstallOptimizedCode, 1, 1) \
- F(CompileString, 2, 1)
+ F(CompileString, 2, 1) \
+ F(ResolvePossiblyDirectEval, 5, 1)
#define FOR_EACH_INTRINSIC_DATE(F) \
F(DateMakeDay, 2, 1) \
F(DateSetValue, 3, 1) \
+ F(IsDate, 1, 1) \
F(ThrowNotDateError, 0, 1) \
F(DateCurrentTime, 0, 1) \
F(DateParseString, 2, 1) \
@@ -134,6 +148,7 @@ namespace internal {
F(DebugBreak, 0, 1) \
F(SetDebugEventListener, 2, 1) \
F(ScheduleBreak, 0, 1) \
+ F(DebugGetInternalProperties, 1, 1) \
F(DebugGetPropertyDetails, 2, 1) \
F(DebugGetProperty, 2, 1) \
F(DebugPropertyTypeFromDetails, 1, 1) \
@@ -187,6 +202,13 @@ namespace internal {
F(DebugBreakInOptimizedCode, 0, 1)
+#define FOR_EACH_INTRINSIC_FORIN(F) \
+ F(ForInDone, 2, 1) \
+ F(ForInFilter, 2, 1) \
+ F(ForInNext, 4, 1) \
+ F(ForInStep, 1, 1)
+
+
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(IsSloppyModeFunction, 1, 1) \
F(FunctionGetName, 1, 1) \
@@ -207,8 +229,9 @@ namespace internal {
F(FunctionIsBuiltin, 1, 1) \
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
+ F(ThrowStrongModeTooFewArguments, 0, 1) \
F(IsConstructor, 1, 1) \
- F(SetInlineBuiltinFlag, 1, 1) \
+ F(SetForceInlineFlag, 1, 1) \
F(FunctionBindArguments, 4, 1) \
F(BoundFunctionGetBindings, 1, 1) \
F(NewObjectFromBound, 1, 1) \
@@ -216,6 +239,7 @@ namespace internal {
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
+ F(GetOriginalConstructor, 0, 1) \
F(CallFunction, -1 /* receiver + n args + function */, 1) \
F(IsConstructCall, 0, 1) \
F(IsFunction, 1, 1)
@@ -273,7 +297,11 @@ namespace internal {
F(UnwindAndFindExceptionHandler, 0, 1) \
F(PromoteScheduledException, 0, 1) \
F(ThrowReferenceError, 1, 1) \
+ F(NewTypeError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewReferenceError, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowStrongModeImplicitConversion, 0, 1) \
F(PromiseRejectEvent, 3, 1) \
F(PromiseRevokeReject, 1, 1) \
F(PromiseHasHandlerSymbol, 0, 1) \
@@ -302,7 +330,9 @@ namespace internal {
F(IncrementStatsCounter, 1, 1) \
F(Likely, 1, 1) \
F(Unlikely, 1, 1) \
- F(HarmonyToString, 0, 1)
+ F(HarmonyToString, 0, 1) \
+ F(GetTypeFeedbackVector, 1, 1) \
+ F(GetCallerJSFunction, 0, 1)
#define FOR_EACH_INTRINSIC_JSON(F) \
@@ -401,16 +431,17 @@ namespace internal {
F(GetOwnProperty, 2, 1) \
F(PreventExtensions, 1, 1) \
F(IsExtensible, 1, 1) \
- F(DisableAccessChecks, 1, 1) \
- F(EnableAccessChecks, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
F(ObjectFreeze, 1, 1) \
F(ObjectSeal, 1, 1) \
F(GetProperty, 2, 1) \
+ F(GetPropertyStrong, 2, 1) \
F(KeyedGetProperty, 2, 1) \
+ F(KeyedGetPropertyStrong, 2, 1) \
F(AddNamedProperty, 4, 1) \
F(SetProperty, 4, 1) \
- F(AddElement, 4, 1) \
+ F(AddElement, 3, 1) \
+ F(AppendElement, 2, 1) \
F(DeleteProperty, 3, 1) \
F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
@@ -449,6 +480,7 @@ namespace internal {
F(IsObject, 1, 1) \
F(IsUndetectableObject, 1, 1) \
F(IsSpecObject, 1, 1) \
+ F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1)
@@ -502,8 +534,8 @@ namespace internal {
F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
F(NewSloppyArguments, 3, 1) \
F(NewStrictArguments, 3, 1) \
- F(NewRestParam, 3, 1) \
- F(NewRestParamSlow, 1, 1) \
+ F(NewRestParam, 4, 1) \
+ F(NewRestParamSlow, 2, 1) \
F(NewClosureFromStubFailure, 1, 1) \
F(NewClosure, 3, 1) \
F(NewScriptContext, 2, 1) \
@@ -559,14 +591,13 @@ namespace internal {
F(StringGetLength, 1, 1)
-#define FOR_EACH_INTRINSIC_SYMBOL(F) \
- F(CreateSymbol, 1, 1) \
- F(CreatePrivateSymbol, 1, 1) \
- F(CreatePrivateOwnSymbol, 1, 1) \
- F(CreateGlobalPrivateOwnSymbol, 1, 1) \
- F(NewSymbolWrapper, 1, 1) \
- F(SymbolDescription, 1, 1) \
- F(SymbolRegistry, 0, 1) \
+#define FOR_EACH_INTRINSIC_SYMBOL(F) \
+ F(CreateSymbol, 1, 1) \
+ F(CreatePrivateSymbol, 1, 1) \
+ F(CreateGlobalPrivateSymbol, 1, 1) \
+ F(NewSymbolWrapper, 1, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1)
@@ -581,6 +612,7 @@ namespace internal {
F(GetOptimizationStatus, -1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
F(GetOptimizationCount, 1, 1) \
+ F(GetUndetectable, 0, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
@@ -627,12 +659,12 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferInitialize, 2, 1) \
+ F(ArrayBufferInitialize, 3, 1) \
F(ArrayBufferGetByteLength, 1, 1) \
F(ArrayBufferSliceImpl, 3, 1) \
F(ArrayBufferIsView, 1, 1) \
F(ArrayBufferNeuter, 1, 1) \
- F(TypedArrayInitialize, 5, 1) \
+ F(TypedArrayInitialize, 6, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
F(ArrayBufferViewGetByteLength, 1, 1) \
F(ArrayBufferViewGetByteOffset, 1, 1) \
@@ -642,6 +674,8 @@ namespace internal {
F(TypedArraySetFastCases, 3, 1) \
F(TypedArrayMaxSizeInHeap, 0, 1) \
F(IsTypedArray, 1, 1) \
+ F(IsSharedTypedArray, 1, 1) \
+ F(IsSharedIntegerTypedArray, 1, 1) \
F(DataViewInitialize, 4, 1) \
F(DataViewGetUint8, 3, 1) \
F(DataViewGetInt8, 3, 1) \
@@ -666,21 +700,20 @@ namespace internal {
F(URIUnescape, 1, 1)
-#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- F(LoadLookupSlot, 2, 2) \
- F(LoadLookupSlotNoReferenceError, 2, 2) \
- F(ResolvePossiblyDirectEval, 6, 2) \
- F(ForInInit, 2, 2) /* TODO(turbofan): Only temporary */ \
- F(ForInNext, 4, 2) /* TODO(turbofan): Only temporary */
+#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
+ F(LoadLookupSlot, 2, 2) \
+ F(LoadLookupSlotNoReferenceError, 2, 2)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
FOR_EACH_INTRINSIC_ARRAY(F) \
+ FOR_EACH_INTRINSIC_ATOMICS(F) \
FOR_EACH_INTRINSIC_CLASSES(F) \
FOR_EACH_INTRINSIC_COLLECTIONS(F) \
FOR_EACH_INTRINSIC_COMPILER(F) \
FOR_EACH_INTRINSIC_DATE(F) \
FOR_EACH_INTRINSIC_DEBUG(F) \
+ FOR_EACH_INTRINSIC_FORIN(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
FOR_EACH_INTRINSIC_I18N(F) \
@@ -787,18 +820,24 @@ class Runtime : public AllStatic {
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
MUST_USE_RESULT static MaybeHandle<Object> GetElementOrCharAt(
- Isolate* isolate, Handle<Object> object, uint32_t index);
+ Isolate* isolate, Handle<Object> object, uint32_t index,
+ LanguageMode language_mode = SLOPPY);
+
+ MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> DefineObjectProperty(
- Handle<JSObject> object, Handle<Object> key, Handle<Object> value,
- PropertyAttributes attr);
-
MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
- Isolate* isolate, Handle<Object> object, Handle<Object> key);
+ Isolate* isolate, Handle<Object> object, Handle<Object> key,
+ LanguageMode language_mode = SLOPPY);
+
+ MUST_USE_RESULT static MaybeHandle<Object> KeyedGetObjectProperty(
+ Isolate* isolate, Handle<Object> receiver_obj, Handle<Object> key_obj,
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> GetPrototype(
Isolate* isolate, Handle<Object> object);
@@ -809,12 +848,13 @@ class Runtime : public AllStatic {
static void SetupArrayBuffer(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
bool is_external, void* data,
- size_t allocated_length);
+ size_t allocated_length,
+ SharedFlag shared = SharedFlag::kNotShared);
- static bool SetupArrayBufferAllocatingData(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- size_t allocated_length,
- bool initialize = true);
+ static bool SetupArrayBufferAllocatingData(
+ Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t allocated_length, bool initialize = true,
+ SharedFlag shared = SharedFlag::kNotShared);
static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
@@ -843,14 +883,28 @@ class Runtime : public AllStatic {
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
- Handle<FixedArray> elements);
+ Handle<FixedArray> elements, bool is_strong);
+
+
+ static void JSMapInitialize(Isolate* isolate, Handle<JSMap> map);
+ static void JSMapClear(Isolate* isolate, Handle<JSMap> map);
+ static void JSSetInitialize(Isolate* isolate, Handle<JSSet> set);
+ static void JSSetClear(Isolate* isolate, Handle<JSSet> set);
static void WeakCollectionInitialize(
Isolate* isolate, Handle<JSWeakCollection> weak_collection);
static void WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key, Handle<Object> value);
+ Handle<Object> key, Handle<Object> value,
+ int32_t hash);
static bool WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
Handle<Object> key);
+ static bool WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, int32_t hash);
+
+ static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
+ Handle<Object>);
+
+ static bool AtomicIsLockFree(uint32_t size);
};
@@ -867,6 +921,29 @@ class DeclareGlobalsNativeFlag : public BitField<bool, 1, 1> {};
STATIC_ASSERT(LANGUAGE_END == 3);
class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 2> {};
+//---------------------------------------------------------------------------
+// Inline functions
+
+// Assume that 32-bit architectures don't have 64-bit atomic ops.
+// TODO(binji): can we do better here?
+#if V8_TARGET_ARCH_64_BIT && V8_HOST_ARCH_64_BIT
+
+#define ATOMICS_REQUIRE_LOCK_64_BIT 0
+
+inline bool Runtime::AtomicIsLockFree(uint32_t size) {
+ return size == 1 || size == 2 || size == 4 || size == 8;
+}
+
+#else
+
+#define ATOMICS_REQUIRE_LOCK_64_BIT 1
+
+inline bool Runtime::AtomicIsLockFree(uint32_t size) {
+ return size == 1 || size == 2 || size == 4;
+}
+
+#endif
+
} // namespace internal
} // namespace v8
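Under these definitions a 64-bit build reports 1-, 2-, 4- and 8-byte accesses as lock-free, while other targets cap at 4 bytes; this predicate backs the `AtomicsIsLockFree` intrinsic declared earlier. A standalone restatement of the 64-bit case (not V8 code):

```cpp
#include <cstdint>
#include <iostream>

// Same predicate as Runtime::AtomicIsLockFree on a 64-bit target.
bool AtomicIsLockFree64(uint32_t size) {
  return size == 1 || size == 2 || size == 4 || size == 8;
}

int main() {
  for (uint32_t size : {1u, 2u, 3u, 4u, 8u}) {
    std::cout << size << " -> " << (AtomicIsLockFree64(size) ? "yes" : "no")
              << "\n";
  }
  return 0;
}
```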
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index a0ae5b2ced..5214b7b8d1 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -212,5 +212,5 @@ uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info,
}
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 5f6f2e25a7..79b5bff855 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -775,4 +775,5 @@ void Sampler::DoSample() {
#endif // USE_SIGNALS
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index a710330a2b..442bc75d6c 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -356,10 +356,7 @@ size_t ExternalStreamingStream::FillBuffer(size_t position) {
// chunk. This will only happen when the chunk was really small. We
// don't handle the case where a UTF-8 character is split over several
// chunks; in that case V8 won't crash, but it will be a parse error.
- delete[] current_data_;
- current_data_ = NULL;
- current_data_length_ = 0;
- current_data_offset_ = 0;
+ FlushCurrent();
continue; // Request a new chunk.
}
}
@@ -383,15 +380,91 @@ size_t ExternalStreamingStream::FillBuffer(size_t position) {
// Did we use all the data in the data chunk?
if (current_data_offset_ == current_data_length_) {
- delete[] current_data_;
- current_data_ = NULL;
- current_data_length_ = 0;
- current_data_offset_ = 0;
+ FlushCurrent();
}
}
return data_in_buffer;
}
+
+bool ExternalStreamingStream::SetBookmark() {
+ // Bookmarking for this stream is a bit more complex than expected, since
+ // the stream state is distributed over several places:
+ // - pos_ (inherited from Utf16CharacterStream)
+ // - buffer_cursor_ and buffer_end_ (also from Utf16CharacterStream)
+ // - buffer_ (from BufferedUtf16CharacterStream)
+ // - current_data_ (+ .._offset_ and .._length) (this class)
+ // - utf8_split_char_buffer_* (a partial utf8 symbol at the block boundary)
+ //
+ // The underlying source_stream_ instance likely could re-construct this
+ // local data for us, but with the given interfaces we have no way of
+ // accomplishing this. Thus, we'll have to save all data locally.
+ //
+ // What gets saved where:
+ // - pos_ => bookmark_
+ // - buffer_[buffer_cursor_ .. buffer_end_] => bookmark_buffer_
+ // - current_data_[.._offset_ .. .._length_] => bookmark_data_
+ // - utf8_split_char_buffer_* => bookmark_utf8_split...
+
+ bookmark_ = pos_;
+
+ size_t buffer_length = buffer_end_ - buffer_cursor_;
+ bookmark_buffer_.Dispose();
+ bookmark_buffer_ = Vector<uint16_t>::New(static_cast<int>(buffer_length));
+ CopyCharsUnsigned(bookmark_buffer_.start(), buffer_cursor_, buffer_length);
+
+ size_t data_length = current_data_length_ - current_data_offset_;
+ bookmark_data_.Dispose();
+ bookmark_data_ = Vector<uint8_t>::New(static_cast<int>(data_length));
+ CopyBytes(bookmark_data_.start(), current_data_ + current_data_offset_,
+ data_length);
+
+ bookmark_utf8_split_char_buffer_length_ = utf8_split_char_buffer_length_;
+ for (size_t i = 0; i < utf8_split_char_buffer_length_; i++) {
+ bookmark_utf8_split_char_buffer_[i] = utf8_split_char_buffer_[i];
+ }
+
+ return source_stream_->SetBookmark();
+}
+
+
+void ExternalStreamingStream::ResetToBookmark() {
+ source_stream_->ResetToBookmark();
+ FlushCurrent();
+
+ pos_ = bookmark_;
+
+ // bookmark_data_* => current_data_*
+ // (current_data_ assumes ownership of its memory.)
+ uint8_t* data = new uint8_t[bookmark_data_.length()];
+ current_data_offset_ = 0;
+ current_data_length_ = bookmark_data_.length();
+ CopyCharsUnsigned(data, bookmark_data_.begin(), bookmark_data_.length());
+ delete[] current_data_;
+ current_data_ = data;
+
+ // bookmark_buffer_ needs to be copied to buffer_.
+ CopyCharsUnsigned(buffer_, bookmark_buffer_.begin(),
+ bookmark_buffer_.length());
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_ + bookmark_buffer_.length();
+
+ // utf8 split char buffer
+ utf8_split_char_buffer_length_ = bookmark_utf8_split_char_buffer_length_;
+ for (size_t i = 0; i < bookmark_utf8_split_char_buffer_length_; i++) {
+ utf8_split_char_buffer_[i] = bookmark_utf8_split_char_buffer_[i];
+ }
+}
+
+
+void ExternalStreamingStream::FlushCurrent() {
+ delete[] current_data_;
+ current_data_ = NULL;
+ current_data_length_ = 0;
+ current_data_offset_ = 0;
+}
+
+
void ExternalStreamingStream::HandleUtf8SplitCharacters(
size_t* data_in_buffer) {
// Note the following property of UTF-8 which makes this function possible:
@@ -486,4 +559,5 @@ void ExternalTwoByteStringUtf16CharacterStream::ResetToBookmark() {
pos_ = bookmark_;
buffer_cursor_ = raw_data_ + bookmark_;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
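The FillBuffer logic above leans on UTF-8 being self-synchronizing: continuation bytes always match 10xxxxxx, so a decoder can tell at a chunk boundary how many bytes of a character are still missing and carry them into the next chunk (the job of utf8_split_char_buffer_). A standalone sketch of that carry-over, with hypothetical names and none of V8's types:

    #include <cstdint>
    #include <vector>

    // Expected byte count of a UTF-8 sequence, judged from its lead byte.
    static int SequenceLength(uint8_t lead) {
      if (lead < 0x80) return 1;           // ASCII
      if ((lead & 0xE0) == 0xC0) return 2;
      if ((lead & 0xF0) == 0xE0) return 3;
      if ((lead & 0xF8) == 0xF0) return 4;
      return 1;  // continuation or invalid lead byte; treat as one byte
    }

    // Returns how many bytes of `chunk` form complete characters. The
    // trailing partial character (at most 3 bytes for well-formed input)
    // is saved into `carry`, to be prepended to the next chunk.
    static size_t CompletePrefix(const uint8_t* chunk, size_t length,
                                 std::vector<uint8_t>* carry) {
      size_t complete = length;
      for (size_t back = 1; back <= 3 && back <= length; ++back) {
        uint8_t b = chunk[length - back];
        if ((b & 0xC0) == 0x80) continue;  // continuation byte; keep looking
        if (static_cast<size_t>(SequenceLength(b)) > back)
          complete = length - back;
        break;  // found the last lead byte; done either way
      }
      carry->assign(chunk + complete, chunk + length);
      return complete;
    }

As in the patch, a character split over more than two chunks is not handled; that case is rare enough that V8 accepts a parse error rather than buffering further.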
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/scanner-character-streams.h
index 36d84bc0f6..582165710d 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/scanner-character-streams.h
@@ -92,9 +92,15 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
current_data_(NULL),
current_data_offset_(0),
current_data_length_(0),
- utf8_split_char_buffer_length_(0) {}
-
- virtual ~ExternalStreamingStream() { delete[] current_data_; }
+ utf8_split_char_buffer_length_(0),
+ bookmark_(0),
+ bookmark_utf8_split_char_buffer_length_(0) {}
+
+ virtual ~ExternalStreamingStream() {
+ delete[] current_data_;
+ bookmark_buffer_.Dispose();
+ bookmark_data_.Dispose();
+ }
size_t BufferSeekForward(size_t delta) override {
// We never need to seek forward when streaming scripts. We only seek
@@ -107,8 +113,12 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
size_t FillBuffer(size_t position) override;
+ virtual bool SetBookmark() override;
+ virtual void ResetToBookmark() override;
+
private:
void HandleUtf8SplitCharacters(size_t* data_in_buffer);
+ void FlushCurrent();
ScriptCompiler::ExternalSourceStream* source_stream_;
v8::ScriptCompiler::StreamedSource::Encoding encoding_;
@@ -118,6 +128,14 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
// For converting UTF-8 characters which are split across two data chunks.
uint8_t utf8_split_char_buffer_[4];
size_t utf8_split_char_buffer_length_;
+
+ // Bookmark support. See comments in ExternalStreamingStream::SetBookmark
+ // for additional details.
+ size_t bookmark_;
+ Vector<uint16_t> bookmark_buffer_;
+ Vector<uint8_t> bookmark_data_;
+ uint8_t bookmark_utf8_split_char_buffer_[4];
+ size_t bookmark_utf8_split_char_buffer_length_;
};
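SetBookmark and ResetToBookmark above snapshot four pieces of state: the absolute position, the unread tail of the decoded buffer, the unconsumed tail of the current raw chunk, and any pending partial UTF-8 bytes. A reduced sketch of the same snapshot-and-replay pattern for a two-stage buffered reader; the names here are illustrative, not V8's:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct BufferedStream {
      size_t pos = 0;                 // absolute position in the source
      std::vector<uint8_t> raw;       // undecoded bytes from the source
      size_t raw_offset = 0;          // how much of `raw` is consumed
      std::vector<uint16_t> decoded;  // decoded code units
      size_t cursor = 0;              // read position within `decoded`

      struct Bookmark {
        size_t pos = 0;
        std::vector<uint8_t> raw_rest;       // unconsumed part of `raw`
        std::vector<uint16_t> decoded_rest;  // unread part of `decoded`
      };

      // Save only the unconsumed tails; consumed data cannot be re-read.
      Bookmark Save() const {
        Bookmark b;
        b.pos = pos;
        b.raw_rest.assign(raw.begin() + raw_offset, raw.end());
        b.decoded_rest.assign(decoded.begin() + cursor, decoded.end());
        return b;
      }

      // Drop the live state and continue from the snapshot.
      void Restore(const Bookmark& b) {
        pos = b.pos;
        raw = b.raw_rest;
        raw_offset = 0;
        decoded = b.decoded_rest;
        cursor = 0;
      }
    };

After Restore, decoding resumes as if the snapshot tails had just arrived from the source, which is why ResetToBookmark above can simply FlushCurrent() and rebuild current_data_ and buffer_ from the bookmark copies.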
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 8405591dc8..ad7c7d983c 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -42,7 +42,6 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
bookmark_c0_(kNoBookmark),
octal_pos_(Location::invalid()),
harmony_modules_(false),
- harmony_classes_(false),
harmony_unicode_(false) {
bookmark_current_.literal_chars = &bookmark_current_literal_;
bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
@@ -1099,79 +1098,74 @@ uc32 Scanner::ScanUnicodeEscape() {
// ----------------------------------------------------------------------------
// Keyword Matcher
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
- KEYWORD_GROUP('b') \
- KEYWORD("break", Token::BREAK) \
- KEYWORD_GROUP('c') \
- KEYWORD("case", Token::CASE) \
- KEYWORD("catch", Token::CATCH) \
- KEYWORD("class", \
- harmony_classes ? Token::CLASS : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("const", Token::CONST) \
- KEYWORD("continue", Token::CONTINUE) \
- KEYWORD_GROUP('d') \
- KEYWORD("debugger", Token::DEBUGGER) \
- KEYWORD("default", Token::DEFAULT) \
- KEYWORD("delete", Token::DELETE) \
- KEYWORD("do", Token::DO) \
- KEYWORD_GROUP('e') \
- KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("export", \
- harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("extends", \
- harmony_classes ? Token::EXTENDS : Token::FUTURE_RESERVED_WORD) \
- KEYWORD_GROUP('f') \
- KEYWORD("false", Token::FALSE_LITERAL) \
- KEYWORD("finally", Token::FINALLY) \
- KEYWORD("for", Token::FOR) \
- KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('i') \
- KEYWORD("if", Token::IF) \
- KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("import", \
- harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("in", Token::IN) \
- KEYWORD("instanceof", Token::INSTANCEOF) \
- KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('l') \
- KEYWORD("let", Token::LET) \
- KEYWORD_GROUP('n') \
- KEYWORD("new", Token::NEW) \
- KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('p') \
- KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('r') \
- KEYWORD("return", Token::RETURN) \
- KEYWORD_GROUP('s') \
- KEYWORD("static", harmony_classes ? Token::STATIC \
- : Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("super", \
- harmony_classes ? Token::SUPER : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("switch", Token::SWITCH) \
- KEYWORD_GROUP('t') \
- KEYWORD("this", Token::THIS) \
- KEYWORD("throw", Token::THROW) \
- KEYWORD("true", Token::TRUE_LITERAL) \
- KEYWORD("try", Token::TRY) \
- KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('v') \
- KEYWORD("var", Token::VAR) \
- KEYWORD("void", Token::VOID) \
- KEYWORD_GROUP('w') \
- KEYWORD("while", Token::WHILE) \
- KEYWORD("with", Token::WITH) \
- KEYWORD_GROUP('y') \
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
+ KEYWORD_GROUP('b') \
+ KEYWORD("break", Token::BREAK) \
+ KEYWORD_GROUP('c') \
+ KEYWORD("case", Token::CASE) \
+ KEYWORD("catch", Token::CATCH) \
+ KEYWORD("class", Token::CLASS) \
+ KEYWORD("const", Token::CONST) \
+ KEYWORD("continue", Token::CONTINUE) \
+ KEYWORD_GROUP('d') \
+ KEYWORD("debugger", Token::DEBUGGER) \
+ KEYWORD("default", Token::DEFAULT) \
+ KEYWORD("delete", Token::DELETE) \
+ KEYWORD("do", Token::DO) \
+ KEYWORD_GROUP('e') \
+ KEYWORD("else", Token::ELSE) \
+ KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("export", \
+ harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("extends", Token::EXTENDS) \
+ KEYWORD_GROUP('f') \
+ KEYWORD("false", Token::FALSE_LITERAL) \
+ KEYWORD("finally", Token::FINALLY) \
+ KEYWORD("for", Token::FOR) \
+ KEYWORD("function", Token::FUNCTION) \
+ KEYWORD_GROUP('i') \
+ KEYWORD("if", Token::IF) \
+ KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("import", \
+ harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("in", Token::IN) \
+ KEYWORD("instanceof", Token::INSTANCEOF) \
+ KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('l') \
+ KEYWORD("let", Token::LET) \
+ KEYWORD_GROUP('n') \
+ KEYWORD("new", Token::NEW) \
+ KEYWORD("null", Token::NULL_LITERAL) \
+ KEYWORD_GROUP('p') \
+ KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('r') \
+ KEYWORD("return", Token::RETURN) \
+ KEYWORD_GROUP('s') \
+ KEYWORD("static", Token::STATIC) \
+ KEYWORD("super", Token::SUPER) \
+ KEYWORD("switch", Token::SWITCH) \
+ KEYWORD_GROUP('t') \
+ KEYWORD("this", Token::THIS) \
+ KEYWORD("throw", Token::THROW) \
+ KEYWORD("true", Token::TRUE_LITERAL) \
+ KEYWORD("try", Token::TRY) \
+ KEYWORD("typeof", Token::TYPEOF) \
+ KEYWORD_GROUP('v') \
+ KEYWORD("var", Token::VAR) \
+ KEYWORD("void", Token::VOID) \
+ KEYWORD_GROUP('w') \
+ KEYWORD("while", Token::WHILE) \
+ KEYWORD("with", Token::WITH) \
+ KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD)
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length,
- bool harmony_modules,
- bool harmony_classes) {
+ bool harmony_modules) {
DCHECK(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -1219,7 +1213,7 @@ bool Scanner::IdentifierIsFutureStrictReserved(
}
return Token::FUTURE_STRICT_RESERVED_WORD ==
KeywordOrIdentifierToken(string->raw_data(), string->length(),
- harmony_modules_, harmony_classes_);
+ harmony_modules_);
}
@@ -1253,7 +1247,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
literal.Complete();
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
return KeywordOrIdentifierToken(chars.start(), chars.length(),
- harmony_modules_, harmony_classes_);
+ harmony_modules_);
}
HandleLeadSurrogate();
@@ -1303,10 +1297,8 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(),
- chars.length(),
- harmony_modules_,
- harmony_classes_);
+ return KeywordOrIdentifierToken(chars.start(), chars.length(),
+ harmony_modules_);
}
return Token::IDENTIFIER;
}
@@ -1433,6 +1425,13 @@ double Scanner::DoubleValue() {
}
+bool Scanner::ContainsDot() {
+ DCHECK(is_literal_one_byte());
+ Vector<const uint8_t> str = literal_one_byte_string();
+ return std::find(str.begin(), str.end(), '.') != str.end();
+}
+
+
int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
if (is_literal_one_byte()) {
return finder->AddOneByteSymbol(literal_one_byte_string(), value);
@@ -1630,4 +1629,5 @@ byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
return backing_store_.EndSequence().start();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
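The rewritten KEYWORDS table is an X-macro: one list, expanded at the use site with different definitions of KEYWORD_GROUP and KEYWORD, generates a switch over the first character followed by length-checked comparisons. A trimmed, self-contained sketch of the pattern (three keywords only; the real table and its kMinLength/kMaxLength bounds are above):

    #include <cstring>

    enum class Token { BREAK, CASE, CATCH, IDENTIFIER };

    // One list, reused wherever keywords must be enumerated.
    #define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
      KEYWORD_GROUP('b')                     \
      KEYWORD("break", Token::BREAK)         \
      KEYWORD_GROUP('c')                     \
      KEYWORD("case", Token::CASE)           \
      KEYWORD("catch", Token::CATCH)

    Token KeywordOrIdentifier(const char* input, int length) {
      switch (input[0]) {
        default:
    // Each group opens a `case` on the first character; each keyword emits a
    // length check plus a comparison of the remaining characters.
    #define KEYWORD_GROUP_CASE(ch) \
      break;                       \
      case ch:
    #define KEYWORD(keyword, token)                               \
      {                                                           \
        const int kLen = sizeof(keyword) - 1;                     \
        if (length == kLen &&                                     \
            std::memcmp(input + 1, keyword + 1, kLen - 1) == 0) { \
          return token;                                           \
        }                                                         \
      }
          KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
    #undef KEYWORD
    #undef KEYWORD_GROUP_CASE
      }
      return Token::IDENTIFIER;
    }

Because the switch already consumed the first character, each candidate costs one length compare and a memcmp of length - 1 bytes, with no hashing and no runtime tables.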
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index f387d84133..c842f987b6 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -435,6 +435,7 @@ class Scanner {
const AstRawString* CurrentRawSymbol(AstValueFactory* ast_value_factory);
double DoubleValue();
+ bool ContainsDot();
bool LiteralMatches(const char* data, int length, bool allow_escapes = true) {
if (is_literal_one_byte() &&
literal_length() == length &&
@@ -483,12 +484,7 @@ class Scanner {
void SetHarmonyModules(bool modules) {
harmony_modules_ = modules;
}
- bool HarmonyClasses() const {
- return harmony_classes_;
- }
- void SetHarmonyClasses(bool classes) {
- harmony_classes_ = classes;
- }
+
bool HarmonyUnicode() const { return harmony_unicode_; }
void SetHarmonyUnicode(bool unicode) { harmony_unicode_ = unicode; }
@@ -803,8 +799,6 @@ class Scanner {
bool has_multiline_comment_before_next_;
// Whether we scan 'module', 'import', 'export' as keywords.
bool harmony_modules_;
- // Whether we scan 'class', 'extends', 'static' and 'super' as keywords.
- bool harmony_classes_;
// Whether we allow \u{xxxxx}.
bool harmony_unicode_;
};
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 012744992f..4ed22b19dc 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
+#include "src/bootstrapper.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
@@ -18,22 +19,42 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
// Collect stack and context locals.
ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
+ ZoneList<Variable*> context_globals(scope->ContextGlobalCount(), zone);
ZoneList<Variable*> strong_mode_free_variables(0, zone);
scope->CollectStackAndContextLocals(&stack_locals, &context_locals,
+ &context_globals,
&strong_mode_free_variables);
const int stack_local_count = stack_locals.length();
const int context_local_count = context_locals.length();
+ const int context_global_count = context_globals.length();
const int strong_mode_free_variable_count =
strong_mode_free_variables.length();
// Make sure we allocate the correct amount.
- DCHECK(scope->ContextLocalCount() == context_local_count);
+ DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
+ DCHECK_EQ(scope->ContextGlobalCount(), context_global_count);
bool simple_parameter_list =
scope->is_function_scope() ? scope->is_simple_parameter_list() : true;
+ // Determine use and location of the "this" binding if it is present.
+ VariableAllocationInfo receiver_info;
+ if (scope->has_this_declaration()) {
+ Variable* var = scope->receiver();
+ if (!var->is_used()) {
+ receiver_info = UNUSED;
+ } else if (var->IsContextSlot()) {
+ receiver_info = CONTEXT;
+ } else {
+ DCHECK(var->IsParameter());
+ receiver_info = STACK;
+ }
+ } else {
+ receiver_info = NONE;
+ }
+
// Determine use and location of the function variable if it is present.
- FunctionVariableInfo function_name_info;
+ VariableAllocationInfo function_name_info;
VariableMode function_variable_mode;
if (scope->is_function_scope() && scope->function() != NULL) {
Variable* var = scope->function()->proxy()->var();
@@ -50,13 +71,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
function_name_info = NONE;
function_variable_mode = VAR;
}
+ DCHECK(context_global_count == 0 || scope->scope_type() == SCRIPT_SCOPE);
const bool has_function_name = function_name_info != NONE;
+ const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
const int parameter_count = scope->num_parameters();
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
+ 2 * context_global_count +
3 * strong_mode_free_variable_count +
- (has_function_name ? 2 : 0);
+ (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
@@ -65,17 +89,18 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
LanguageModeField::encode(scope->language_mode()) |
+ ReceiverVariableField::encode(receiver_info) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(scope->asm_module()) |
AsmFunctionField::encode(scope->asm_function()) |
IsSimpleParameterListField::encode(simple_parameter_list) |
- BlockScopeIsClassScopeField::encode(scope->is_class_scope()) |
FunctionKindField::encode(scope->function_kind());
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
+ scope_info->SetContextGlobalCount(context_global_count);
scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
int index = kVariablePartIndex;
@@ -116,6 +141,12 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
scope_info->set(index++, *context_locals[i]->name());
}
+ // Add context globals' names.
+ DCHECK(index == scope_info->ContextGlobalNameEntriesIndex());
+ for (int i = 0; i < context_global_count; ++i) {
+ scope_info->set(index++, *context_globals[i]->name());
+ }
+
// Add context locals' info.
DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
for (int i = 0; i < context_local_count; ++i) {
@@ -127,6 +158,18 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
scope_info->set(index++, Smi::FromInt(value));
}
+ // Add context globals' info.
+ DCHECK(index == scope_info->ContextGlobalInfoEntriesIndex());
+ for (int i = 0; i < context_global_count; ++i) {
+ Variable* var = context_globals[i];
+ // TODO(ishell): do we need this kind of info for globals here?
+ uint32_t value =
+ ContextLocalMode::encode(var->mode()) |
+ ContextLocalInitFlag::encode(var->initialization_flag()) |
+ ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
+ scope_info->set(index++, Smi::FromInt(value));
+ }
+
DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
for (int i = 0; i < strong_mode_free_variable_count; ++i) {
scope_info->set(index++, *strong_mode_free_variables[i]->name());
@@ -146,6 +189,15 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
scope_info->set(index++, *end_position);
}
+ // If the receiver is allocated, add its index.
+ DCHECK(index == scope_info->ReceiverEntryIndex());
+ if (has_receiver) {
+ int var_index = scope->receiver()->index();
+ scope_info->set(index++, Smi::FromInt(var_index));
+ // ?? DCHECK(receiver_info != CONTEXT || var_index ==
+ // scope_info->ContextLength() - 1);
+ }
+
// If present, add the function variable name and its index.
DCHECK(index == scope_info->FunctionNameEntryIndex());
if (has_function_name) {
@@ -165,6 +217,79 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
}
+Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
+ DCHECK(isolate->bootstrapper()->IsActive());
+
+ const int stack_local_count = 0;
+ const int context_local_count = 1;
+ const int context_global_count = 0;
+ const int strong_mode_free_variable_count = 0;
+ const bool simple_parameter_list = true;
+ const VariableAllocationInfo receiver_info = CONTEXT;
+ const VariableAllocationInfo function_name_info = NONE;
+ const VariableMode function_variable_mode = VAR;
+ const bool has_function_name = false;
+ const bool has_receiver = true;
+ const int parameter_count = 0;
+ const int length = kVariablePartIndex + parameter_count +
+ (1 + stack_local_count) + 2 * context_local_count +
+ 2 * context_global_count +
+ 3 * strong_mode_free_variable_count +
+ (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
+
+ Factory* factory = isolate->factory();
+ Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
+
+ // Encode the flags.
+ int flags = ScopeTypeField::encode(SCRIPT_SCOPE) |
+ CallsEvalField::encode(false) |
+ LanguageModeField::encode(SLOPPY) |
+ ReceiverVariableField::encode(receiver_info) |
+ FunctionVariableField::encode(function_name_info) |
+ FunctionVariableMode::encode(function_variable_mode) |
+ AsmModuleField::encode(false) | AsmFunctionField::encode(false) |
+ IsSimpleParameterListField::encode(simple_parameter_list) |
+ FunctionKindField::encode(FunctionKind::kNormalFunction);
+ scope_info->SetFlags(flags);
+ scope_info->SetParameterCount(parameter_count);
+ scope_info->SetStackLocalCount(stack_local_count);
+ scope_info->SetContextLocalCount(context_local_count);
+ scope_info->SetContextGlobalCount(context_global_count);
+ scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
+
+ int index = kVariablePartIndex;
+ const int first_slot_index = 0;
+ DCHECK(index == scope_info->StackLocalFirstSlotIndex());
+ scope_info->set(index++, Smi::FromInt(first_slot_index));
+ DCHECK(index == scope_info->StackLocalEntriesIndex());
+
+ // Here we add info for context-allocated "this".
+ DCHECK(index == scope_info->ContextLocalNameEntriesIndex());
+ scope_info->set(index++, *isolate->factory()->this_string());
+ DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
+ const uint32_t value = ContextLocalMode::encode(CONST) |
+ ContextLocalInitFlag::encode(kCreatedInitialized) |
+ ContextLocalMaybeAssignedFlag::encode(kNotAssigned);
+ scope_info->set(index++, Smi::FromInt(value));
+
+ DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
+ DCHECK(index == scope_info->StrongModeFreeVariablePositionEntriesIndex());
+
+ // And here we record that this scopeinfo binds a receiver.
+ DCHECK(index == scope_info->ReceiverEntryIndex());
+ const int receiver_index = Context::MIN_CONTEXT_SLOTS + 0;
+ scope_info->set(index++, Smi::FromInt(receiver_index));
+
+ DCHECK(index == scope_info->FunctionNameEntryIndex());
+
+ DCHECK_EQ(index, scope_info->length());
+ DCHECK_EQ(scope_info->ParameterCount(), 0);
+ DCHECK_EQ(scope_info->ContextLength(), Context::MIN_CONTEXT_SLOTS + 1);
+
+ return scope_info;
+}
+
+
ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array());
}
@@ -204,22 +329,44 @@ int ScopeInfo::StackSlotCount() {
int ScopeInfo::ContextLength() {
if (length() > 0) {
int context_locals = ContextLocalCount();
+ int context_globals = ContextGlobalCount();
bool function_name_context_slot =
FunctionVariableField::decode(Flags()) == CONTEXT;
- bool has_context = context_locals > 0 || function_name_context_slot ||
+ bool has_context = context_locals > 0 || context_globals > 0 ||
+ function_name_context_slot ||
scope_type() == WITH_SCOPE ||
(scope_type() == ARROW_SCOPE && CallsSloppyEval()) ||
(scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
scope_type() == MODULE_SCOPE;
+
if (has_context) {
- return Context::MIN_CONTEXT_SLOTS + context_locals +
- (function_name_context_slot ? 1 : 0);
+ return Context::MIN_CONTEXT_SLOTS + context_locals + 2 * context_globals +
+ (function_name_context_slot ? 1 : 0);
}
}
return 0;
}
+bool ScopeInfo::HasReceiver() {
+ if (length() > 0) {
+ return NONE != ReceiverVariableField::decode(Flags());
+ } else {
+ return false;
+ }
+}
+
+
+bool ScopeInfo::HasAllocatedReceiver() {
+ if (length() > 0) {
+ VariableAllocationInfo allocation = ReceiverVariableField::decode(Flags());
+ return allocation == STACK || allocation == CONTEXT;
+ } else {
+ return false;
+ }
+}
+
+
bool ScopeInfo::HasFunctionName() {
if (length() > 0) {
return NONE != FunctionVariableField::decode(Flags());
@@ -280,14 +427,14 @@ int ScopeInfo::StackLocalIndex(int var) {
String* ScopeInfo::ContextLocalName(int var) {
- DCHECK(0 <= var && var < ContextLocalCount());
+ DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
int info_index = ContextLocalNameEntriesIndex() + var;
return String::cast(get(info_index));
}
VariableMode ScopeInfo::ContextLocalMode(int var) {
- DCHECK(0 <= var && var < ContextLocalCount());
+ DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
int info_index = ContextLocalInfoEntriesIndex() + var;
int value = Smi::cast(get(info_index))->value();
return ContextLocalMode::decode(value);
@@ -295,7 +442,7 @@ VariableMode ScopeInfo::ContextLocalMode(int var) {
InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
- DCHECK(0 <= var && var < ContextLocalCount());
+ DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
int info_index = ContextLocalInfoEntriesIndex() + var;
int value = Smi::cast(get(info_index))->value();
return ContextLocalInitFlag::decode(value);
@@ -303,7 +450,7 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
- DCHECK(0 <= var && var < ContextLocalCount());
+ DCHECK(0 <= var && var < ContextLocalCount() + ContextGlobalCount());
int info_index = ContextLocalInfoEntriesIndex() + var;
int value = Smi::cast(get(info_index))->value();
return ContextLocalMaybeAssignedFlag::decode(value);
@@ -317,7 +464,8 @@ bool ScopeInfo::LocalIsSynthetic(int var) {
// with user declarations, the current temporaries like .generator_object and
// .result start with a dot, so we can use that as a flag. It's a hack!
Handle<String> name(LocalName(var));
- return name->length() > 0 && name->Get(0) == '.';
+ return (name->length() > 0 && name->Get(0) == '.') ||
+ name->Equals(*GetIsolate()->factory()->this_string());
}
@@ -368,39 +516,56 @@ int ScopeInfo::StackSlotIndex(String* name) {
int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
Handle<String> name, VariableMode* mode,
+ VariableLocation* location,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DCHECK(name->IsInternalizedString());
DCHECK(mode != NULL);
+ DCHECK(location != NULL);
DCHECK(init_flag != NULL);
if (scope_info->length() > 0) {
ContextSlotCache* context_slot_cache =
scope_info->GetIsolate()->context_slot_cache();
- int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
- maybe_assigned_flag);
+ int result = context_slot_cache->Lookup(*scope_info, *name, mode, location,
+ init_flag, maybe_assigned_flag);
if (result != ContextSlotCache::kNotFound) {
DCHECK(result < scope_info->ContextLength());
return result;
}
+ DCHECK_EQ(scope_info->ContextGlobalNameEntriesIndex(),
+ scope_info->ContextLocalNameEntriesIndex() +
+ scope_info->ContextLocalCount());
int start = scope_info->ContextLocalNameEntriesIndex();
- int end = scope_info->ContextLocalNameEntriesIndex() +
- scope_info->ContextLocalCount();
+ int end = scope_info->ContextGlobalNameEntriesIndex() +
+ scope_info->ContextGlobalCount();
for (int i = start; i < end; ++i) {
if (*name == scope_info->get(i)) {
int var = i - start;
*mode = scope_info->ContextLocalMode(var);
*init_flag = scope_info->ContextLocalInitFlag(var);
*maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
- result = Context::MIN_CONTEXT_SLOTS + var;
- context_slot_cache->Update(scope_info, name, *mode, *init_flag,
- *maybe_assigned_flag, result);
+
+ if (var < scope_info->ContextLocalCount()) {
+ *location = VariableLocation::CONTEXT;
+ result = Context::MIN_CONTEXT_SLOTS + var;
+ } else {
+ var -= scope_info->ContextLocalCount();
+ *location = VariableLocation::GLOBAL;
+ result = Context::MIN_CONTEXT_SLOTS +
+ scope_info->ContextLocalCount() + 2 * var;
+ }
+
+ context_slot_cache->Update(scope_info, name, *mode, *location,
+ *init_flag, *maybe_assigned_flag, result);
DCHECK(result < scope_info->ContextLength());
return result;
}
}
- // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
- context_slot_cache->Update(scope_info, name, INTERNAL, kNeedsInitialization,
+ // Cache as not found. Mode, location, init flag and maybe assigned flag
+ // don't matter.
+ context_slot_cache->Update(scope_info, name, INTERNAL,
+ VariableLocation::CONTEXT, kNeedsInitialization,
kNotAssigned, -1);
}
return -1;
@@ -427,6 +592,13 @@ int ScopeInfo::ParameterIndex(String* name) {
}
+int ScopeInfo::ReceiverContextSlotIndex() {
+ if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
+ return Smi::cast(get(ReceiverEntryIndex()))->value();
+ return -1;
+}
+
+
int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
DCHECK(name->IsInternalizedString());
DCHECK(mode != NULL);
@@ -441,22 +613,17 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
}
-bool ScopeInfo::block_scope_is_class_scope() {
- return BlockScopeIsClassScopeField::decode(Flags());
-}
-
-
FunctionKind ScopeInfo::function_kind() {
return FunctionKindField::decode(Flags());
}
-bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+void ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object) {
Isolate* isolate = scope_info->GetIsolate();
int local_count = scope_info->ContextLocalCount();
- if (local_count == 0) return true;
+ if (local_count == 0) return;
// Fill all context locals to the context extension.
int first_context_var = scope_info->StackLocalCount();
int start = scope_info->ContextLocalNameEntriesIndex();
@@ -466,14 +633,12 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
// Reflect variables under TDZ as undefined in scope object.
if (value->IsTheHole()) continue;
- RETURN_ON_EXCEPTION_VALUE(
- isolate, Runtime::DefineObjectProperty(
- scope_object,
- Handle<String>(String::cast(scope_info->get(i + start))),
- value, ::NONE),
- false);
+ // This should always succeed.
+ // TODO(verwaest): Use AddDataProperty instead.
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ scope_object, handle(String::cast(scope_info->get(i + start))), value,
+ ::NONE).Check();
}
- return true;
}
@@ -498,28 +663,43 @@ int ScopeInfo::ContextLocalNameEntriesIndex() {
}
-int ScopeInfo::ContextLocalInfoEntriesIndex() {
+int ScopeInfo::ContextGlobalNameEntriesIndex() {
return ContextLocalNameEntriesIndex() + ContextLocalCount();
}
-int ScopeInfo::StrongModeFreeVariableNameEntriesIndex() {
+int ScopeInfo::ContextLocalInfoEntriesIndex() {
+ return ContextGlobalNameEntriesIndex() + ContextGlobalCount();
+}
+
+
+int ScopeInfo::ContextGlobalInfoEntriesIndex() {
return ContextLocalInfoEntriesIndex() + ContextLocalCount();
}
+int ScopeInfo::StrongModeFreeVariableNameEntriesIndex() {
+ return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
+}
+
+
int ScopeInfo::StrongModeFreeVariablePositionEntriesIndex() {
return StrongModeFreeVariableNameEntriesIndex() +
StrongModeFreeVariableCount();
}
-int ScopeInfo::FunctionNameEntryIndex() {
+int ScopeInfo::ReceiverEntryIndex() {
return StrongModeFreeVariablePositionEntriesIndex() +
2 * StrongModeFreeVariableCount();
}
+int ScopeInfo::FunctionNameEntryIndex() {
+ return ReceiverEntryIndex() + (HasAllocatedReceiver() ? 1 : 0);
+}
+
+
int ContextSlotCache::Hash(Object* data, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
@@ -529,6 +709,7 @@ int ContextSlotCache::Hash(Object* data, String* name) {
int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
+ VariableLocation* location,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
int index = Hash(data, name);
@@ -536,6 +717,7 @@ int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
if ((key.data == data) && key.name->Equals(name)) {
Value result(values_[index]);
if (mode != NULL) *mode = result.mode();
+ if (location != NULL) *location = result.location();
if (init_flag != NULL) *init_flag = result.initialization_flag();
if (maybe_assigned_flag != NULL)
*maybe_assigned_flag = result.maybe_assigned_flag();
@@ -546,7 +728,8 @@ int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
- VariableMode mode, InitializationFlag init_flag,
+ VariableMode mode, VariableLocation location,
+ InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
DisallowHeapAllocation no_gc;
@@ -559,10 +742,11 @@ void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
key.data = *data;
key.name = *internalized_name;
// Please note value only takes a uint as index.
- values_[index] = Value(mode, init_flag, maybe_assigned_flag,
+ values_[index] = Value(mode, location, init_flag, maybe_assigned_flag,
slot_index - kNotFound).raw();
#ifdef DEBUG
- ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
+ ValidateEntry(data, name, mode, location, init_flag, maybe_assigned_flag,
+ slot_index);
#endif
}
}
@@ -577,6 +761,7 @@ void ContextSlotCache::Clear() {
void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
VariableMode mode,
+ VariableLocation location,
InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
@@ -590,6 +775,7 @@ void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
DCHECK(key.name->Equals(*name));
Value result(values_[index]);
DCHECK(result.mode() == mode);
+ DCHECK(result.location() == location);
DCHECK(result.initialization_flag() == init_flag);
DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
DCHECK(result.index() + kNotFound == slot_index);
@@ -665,4 +851,5 @@ Handle<ModuleInfo> ModuleInfo::Create(Isolate* isolate,
return info;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
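ScopeInfo::Create and CreateGlobalThisBinding above compute the array length from one shared layout: parameter names, the stack-local first slot index plus stack-local names, a name and an info entry per context local and per context global, three entries per strong-mode free variable, then an optional receiver index and an optional function-name pair. A sketch of that offset arithmetic as plain functions; the counts are parameters here, while the real class reads them from its header fields:

    // Serialized layout implied by the *EntriesIndex functions above.
    struct ScopeInfoLayout {
      int variable_part_index;  // kVariablePartIndex in the real class
      int parameter_count;
      int stack_local_count;
      int context_local_count;
      int context_global_count;
      int strong_mode_free_variable_count;
      bool has_receiver;        // receiver allocated on stack or in context
      bool has_function_name;

      int ParameterEntries() const { return variable_part_index; }
      int StackLocalFirstSlot() const {
        return ParameterEntries() + parameter_count;
      }
      int StackLocalEntries() const { return StackLocalFirstSlot() + 1; }
      int ContextLocalNames() const {
        return StackLocalEntries() + stack_local_count;
      }
      int ContextGlobalNames() const {
        return ContextLocalNames() + context_local_count;
      }
      int ContextLocalInfos() const {
        return ContextGlobalNames() + context_global_count;
      }
      int ContextGlobalInfos() const {
        return ContextLocalInfos() + context_local_count;
      }
      int StrongModeFreeVarNames() const {
        return ContextGlobalInfos() + context_global_count;
      }
      int StrongModeFreeVarPositions() const {
        return StrongModeFreeVarNames() + strong_mode_free_variable_count;
      }
      int ReceiverEntry() const {
        return StrongModeFreeVarPositions() +
               2 * strong_mode_free_variable_count;
      }
      int FunctionNameEntry() const {
        return ReceiverEntry() + (has_receiver ? 1 : 0);
      }
      int Length() const {
        return FunctionNameEntry() + (has_function_name ? 2 : 0);
      }
    };

Expanding Length() reproduces the expression used in both creation paths: kVariablePartIndex + parameter_count + (1 + stack_local_count) + 2 * context_local_count + 2 * context_global_count + 3 * strong_mode_free_variable_count + (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0).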
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 70a17cd7d4..adefaef974 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -21,12 +21,12 @@ class ContextSlotCache {
// Lookup context slot index for (data, name).
// If absent, kNotFound is returned.
int Lookup(Object* data, String* name, VariableMode* mode,
- InitializationFlag* init_flag,
+ VariableLocation* location, InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
// Update an element in the cache.
void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
- InitializationFlag init_flag,
+ VariableLocation location, InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag, int slot_index);
// Clear the cache.
@@ -47,7 +47,8 @@ class ContextSlotCache {
#ifdef DEBUG
void ValidateEntry(Handle<Object> data, Handle<String> name,
- VariableMode mode, InitializationFlag init_flag,
+ VariableMode mode, VariableLocation location,
+ InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag, int slot_index);
#endif
@@ -58,16 +59,26 @@ class ContextSlotCache {
};
struct Value {
- Value(VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int index) {
+ enum VariableLocationFlag { kContext, kGlobal };
+
+ Value(VariableMode mode, VariableLocation location,
+ InitializationFlag init_flag, MaybeAssignedFlag maybe_assigned_flag,
+ int index) {
+ DCHECK(location == VariableLocation::CONTEXT ||
+ location == VariableLocation::GLOBAL);
+ VariableLocationFlag location_flag =
+ location == VariableLocation::CONTEXT ? kContext : kGlobal;
DCHECK(ModeField::is_valid(mode));
+ DCHECK(VariableLocationField::is_valid(location_flag));
DCHECK(InitField::is_valid(init_flag));
DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag));
DCHECK(IndexField::is_valid(index));
value_ = ModeField::encode(mode) | IndexField::encode(index) |
+ VariableLocationField::encode(location_flag) |
InitField::encode(init_flag) |
MaybeAssignedField::encode(maybe_assigned_flag);
DCHECK(mode == this->mode());
+ DCHECK(location == this->location());
DCHECK(init_flag == this->initialization_flag());
DCHECK(maybe_assigned_flag == this->maybe_assigned_flag());
DCHECK(index == this->index());
@@ -79,6 +90,17 @@ class ContextSlotCache {
VariableMode mode() { return ModeField::decode(value_); }
+ VariableLocation location() {
+ switch (VariableLocationField::decode(value_)) {
+ case kContext:
+ return VariableLocation::CONTEXT;
+ case kGlobal:
+ return VariableLocation::GLOBAL;
+ }
+ UNREACHABLE();
+ return VariableLocation::CONTEXT;
+ }
+
InitializationFlag initialization_flag() {
return InitField::decode(value_);
}
@@ -94,7 +116,9 @@ class ContextSlotCache {
class ModeField : public BitField<VariableMode, 0, 4> {};
class InitField : public BitField<InitializationFlag, 4, 1> {};
class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
- class IndexField : public BitField<int, 6, 32 - 6> {};
+ class VariableLocationField : public BitField<VariableLocationFlag, 6, 1> {
+ };
+ class IndexField : public BitField<int, 7, 32 - 7> {};
private:
uint32_t value_;
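The widened Value packs five fields into one uint32_t: four mode bits, one init bit, one maybe-assigned bit, the new one-bit location flag, and a 25-bit index (the cache stores slot_index - kNotFound so the index is never negative). A self-contained sketch of that encoding without V8's BitField template; the field widths match the declarations above:

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for v8's BitField<T, shift, size>: a value of `size`
    // bits stored at bit offset `shift` of a uint32_t.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1u) << shift;
      static bool is_valid(T value) {
        return static_cast<uint32_t>(value) < (uint32_t{1} << size);
      }
      static uint32_t encode(T value) {
        assert(is_valid(value));
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    // The post-patch layout: the location flag takes bit 6, pushing the
    // index up to bits 7..31.
    using ModeField = BitField<unsigned, 0, 4>;
    using InitField = BitField<unsigned, 4, 1>;
    using MaybeAssignedField = BitField<unsigned, 5, 1>;
    using LocationField = BitField<unsigned, 6, 1>;  // kContext / kGlobal
    using IndexField = BitField<unsigned, 7, 32 - 7>;

    uint32_t Pack(unsigned mode, unsigned init, unsigned assigned,
                  unsigned location, unsigned index) {
      return ModeField::encode(mode) | InitField::encode(init) |
             MaybeAssignedField::encode(assigned) |
             LocationField::encode(location) | IndexField::encode(index);
    }

Shrinking IndexField from 26 to 25 bits is the price of the location flag; 25 bits still comfortably covers any realistic context slot index.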
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 8b623f90ce..f9eef9ab21 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+// TODO(ishell): remove this once compiler support is landed.
+bool enable_context_globals = false;
+
// ----------------------------------------------------------------------------
// Implementation of LocalsMap
//
@@ -152,20 +155,19 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
outer_scope_ = outer_scope;
scope_type_ = scope_type;
function_kind_ = function_kind;
- block_scope_is_class_scope_ = false;
scope_name_ = ast_value_factory_->empty_string();
- dynamics_ = NULL;
- receiver_ = NULL;
+ dynamics_ = nullptr;
+ receiver_ = nullptr;
new_target_ = nullptr;
- function_ = NULL;
- arguments_ = NULL;
- illegal_redecl_ = NULL;
+ function_ = nullptr;
+ arguments_ = nullptr;
+ this_function_ = nullptr;
+ illegal_redecl_ = nullptr;
scope_inside_with_ = false;
scope_contains_with_ = false;
scope_calls_eval_ = false;
scope_uses_arguments_ = false;
scope_uses_super_property_ = false;
- scope_uses_this_ = false;
asm_module_ = false;
asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
// Inherit the language mode from the parent scope.
@@ -173,14 +175,13 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
inner_scope_uses_arguments_ = false;
- inner_scope_uses_this_ = false;
- inner_scope_uses_super_property_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
? outer_scope->has_forced_context_allocation() : false;
num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
+ num_global_slots_ = 0;
num_modules_ = 0;
module_var_ = NULL,
rest_parameter_ = NULL;
@@ -191,7 +192,6 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
language_mode_ = scope_info->language_mode();
- block_scope_is_class_scope_ = scope_info->block_scope_is_class_scope();
function_kind_ = scope_info->function_kind();
}
}
@@ -285,11 +285,14 @@ bool Scope::Analyze(ParseInfo* info) {
}
#ifdef DEBUG
- if (info->isolate()->bootstrapper()->IsActive()
- ? FLAG_print_builtin_scopes
- : FLAG_print_scopes) {
- scope->Print();
+ bool native = info->isolate()->bootstrapper()->IsActive();
+ if (!info->shared_info().is_null()) {
+ Object* script = info->shared_info()->script();
+ native = script->IsScript() &&
+ Script::cast(script)->type()->value() == Script::TYPE_NATIVE;
}
+
+ if (native ? FLAG_print_builtin_scopes : FLAG_print_scopes) scope->Print();
#endif
info->set_scope(scope);
@@ -309,44 +312,32 @@ void Scope::Initialize() {
scope_inside_with_ = is_with_scope();
}
- // Declare convenience variables.
- // Declare and allocate receiver (even for the script scope, and even
- // if naccesses_ == 0).
- // NOTE: When loading parameters in the script scope, we must take
- // care not to access them as properties of the global object, but
- // instead load them directly from the stack. Currently, the only
- // such parameter is 'this' which is passed on the stack when
- // invoking scripts
- if (is_declaration_scope()) {
- DCHECK(!subclass_constructor || is_function_scope());
+ // Declare convenience variables and the receiver.
+ if (is_declaration_scope() && has_this_declaration()) {
Variable* var = variables_.Declare(
this, ast_value_factory_->this_string(),
subclass_constructor ? CONST : VAR, Variable::THIS,
subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
- var->AllocateTo(Variable::PARAMETER, -1);
receiver_ = var;
-
- if (subclass_constructor) {
- new_target_ =
- variables_.Declare(this, ast_value_factory_->new_target_string(),
- CONST, Variable::NEW_TARGET, kCreatedInitialized);
- new_target_->AllocateTo(Variable::PARAMETER, -2);
- new_target_->set_is_used();
- }
- } else {
- DCHECK(outer_scope() != NULL);
- receiver_ = outer_scope()->receiver();
}
if (is_function_scope() && !is_arrow_scope()) {
// Declare 'arguments' variable which exists in all non arrow functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
- variables_.Declare(this,
- ast_value_factory_->arguments_string(),
- VAR,
- Variable::ARGUMENTS,
- kCreatedInitialized);
+ variables_.Declare(this, ast_value_factory_->arguments_string(), VAR,
+ Variable::ARGUMENTS, kCreatedInitialized);
+
+ if (subclass_constructor || FLAG_harmony_new_target) {
+ variables_.Declare(this, ast_value_factory_->new_target_string(), CONST,
+ Variable::NORMAL, kCreatedInitialized);
+ }
+
+ if (IsConciseMethod(function_kind_) || IsConstructor(function_kind_) ||
+ IsAccessorFunction(function_kind_)) {
+ variables_.Declare(this, ast_value_factory_->this_function_string(),
+ CONST, Variable::NORMAL, kCreatedInitialized);
+ }
}
}
@@ -380,7 +371,6 @@ Scope* Scope::FinalizeBlockScope() {
// Propagate usage flags to outer scope.
if (uses_arguments()) outer_scope_->RecordArgumentsUsage();
if (uses_super_property()) outer_scope_->RecordSuperPropertyUsage();
- if (uses_this()) outer_scope_->RecordThisUsage();
if (scope_calls_eval_) outer_scope_->RecordEvalCall();
return NULL;
@@ -402,28 +392,40 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
// Check context slot lookup.
VariableMode mode;
- Variable::Location location = Variable::CONTEXT;
+ VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ int index =
+ ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode, &location,
+ &init_flag, &maybe_assigned_flag);
if (index < 0) {
// Check parameters.
index = scope_info_->ParameterIndex(*name_handle);
if (index < 0) return NULL;
mode = DYNAMIC;
- location = Variable::LOOKUP;
+ location = VariableLocation::LOOKUP;
init_flag = kCreatedInitialized;
// Be conservative and flag parameters as maybe assigned. Better information
// would require ScopeInfo to serialize the maybe_assigned bit also for
// parameters.
maybe_assigned_flag = kMaybeAssigned;
+ } else {
+ DCHECK(location != VariableLocation::GLOBAL ||
+ (is_script_scope() && IsDeclaredVariableMode(mode) &&
+ !IsLexicalVariableMode(mode)));
+ }
+
+ Variable::Kind kind = Variable::NORMAL;
+ if (location == VariableLocation::CONTEXT &&
+ index == scope_info_->ReceiverContextSlotIndex()) {
+ kind = Variable::THIS;
}
-  // TODO(marja, rossberg): Declare variables of the right Kind.
-  Variable* var = variables_.Declare(this, name, mode, Variable::NORMAL,
-                                     init_flag, maybe_assigned_flag);
+  // TODO(marja, rossberg): Correctly declare FUNCTION, CLASS, NEW_TARGET, and
+  // ARGUMENTS bindings as their corresponding Variable::Kind.
+  Variable* var = variables_.Declare(this, name, mode, kind, init_flag,
+                                     maybe_assigned_flag);
var->AllocateTo(location, index);
return var;
}
@@ -444,7 +446,7 @@ Variable* Scope::LookupFunctionVar(const AstRawString* name,
VariableDeclaration* declaration = factory->NewVariableDeclaration(
proxy, mode, this, RelocInfo::kNoPosition);
DeclareFunctionVar(declaration);
- var->AllocateTo(Variable::CONTEXT, index);
+ var->AllocateTo(VariableLocation::CONTEXT, index);
return var;
} else {
return NULL;
@@ -467,15 +469,22 @@ Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode,
bool is_rest, bool* is_duplicate) {
DCHECK(!already_resolved());
DCHECK(is_function_scope());
- Variable* var = variables_.Declare(this, name, mode, Variable::NORMAL,
- kCreatedInitialized);
+
+ Variable* var;
+ if (!name->IsEmpty()) {
+ var = variables_.Declare(this, name, mode, Variable::NORMAL,
+ kCreatedInitialized);
+ // TODO(wingo): Avoid O(n^2) check.
+ *is_duplicate = IsDeclaredParameter(name);
+ } else {
+ var = new (zone())
+ Variable(this, name, TEMPORARY, Variable::NORMAL, kCreatedInitialized);
+ }
if (is_rest) {
DCHECK_NULL(rest_parameter_);
rest_parameter_ = var;
rest_index_ = num_parameters();
}
- // TODO(wingo): Avoid O(n^2) check.
- *is_duplicate = IsDeclaredParameter(name);
params_.Add(var, zone());
return var;
}
@@ -603,9 +612,11 @@ class VarAndOrder {
void Scope::CollectStackAndContextLocals(
ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
+ ZoneList<Variable*>* context_globals,
ZoneList<Variable*>* strong_mode_free_variables) {
DCHECK(stack_locals != NULL);
DCHECK(context_locals != NULL);
+ DCHECK(context_globals != NULL);
// Collect internals which are always allocated on the heap.
for (int i = 0; i < internals_.length(); i++) {
@@ -654,6 +665,8 @@ void Scope::CollectStackAndContextLocals(
stack_locals->Add(var, zone());
} else if (var->IsContextSlot()) {
context_locals->Add(var, zone());
+ } else if (var->IsGlobalSlot()) {
+ context_globals->Add(var, zone());
}
}
}
@@ -693,7 +706,7 @@ bool Scope::HasTrivialContext() const {
for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
if (scope->is_eval_scope()) return false;
if (scope->scope_inside_with_) return false;
- if (scope->num_heap_slots_ > 0) return false;
+ if (scope->ContextLocalCount() > 0) return false;
}
return true;
}
@@ -709,28 +722,18 @@ bool Scope::HasTrivialOuterContext() const {
}
-bool Scope::HasLazyCompilableOuterContext() const {
- Scope* outer = outer_scope_;
- if (outer == NULL) return true;
- // We have to prevent lazy compilation if this scope is inside a with scope
- // and all declaration scopes between them have empty contexts. Such
- // declaration scopes may become invisible during scope info deserialization.
- outer = outer->DeclarationScope();
- bool found_non_trivial_declarations = false;
- for (const Scope* scope = outer; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_with_scope() && !found_non_trivial_declarations) return false;
- if (scope->is_block_scope() && !scope->decls_.is_empty()) return false;
- if (scope->is_declaration_scope() && scope->num_heap_slots() > 0) {
- found_non_trivial_declarations = true;
- }
+bool Scope::AllowsLazyParsing() const {
+ // If we are inside a block scope, we must parse eagerly to find out how
+ // to allocate variables on the block scope. At this point, declarations may
+ // not have yet been parsed.
+ for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
+ if (scope->is_block_scope()) return false;
}
- return true;
+ return AllowsLazyCompilation();
}
-bool Scope::AllowsLazyCompilation() const {
- return !force_eager_compilation_ && HasLazyCompilableOuterContext();
-}
+bool Scope::AllowsLazyCompilation() const { return !force_eager_compilation_; }
bool Scope::AllowsLazyCompilationWithoutContext() const {
@@ -795,7 +798,8 @@ void Scope::GetNestedScopeChain(Isolate* isolate,
void Scope::ReportMessage(int start_position, int end_position,
- const char* message, const AstRawString* arg) {
+ MessageTemplate::Template message,
+ const AstRawString* arg) {
// Propagate the error to the topmost scope targeted by this scope analysis
// phase.
Scope* top = this;
@@ -837,18 +841,21 @@ static void PrintName(const AstRawString* name) {
static void PrintLocation(Variable* var) {
switch (var->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::UNALLOCATED:
break;
- case Variable::PARAMETER:
+ case VariableLocation::PARAMETER:
PrintF("parameter[%d]", var->index());
break;
- case Variable::LOCAL:
+ case VariableLocation::LOCAL:
PrintF("local[%d]", var->index());
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
PrintF("context[%d]", var->index());
break;
- case Variable::LOOKUP:
+ case VariableLocation::GLOBAL:
+ PrintF("global[%d]", var->index());
+ break;
+ case VariableLocation::LOOKUP:
PrintF("lookup");
break;
}
@@ -880,7 +887,11 @@ static void PrintVar(int indent, Variable* var) {
static void PrintMap(int indent, VariableMap* map) {
for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
- PrintVar(indent, var);
+ if (var == NULL) {
+ Indent(indent, "<?>\n");
+ } else {
+ PrintVar(indent, var);
+ }
}
}
@@ -930,21 +941,22 @@ void Scope::Print(int n) {
if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
if (scope_uses_super_property_)
Indent(n1, "// scope uses 'super' property\n");
- if (scope_uses_this_) Indent(n1, "// scope uses 'this'\n");
if (inner_scope_uses_arguments_) {
Indent(n1, "// inner scope uses 'arguments'\n");
}
- if (inner_scope_uses_super_property_)
- Indent(n1, "// inner scope uses 'super' property\n");
- if (inner_scope_uses_this_) Indent(n1, "// inner scope uses 'this'\n");
if (outer_scope_calls_sloppy_eval_) {
Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
- if (num_stack_slots_ > 0) { Indent(n1, "// ");
- PrintF("%d stack slots\n", num_stack_slots_); }
- if (num_heap_slots_ > 0) { Indent(n1, "// ");
- PrintF("%d heap slots\n", num_heap_slots_); }
+ if (num_stack_slots_ > 0) {
+ Indent(n1, "// ");
+ PrintF("%d stack slots\n", num_stack_slots_);
+ }
+ if (num_heap_slots_ > 0) {
+ Indent(n1, "// ");
+ PrintF("%d heap slots (including %d global slots)\n", num_heap_slots_,
+ num_global_slots_);
+ }
// Print locals.
if (function_ != NULL) {
@@ -1005,7 +1017,7 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
Variable::NORMAL,
init_flag);
// Allocate it by giving it a dynamic lookup.
- var->AllocateTo(Variable::LOOKUP, -1);
+ var->AllocateTo(VariableLocation::LOOKUP, -1);
}
return var;
}
@@ -1049,7 +1061,11 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
DCHECK(is_script_scope());
}
- if (is_with_scope()) {
+ // "this" can't be shadowed by "eval"-introduced bindings or by "with" scopes.
+ // TODO(wingo): There are other variables in this category; add them.
+ bool name_can_be_shadowed = var == nullptr || !var->is_this();
+
+ if (is_with_scope() && name_can_be_shadowed) {
DCHECK(!already_resolved());
// The current scope is a with scope, so the variable binding can not be
// statically resolved. However, note that it was necessary to do a lookup
@@ -1060,7 +1076,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned();
*binding_kind = DYNAMIC_LOOKUP;
return NULL;
- } else if (calls_sloppy_eval()) {
+ } else if (calls_sloppy_eval() && name_can_be_shadowed) {
// A variable binding may have been found in an outer scope, but the current
// scope makes a sloppy 'eval' call, so the found variable may not be
// the correct one (the 'eval' may introduce a binding with the same name).
@@ -1219,7 +1235,8 @@ bool Scope::CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var) {
eval_for_use == eval_for_declaration) {
DCHECK(proxy->end_position() != RelocInfo::kNoPosition);
ReportMessage(proxy->position(), proxy->end_position(),
- "strong_use_before_declaration", proxy->raw_name());
+ MessageTemplate::kStrongUseBeforeDeclaration,
+ proxy->raw_name());
return false;
}
return true;
@@ -1238,7 +1255,6 @@ ClassVariable* Scope::ClassVariableForMethod() const {
return nullptr;
}
DCHECK_NOT_NULL(outer_scope_);
- DCHECK(outer_scope_->is_class_scope());
// The class scope contains at most one variable, the class name.
DCHECK(outer_scope_->variables_.occupancy() <= 1);
if (outer_scope_->variables_.occupancy() == 0) return nullptr;
@@ -1288,13 +1304,6 @@ void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
if (inner->scope_uses_arguments_ || inner->inner_scope_uses_arguments_) {
inner_scope_uses_arguments_ = true;
}
- if (inner->scope_uses_super_property_ ||
- inner->inner_scope_uses_super_property_) {
- inner_scope_uses_super_property_ = true;
- }
- if (inner->scope_uses_this_ || inner->inner_scope_uses_this_) {
- inner_scope_uses_this_ = true;
- }
}
if (inner->force_eager_compilation_) {
force_eager_compilation_ = true;
@@ -1310,7 +1319,7 @@ bool Scope::MustAllocate(Variable* var) {
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
// visible name.
- if ((var->is_this() || var->is_new_target() || !var->raw_name()->IsEmpty()) &&
+ if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
(var->has_forced_context_allocation() || scope_calls_eval_ ||
inner_scope_calls_eval_ || scope_contains_with_ || is_catch_scope() ||
is_block_scope() || is_module_scope() || is_script_scope())) {
@@ -1359,13 +1368,13 @@ void Scope::AllocateStackSlot(Variable* var) {
if (is_block_scope()) {
DeclarationScope()->AllocateStackSlot(var);
} else {
- var->AllocateTo(Variable::LOCAL, num_stack_slots_++);
+ var->AllocateTo(VariableLocation::LOCAL, num_stack_slots_++);
}
}
void Scope::AllocateHeapSlot(Variable* var) {
- var->AllocateTo(Variable::CONTEXT, num_heap_slots_++);
+ var->AllocateTo(VariableLocation::CONTEXT, num_heap_slots_++);
}
@@ -1415,24 +1424,42 @@ void Scope::AllocateParameterLocals(Isolate* isolate) {
// Force context allocation of the parameter.
var->ForceContextAllocation();
}
+ AllocateParameter(var, i);
+ }
+}
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- DCHECK(var->IsUnallocated() || var->IsContextSlot());
- if (var->IsUnallocated()) {
- AllocateHeapSlot(var);
- }
- } else {
- DCHECK(var->IsUnallocated() || var->IsParameter());
- if (var->IsUnallocated()) {
- var->AllocateTo(Variable::PARAMETER, i);
- }
+
+void Scope::AllocateParameter(Variable* var, int index) {
+ if (MustAllocate(var)) {
+ if (MustAllocateInContext(var)) {
+ DCHECK(var->IsUnallocated() || var->IsContextSlot());
+ if (var->IsUnallocated()) {
+ AllocateHeapSlot(var);
+ }
+ } else {
+ DCHECK(var->IsUnallocated() || var->IsParameter());
+ if (var->IsUnallocated()) {
+ var->AllocateTo(VariableLocation::PARAMETER, index);
}
}
+ } else {
+ DCHECK(!var->IsGlobalSlot());
}
}
+void Scope::AllocateReceiver() {
+ DCHECK_NOT_NULL(receiver());
+ DCHECK_EQ(receiver()->scope(), this);
+
+ if (has_forced_context_allocation()) {
+ // Force context allocation of the receiver.
+ receiver()->ForceContextAllocation();
+ }
+ AllocateParameter(receiver(), -1);
+}
+
+
void Scope::AllocateNonParameterLocal(Isolate* isolate, Variable* var) {
DCHECK(var->scope() == this);
DCHECK(!var->IsVariable(isolate->factory()->dot_result_string()) ||
@@ -1447,7 +1474,23 @@ void Scope::AllocateNonParameterLocal(Isolate* isolate, Variable* var) {
}
-void Scope::AllocateNonParameterLocals(Isolate* isolate) {
+void Scope::AllocateDeclaredGlobal(Isolate* isolate, Variable* var) {
+ DCHECK(var->scope() == this);
+ DCHECK(!var->IsVariable(isolate->factory()->dot_result_string()) ||
+ !var->IsStackLocal());
+ if (var->IsUnallocated() && var->IsStaticGlobalObjectProperty()) {
+ DCHECK_EQ(-1, var->index());
+ DCHECK(var->name()->IsString());
+ var->AllocateTo(VariableLocation::GLOBAL, num_heap_slots_);
+ num_global_slots_++;
+ // Each global variable occupies two slots in the context: for reads
+ // and writes.
+ num_heap_slots_ += 2;
+ }
+}
+
+
+void Scope::AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate) {
// All variables that have no rewrite yet are non-parameter locals.
for (int i = 0; i < temps_.length(); i++) {
AllocateNonParameterLocal(isolate, temps_[i]);
@@ -1470,17 +1513,35 @@ void Scope::AllocateNonParameterLocals(Isolate* isolate) {
AllocateNonParameterLocal(isolate, vars[i].var());
}
+ if (enable_context_globals) {
+ for (int i = 0; i < var_count; i++) {
+ AllocateDeclaredGlobal(isolate, vars[i].var());
+ }
+ }
+
// For now, function_ must be allocated at the very end. If it gets
// allocated in the context, it must be the last slot in the context,
// because of the current ScopeInfo implementation (see
// ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
- if (function_ != NULL) {
+ if (function_ != nullptr) {
AllocateNonParameterLocal(isolate, function_->proxy()->var());
}
- if (rest_parameter_) {
+ if (rest_parameter_ != nullptr) {
AllocateNonParameterLocal(isolate, rest_parameter_);
}
+
+ Variable* new_target_var =
+ LookupLocal(ast_value_factory_->new_target_string());
+ if (new_target_var != nullptr && MustAllocate(new_target_var)) {
+ new_target_ = new_target_var;
+ }
+
+ Variable* this_function_var =
+ LookupLocal(ast_value_factory_->this_function_string());
+ if (this_function_var != nullptr && MustAllocate(this_function_var)) {
+ this_function_ = this_function_var;
+ }
}
@@ -1502,7 +1563,8 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
// Allocate variables for this scope.
// Parameters must be allocated first, if any.
if (is_function_scope()) AllocateParameterLocals(isolate);
- AllocateNonParameterLocals(isolate);
+ if (has_this_declaration()) AllocateReceiver();
+ AllocateNonParameterLocalsAndDeclaredGlobals(isolate);
// Force allocation of a context for this scope if necessary. For a 'with'
// scope and for a function scope that makes an 'eval' call we need a context,
@@ -1546,7 +1608,13 @@ int Scope::StackLocalCount() const {
int Scope::ContextLocalCount() const {
if (num_heap_slots() == 0) return 0;
+ bool is_function_var_in_context =
+ function_ != NULL && function_->proxy()->var()->IsContextSlot();
return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
- (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
+ 2 * num_global_slots() - (is_function_var_in_context ? 1 : 0);
}
-} } // namespace v8::internal
+
+
+int Scope::ContextGlobalCount() const { return num_global_slots(); }
+} // namespace internal
+} // namespace v8
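AllocateDeclaredGlobal above reserves two context slots per global, one for reads and one for writes, which is why ContextSlotIndex maps global number var to Context::MIN_CONTEXT_SLOTS + ContextLocalCount() + 2 * var and why ContextLocalCount() subtracts 2 * num_global_slots(). A worked sketch of that arithmetic; kMinContextSlots stands in for Context::MIN_CONTEXT_SLOTS, and its value here is only for the example:

    #include <cassert>

    const int kMinContextSlots = 4;  // illustrative value

    // Context locals occupy one slot each, starting after the fixed slots.
    int LocalSlot(int local_index) { return kMinContextSlots + local_index; }

    // Each context global occupies a read/write slot pair after the locals;
    // this returns the read slot, and the write slot is the one after it.
    int GlobalReadSlot(int context_local_count, int global_index) {
      return kMinContextSlots + context_local_count + 2 * global_index;
    }

    int ContextLength(int context_local_count, int context_global_count,
                      bool function_name_in_context) {
      return kMinContextSlots + context_local_count +
             2 * context_global_count + (function_name_in_context ? 1 : 0);
    }

    int main() {
      // Three locals and two globals: locals in slots 4..6, global pairs in
      // 7/8 and 9/10, for a total context length of 11.
      assert(LocalSlot(2) == 6);
      assert(GlobalReadSlot(3, 0) == 7);
      assert(GlobalReadSlot(3, 1) == 9);
      assert(ContextLength(3, 2, false) == 11);
      return 0;
    }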
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 5e3dc1f065..40863800e4 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -144,14 +144,15 @@ class Scope: public ZoneObject {
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
+ Variable::Kind kind = Variable::NORMAL,
int start_position = RelocInfo::kNoPosition,
int end_position = RelocInfo::kNoPosition) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
DCHECK(!already_resolved());
- VariableProxy* proxy = factory->NewVariableProxy(
- name, Variable::NORMAL, start_position, end_position);
+ VariableProxy* proxy =
+ factory->NewVariableProxy(name, kind, start_position, end_position);
unresolved_.Add(proxy, zone_);
return proxy;
}
@@ -217,9 +218,6 @@ class Scope: public ZoneObject {
// Inform the scope that the corresponding code uses "super".
void RecordSuperPropertyUsage() { scope_uses_super_property_ = true; }
- // Inform the scope that the corresponding code uses "this".
- void RecordThisUsage() { scope_uses_this_ = true; }
-
// Set the language mode flag (unless disabled by a global flag).
void SetLanguageMode(LanguageMode language_mode) {
language_mode_ = language_mode;
@@ -282,13 +280,6 @@ class Scope: public ZoneObject {
bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_arrow_scope() const { return scope_type_ == ARROW_SCOPE; }
- void tag_as_class_scope() {
- DCHECK(is_block_scope());
- block_scope_is_class_scope_ = true;
- }
- bool is_class_scope() const {
- return is_block_scope() && block_scope_is_class_scope_;
- }
bool is_declaration_scope() const {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_script_scope();
@@ -319,14 +310,13 @@ class Scope: public ZoneObject {
bool inner_uses_arguments() const { return inner_scope_uses_arguments_; }
// Does this scope access "super" property (super.foo).
bool uses_super_property() const { return scope_uses_super_property_; }
- // Does any inner scope access "super" property.
- bool inner_uses_super_property() const {
- return inner_scope_uses_super_property_;
+
+ bool NeedsHomeObject() const {
+ return scope_uses_super_property_ ||
+ (scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()) ||
+ IsConstructor(function_kind())));
}
- // Does this scope access "this".
- bool uses_this() const { return scope_uses_this_; }
- // Does any inner scope access "this".
- bool inner_uses_this() const { return inner_scope_uses_this_; }
const Scope* NearestOuterEvalScope() const {
if (is_eval_scope()) return this;
@@ -346,7 +336,20 @@ class Scope: public ZoneObject {
LanguageMode language_mode() const { return language_mode_; }
// The variable corresponding to the 'this' value.
- Variable* receiver() { return receiver_; }
+ Variable* receiver() {
+ DCHECK(has_this_declaration());
+ DCHECK_NOT_NULL(receiver_);
+ return receiver_;
+ }
+
+ Variable* LookupThis() { return Lookup(ast_value_factory_->this_string()); }
+
+ // TODO(wingo): Add a GLOBAL_SCOPE scope type which will lexically allocate
+ // "this" (and no other variable) on the native context. Script scopes then
+ // will not have a "this" declaration.
+ bool has_this_declaration() const {
+ return (is_function_scope() && !is_arrow_scope()) || is_module_scope();
+ }
// The variable corresponding to the 'new.target' value.
Variable* new_target_var() { return new_target_; }
@@ -401,6 +404,15 @@ class Scope: public ZoneObject {
return arguments_;
}
+ Variable* this_function_var() const {
+ // Currently this is only used in derived constructors.
+ DCHECK(this_function_ == nullptr ||
+ (is_function_scope() && (IsConstructor(function_kind()) ||
+ IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()))));
+ return this_function_;
+ }
+
// Declarations list.
ZoneList<Declaration*>* declarations() { return &decls_; }
@@ -430,6 +442,7 @@ class Scope: public ZoneObject {
// handled separately.
void CollectStackAndContextLocals(
ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
+ ZoneList<Variable*>* context_globals,
ZoneList<Variable*>* strong_mode_free_variables = nullptr);
// Current number of var or const locals.
@@ -438,9 +451,11 @@ class Scope: public ZoneObject {
// Result of variable allocation.
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
+ int num_global_slots() const { return num_global_slots_; }
int StackLocalCount() const;
int ContextLocalCount() const;
+ int ContextGlobalCount() const;
// For script scopes, the number of module literals (including nested ones).
int num_modules() const { return num_modules_; }
@@ -451,6 +466,9 @@ class Scope: public ZoneObject {
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
+ // Determine if we can parse a function literal in this scope lazily.
+ bool AllowsLazyParsing() const;
+
// Determine if we can use lazy compilation for this scope.
bool AllowsLazyCompilation() const;
@@ -460,9 +478,6 @@ class Scope: public ZoneObject {
// True if the outer context of this scope is always the native context.
bool HasTrivialOuterContext() const;
- // True if the outer context allows lazy compilation of this scope.
- bool HasLazyCompilableOuterContext() const;
-
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
@@ -503,7 +518,8 @@ class Scope: public ZoneObject {
}
// Error handling.
- void ReportMessage(int start_position, int end_position, const char* message,
+ void ReportMessage(int start_position, int end_position,
+ MessageTemplate::Template message,
const AstRawString* arg);
// ---------------------------------------------------------------------------
@@ -524,8 +540,6 @@ class Scope: public ZoneObject {
// The scope type.
ScopeType scope_type_;
- // Some block scopes are tagged as class scopes.
- bool block_scope_is_class_scope_;
// If the scope is a function scope, this is the function kind.
FunctionKind function_kind_;
@@ -558,6 +572,8 @@ class Scope: public ZoneObject {
Variable* new_target_;
// Convenience variable; function scopes only.
Variable* arguments_;
+ // Convenience variable; subclass constructors only.
+ Variable* this_function_;
// Module descriptor; module scopes only.
ModuleDescriptor* module_descriptor_;
@@ -577,8 +593,6 @@ class Scope: public ZoneObject {
bool scope_uses_arguments_;
// This scope uses "super" property ('super.foo').
bool scope_uses_super_property_;
- // This scope uses "this".
- bool scope_uses_this_;
// This scope contains a "use asm" annotation.
bool asm_module_;
// This scope's outer context is an asm module.
@@ -593,8 +607,6 @@ class Scope: public ZoneObject {
bool outer_scope_calls_sloppy_eval_;
bool inner_scope_calls_eval_;
bool inner_scope_uses_arguments_;
- bool inner_scope_uses_super_property_;
- bool inner_scope_uses_this_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -608,6 +620,7 @@ class Scope: public ZoneObject {
// Computed via AllocateVariables; function, block and catch scopes only.
int num_stack_slots_;
int num_heap_slots_;
+ int num_global_slots_;
// The number of modules (including nested ones).
int num_modules_;
@@ -704,8 +717,11 @@ class Scope: public ZoneObject {
void AllocateHeapSlot(Variable* var);
void AllocateParameterLocals(Isolate* isolate);
void AllocateNonParameterLocal(Isolate* isolate, Variable* var);
- void AllocateNonParameterLocals(Isolate* isolate);
+ void AllocateDeclaredGlobal(Isolate* isolate, Variable* var);
+ void AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate);
void AllocateVariablesRecursively(Isolate* isolate);
+ void AllocateParameter(Variable* var, int index);
+ void AllocateReceiver();
void AllocateModules();
// Resolve and fill in the allocation information for all variables
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index e02a24c1d3..3649c2a69f 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -232,6 +232,5 @@ Vector<const char> NativesCollection<type>::GetScriptsSource() {
template class NativesCollection<CORE>;
template class NativesCollection<EXPERIMENTAL>;
template class NativesCollection<EXTRAS>;
-
-} // namespace v8::internal
+} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index 7f123a3fbe..7588fbcf90 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -772,6 +772,8 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
new_code_objects_.Add(Code::cast(obj));
}
}
+ // Check alignment.
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
return obj;
}
@@ -799,8 +801,14 @@ HeapObject* Deserializer::GetBackReferencedObject(int space) {
uint32_t chunk_index = back_reference.chunk_index();
DCHECK_LE(chunk_index, current_chunk_[space]);
uint32_t chunk_offset = back_reference.chunk_offset();
- obj = HeapObject::FromAddress(reservations_[space][chunk_index].start +
- chunk_offset);
+ Address address = reservations_[space][chunk_index].start + chunk_offset;
+ if (next_alignment_ != kWordAligned) {
+ int padding = Heap::GetFillToAlign(address, next_alignment_);
+ next_alignment_ = kWordAligned;
+ DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
+ address += padding;
+ }
+ obj = HeapObject::FromAddress(address);
}
if (deserializing_user_code() && obj->IsInternalizedString()) {
obj = String::cast(obj)->GetForwardedInternalizedString();
@@ -818,12 +826,26 @@ HeapObject* Deserializer::GetBackReferencedObject(int space) {
void Deserializer::ReadObject(int space_number, Object** write_back) {
Address address;
HeapObject* obj;
- int next_int = source_.GetInt();
-
- DCHECK_NE(kDoubleAlignmentSentinel, next_int);
- int size = next_int << kObjectAlignmentBits;
- address = Allocate(space_number, size);
- obj = HeapObject::FromAddress(address);
+ int size = source_.GetInt() << kObjectAlignmentBits;
+
+ if (next_alignment_ != kWordAligned) {
+ int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
+ address = Allocate(space_number, reserved);
+ obj = HeapObject::FromAddress(address);
+ // If one of the following assertions fails, then we are deserializing an
+ // aligned object when the filler maps have not been deserialized yet.
+ // We require filler maps as padding to align the object.
+ Heap* heap = isolate_->heap();
+ DCHECK(heap->free_space_map()->IsMap());
+ DCHECK(heap->one_pointer_filler_map()->IsMap());
+ DCHECK(heap->two_pointer_filler_map()->IsMap());
+ obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
+ address = obj->address();
+ next_alignment_ = kWordAligned;
+ } else {
+ address = Allocate(space_number, size);
+ obj = HeapObject::FromAddress(address);
+ }
isolate_->heap()->OnAllocationEvent(obj, size);
Object** current = reinterpret_cast<Object**>(address);
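
The aligned branch above reserves size + Heap::GetMaximumFillToAlign(alignment)
and then lets AlignWithFiller place the object and pad the gap with filler
maps. The padding arithmetic it depends on is small; a hedged sketch (bodies
are illustrative, not V8's implementation, with kWordSize standing in for
kPointerSize):

  #include <cstdint>

  const uintptr_t kWordSize = sizeof(void*);  // assumption for the sketch

  // Filler bytes needed so an object placed at `address` meets `alignment`,
  // a power-of-two byte alignment such as 8 for kDoubleAligned on 32-bit.
  int FillToAlign(uintptr_t address, uintptr_t alignment) {
    return static_cast<int>((alignment - (address & (alignment - 1))) &
                            (alignment - 1));
  }

  // Worst-case filler; this is why the reservation above is size + maximum.
  int MaximumFillToAlign(uintptr_t alignment) {
    return static_cast<int>(alignment - kWordSize);
  }
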
@@ -1017,14 +1039,17 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
FOUR_CASES(byte_code + 8) \
FOUR_CASES(byte_code + 12)
+#define SINGLE_CASE(where, how, within, space) \
+ CASE_STATEMENT(where, how, within, space) \
+ CASE_BODY(where, how, within, space)
+
// Deserialize a new object and write a pointer to it to the current
// object.
ALL_SPACES(kNewObject, kPlain, kStartOfObject)
// Support for direct instruction pointers in functions. It's an inner
// pointer because it points at the entry point, not at the start of the
// code object.
- CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
- CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+ SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
@@ -1033,15 +1058,15 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
+ defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
// Deserialize a new object from a pointer found in code and write
// a pointer to it to the current object. Required only for MIPS, PPC or
- // ARM with ool constant pool, and omitted on the other architectures
+ // ARM with embedded constant pool, and omitted on the other architectures
// because it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS, PPC or ARM with ool constant pool.
+ // object. Required only for MIPS, PPC or ARM with embedded constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
@@ -1055,45 +1080,33 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
// Find an object in the roots array and write a pointer to it to the
// current object.
- CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
- CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
- defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
+ SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
// Find an object in the roots array and write a pointer to it in code.
- CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
- CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
+ SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
#endif
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
- CASE_BODY(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
// Find a code entry in the partial snapshots cache and
// write a pointer to it to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
- CASE_BODY(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
+ SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
// Find an external reference and write a pointer to it to the current
// object.
- CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
- CASE_BODY(kExternalReference, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
// Find an external reference and write a pointer to it in the current
// code object.
- CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
- CASE_BODY(kExternalReference, kFromCode, kStartOfObject, 0)
+ SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
// Find an object in the attached references and write a pointer to it to
// the current object.
- CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
- CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0)
- CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0)
- CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
- CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
- CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
+ SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
+ SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
// Find a builtin and write a pointer to it to the current object.
- CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
- CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
- CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
- CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
- CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
- CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
+ SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
+ SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
+ SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -1188,6 +1201,15 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
break;
}
+ case kAlignmentPrefix:
+ case kAlignmentPrefix + 1:
+ case kAlignmentPrefix + 2: {
+ DCHECK_EQ(kWordAligned, next_alignment_);
+ next_alignment_ =
+ static_cast<AllocationAlignment>(data - (kAlignmentPrefix - 1));
+ break;
+ }
+
STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
SIXTEEN_CASES(kRootArrayConstantsWithSkip)
@@ -1254,6 +1276,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
#undef SIXTEEN_CASES
#undef FOUR_CASES
+#undef SINGLE_CASE
default:
CHECK(false);
@@ -1581,6 +1604,7 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
PrintF("\n");
}
+ PutAlignmentPrefix(obj);
AllocationSpace space = back_reference.space();
if (skip == 0) {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
@@ -1674,6 +1698,18 @@ void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
}
+int Serializer::PutAlignmentPrefix(HeapObject* object) {
+ AllocationAlignment alignment = object->RequiredAlignment();
+ if (alignment != kWordAligned) {
+ DCHECK(1 <= alignment && alignment <= 3);
+ byte prefix = (kAlignmentPrefix - 1) + alignment;
+ sink_->Put(prefix, "Alignment");
+ return Heap::GetMaximumFillToAlign(alignment);
+ }
+ return 0;
+}
+
+
void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (obj->IsMap()) {
@@ -1755,11 +1791,10 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
}
back_reference = serializer_->AllocateLargeObject(size);
} else {
- back_reference = serializer_->Allocate(space, size);
+ int fill = serializer_->PutAlignmentPrefix(object_);
+ back_reference = serializer_->Allocate(space, size + fill);
sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
- int encoded_size = size >> kObjectAlignmentBits;
- DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
- sink_->PutInt(encoded_size, "ObjectSizeInWords");
+ sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
}
#ifdef OBJECT_PRINT
@@ -1838,6 +1873,28 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
}
+// If the object is a weak cell, clear its next link now and restore it later.
+class UnlinkWeakCellScope {
+ public:
+ explicit UnlinkWeakCellScope(HeapObject* object) : weak_cell_(NULL) {
+ if (object->IsWeakCell()) {
+ weak_cell_ = WeakCell::cast(object);
+ next_ = weak_cell_->next();
+ weak_cell_->clear_next(object->GetHeap());
+ }
+ }
+
+ ~UnlinkWeakCellScope() {
+ if (weak_cell_) weak_cell_->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+ }
+
+ private:
+ WeakCell* weak_cell_;
+ Object* next_;
+ DisallowHeapAllocation no_gc_;
+};
+
+
void Serializer::ObjectSerializer::Serialize() {
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
@@ -1854,7 +1911,23 @@ void Serializer::ObjectSerializer::Serialize() {
if (object_->IsPrototypeInfo()) {
Object* prototype_users = PrototypeInfo::cast(object_)->prototype_users();
if (prototype_users->IsWeakFixedArray()) {
- WeakFixedArray::cast(prototype_users)->Compact();
+ WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
+ array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
+ }
+ }
+ // Compaction of a prototype users list can require the registered users
+ // to update their remembered slots. That doesn't work if those users
+ // have already been serialized themselves. So if this object is a
+ // registered user, compact its prototype's user list now.
+ if (object_->IsMap()) {
+ Map* map = Map::cast(object_);
+ if (map->is_prototype_map() && map->prototype_info()->IsPrototypeInfo() &&
+ PrototypeInfo::cast(map->prototype_info())->registry_slot() !=
+ PrototypeInfo::UNREGISTERED) {
+ JSObject* proto = JSObject::cast(map->prototype());
+ PrototypeInfo* info = PrototypeInfo::cast(proto->map()->prototype_info());
+ WeakFixedArray* array = WeakFixedArray::cast(info->prototype_users());
+ array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
}
}
@@ -1862,6 +1935,11 @@ void Serializer::ObjectSerializer::Serialize() {
// Clear cached line ends.
Object* undefined = serializer_->isolate()->heap()->undefined_value();
Script::cast(object_)->set_line_ends(undefined);
+ Object* shared_list = Script::cast(object_)->shared_function_infos();
+ if (shared_list->IsWeakFixedArray()) {
+ WeakFixedArray::cast(shared_list)
+ ->Compact<WeakFixedArray::NullCallback>();
+ }
}
if (object_->IsExternalString()) {
@@ -1897,6 +1975,8 @@ void Serializer::ObjectSerializer::Serialize() {
return;
}
+ UnlinkWeakCellScope unlink_weak_cell(object_);
+
object_->IterateBody(map->instance_type(), size, this);
OutputRawData(object_->address() + size);
}
@@ -1921,6 +2001,8 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
serializer_->PutBackReference(object_, reference);
sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
+ UnlinkWeakCellScope unlink_weak_cell(object_);
+
object_->IterateBody(map->instance_type(), size, this);
OutputRawData(object_->address() + size);
}
@@ -1967,9 +2049,6 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
- // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
- if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
-
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -2041,9 +2120,6 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
- // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
- if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
-
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -2061,9 +2137,6 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
- // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
- if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
-
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
Cell* object = Cell::cast(rinfo->target_cell());
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
@@ -2111,9 +2184,7 @@ Address Serializer::ObjectSerializer::PrepareCode() {
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
- if (!(FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool())) {
- rinfo->WipeOut();
- }
+ rinfo->WipeOut();
}
// We need to wipe out the header fields *after* wiping out the
// relocations, because some of these fields are needed for the latter.
@@ -2156,10 +2227,6 @@ int Serializer::ObjectSerializer::OutputRawData(
if (is_code_object_) object_start = PrepareCode();
const char* description = is_code_object_ ? "Code" : "Byte";
-#ifdef MEMORY_SANITIZER
- // Object sizes are usually rounded up with uninitialized padding space.
- MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output);
-#endif // MEMORY_SANITIZER
sink_->PutRaw(object_start + base, bytes_to_output, description);
}
if (to_skip != 0 && return_skip == kIgnoringReturn) {
@@ -2297,7 +2364,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
DCHECK(code_object->has_reloc_info_for_serialization());
// Only serialize the code for the toplevel function unless specified
// by flag. Replace code of inner functions by the lazy compile builtin.
- // This is safe, as checked in Compiler::BuildFunctionInfo.
+ // This is safe, as checked in Compiler::GetSharedFunctionInfo.
if (code_object != main_code_ && !FLAG_serialize_inner) {
SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
} else {
@@ -2540,6 +2607,11 @@ Vector<const byte> SnapshotData::Payload() const {
class Checksum {
public:
explicit Checksum(Vector<const byte> payload) {
+#ifdef MEMORY_SANITIZER
+ // Computing the checksum includes padding bytes for objects like strings.
+ // Mark every object as initialized in the code serializer.
+ MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
+#endif // MEMORY_SANITIZER
// Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
uintptr_t a = 1;
uintptr_t b = 0;
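
The comment names the algorithm but the loop body is below the fold:
Fletcher's checksum keeps one running sum of the data and a second running sum
of the first sum, so single-bit and transposition errors perturb at least one
of the two words. A minimal sketch under the conventional byte-wise
formulation (the modulus is an assumption here; the V8 variant sums machine
words and folds 64-bit sums down to 32 bits, as the comment above notes):

  #include <cstddef>
  #include <cstdint>

  // Sketch of a Fletcher-style checksum; c1/c2 correspond to the
  // kChecksum1Offset / kChecksum2Offset header words compared above.
  void FletcherChecksum(const uint8_t* data, size_t length,
                        uint32_t* c1, uint32_t* c2) {
    uint32_t a = 1;  // running sum of the payload
    uint32_t b = 0;  // running sum of the running sums
    for (size_t i = 0; i < length; i++) {
      a = (a + data[i]) % 65521;  // modulus borrowed from Adler-32
      b = (b + a) % 65521;
    }
    *c1 = a;
    *c2 = b;
  }
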
@@ -2625,13 +2697,13 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
Isolate* isolate, String* source) const {
uint32_t magic_number = GetMagicNumber();
+ if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
uint32_t c1 = GetHeaderValue(kChecksum1Offset);
uint32_t c2 = GetHeaderValue(kChecksum2Offset);
- if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
@@ -2699,4 +2771,5 @@ SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
delete scd;
return NULL;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index 67ce69ab94..001d775392 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -324,12 +324,14 @@ class SerializerDeserializer: public ObjectVisitor {
// 0x07 Unused (including 0x27, 0x47, 0x67).
// 0x08..0x0c Reference to previous object from space.
kBackref = 0x08,
+ // 0x0d Unused (including 0x2d, 0x4d, 0x6d).
// 0x0e Unused (including 0x2e, 0x4e, 0x6e).
// 0x0f Unused (including 0x2f, 0x4f, 0x6f).
// 0x10..0x14 Reference to previous object from space after skip.
kBackrefWithSkip = 0x10,
+ // 0x15 Unused (including 0x35, 0x55, 0x75).
// 0x16 Unused (including 0x36, 0x56, 0x76).
- // 0x17 Unused (including 0x37, 0x57, 0x77).
+ // 0x17 Misc (including 0x37, 0x57, 0x77).
// 0x18 Root array item.
kRootArray = 0x18,
// 0x19 Object in the partial snapshot cache.
@@ -384,14 +386,18 @@ class SerializerDeserializer: public ObjectVisitor {
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
- static const int kSynchronize = 0x5d;
+ static const int kSynchronize = 0x17;
// Used for the source code of the natives, which is in the executable, but
// is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0x5e;
+ static const int kNativesStringResource = 0x37;
// Raw data of variable length.
- static const int kVariableRawData = 0x7d;
+ static const int kVariableRawData = 0x57;
// Repeats of variable length.
- static const int kVariableRepeat = 0x7e;
+ static const int kVariableRepeat = 0x77;
+ // Alignment prefixes 0x7d..0x7f.
+ static const int kAlignmentPrefix = 0x7d;
+
+ // 0x5d..0x5f Unused.
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
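
The three new prefix bytes pair one-to-one with the non-word alignment values,
which is what makes both directions of the mapping a single addition or
subtraction. A sketch consistent with PutAlignmentPrefix and the
kAlignmentPrefix cases in ReadData above (enum names and ordering assumed to
match this revision's AllocationAlignment):

  enum AllocationAlignment {
    kWordAligned = 0,
    kDoubleAligned = 1,
    kDoubleUnaligned = 2,
    kSimd128Unaligned = 3
  };

  const int kAlignmentPrefix = 0x7d;

  // Serializer side: kDoubleAligned -> 0x7d ... kSimd128Unaligned -> 0x7f.
  int EncodeAlignmentPrefix(AllocationAlignment alignment) {
    return (kAlignmentPrefix - 1) + alignment;  // only emitted for 1..3
  }

  // Deserializer side: exact inverse of the encoding above.
  AllocationAlignment DecodeAlignmentPrefix(int data) {
    return static_cast<AllocationAlignment>(data - (kAlignmentPrefix - 1));
  }
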
@@ -515,7 +521,8 @@ class Deserializer: public SerializerDeserializer {
magic_number_(data->GetMagicNumber()),
external_reference_table_(NULL),
deserialized_large_objects_(0),
- deserializing_user_code_(false) {
+ deserializing_user_code_(false),
+ next_alignment_(kWordAligned) {
DecodeReservation(data->Reservations());
}
@@ -605,6 +612,8 @@ class Deserializer: public SerializerDeserializer {
bool deserializing_user_code_;
+ AllocationAlignment next_alignment_;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
@@ -709,6 +718,9 @@ class Serializer : public SerializerDeserializer {
void PutBackReference(HeapObject* object, BackReference reference);
+ // Emit an alignment prefix if necessary; return required padding in bytes.
+ int PutAlignmentPrefix(HeapObject* object);
+
// Returns true if the object was successfully serialized.
bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 0e07cc149d..ab8a88486e 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -77,8 +77,9 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>();
CHECK(result->IsContext());
// If the snapshot does not contain a custom script, we need to update
- // the global object for exactly one context.
- CHECK(EmbedsScript(isolate) || (*outdated_contexts_out)->length() == 1);
+ // the global object for exactly two contexts: the builtins context and the
+ // script context that has the global "this" binding.
+ CHECK(EmbedsScript(isolate) || (*outdated_contexts_out)->length() == 2);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = context_data.length();
@@ -136,7 +137,7 @@ void CalculateFirstPageSizes(bool is_default_snapshot,
2 * context_reservations[context_index].chunk_size()) +
Page::kObjectStartOffset;
// Add a small allowance to the code space for small scripts.
- if (space == CODE_SPACE) required += 32 * KB;
+ if (space == CODE_SPACE) required += 64 * KB;
} else {
// We expect the vanilla snapshot to require only one page per space.
DCHECK(!is_default_snapshot);
@@ -226,4 +227,5 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
int context_length = data->raw_size - context_offset;
return Vector<const byte>(context_data, context_length);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index 0eea940100..464d3a800a 100644
--- a/deps/v8/src/snapshot/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -25,4 +25,5 @@ void DisposeNatives() {}
const v8::StartupData* Snapshot::DefaultSnapshotBlob() { return NULL; }
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index f5e3de49cd..1d5476cb5e 100644
--- a/deps/v8/src/snapshot/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -40,4 +40,5 @@ const v8::StartupData* Snapshot::DefaultSnapshotBlob() {
external_startup_data_mutex.Pointer());
return &external_startup_blob;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index c0179b7fca..7048c355ec 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -62,6 +62,5 @@ bool SnapshotByteSource::GetBlob(const byte** data, int* number_of_bytes) {
return false;
}
}
-
-} // namespace v8::internal
+} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 371afef4fc..7f89213f16 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -38,7 +38,8 @@ class Snapshot : public AllStatic {
static bool HaveASnapshotToStartFrom(Isolate* isolate) {
// Do not use snapshots if the isolate is used to create snapshots.
- return isolate->snapshot_blob() != NULL;
+ return isolate->snapshot_blob() != NULL &&
+ isolate->snapshot_blob()->data != NULL;
}
static bool EmbedsScript(Isolate* isolate);
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index 38c3188a9b..7c46e0d523 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -55,25 +55,23 @@ IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
}
-void IncrementalStringBuilder::Accumulate() {
- // Only accumulate fully written strings. Shrink first if necessary.
- DCHECK_EQ(current_index_, current_part()->length());
+void IncrementalStringBuilder::Accumulate(Handle<String> new_part) {
Handle<String> new_accumulator;
- if (accumulator()->length() + current_part()->length() > String::kMaxLength) {
+ if (accumulator()->length() + new_part->length() > String::kMaxLength) {
// Set the flag and carry on. Delay throwing the exception till the end.
new_accumulator = factory()->empty_string();
overflowed_ = true;
} else {
- new_accumulator = factory()
- ->NewConsString(accumulator(), current_part())
- .ToHandleChecked();
+ new_accumulator =
+ factory()->NewConsString(accumulator(), new_part).ToHandleChecked();
}
set_accumulator(new_accumulator);
}
void IncrementalStringBuilder::Extend() {
- Accumulate();
+ DCHECK_EQ(current_index_, current_part()->length());
+ Accumulate(current_part());
if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
part_length_ *= kPartLengthGrowthFactor;
}
@@ -91,7 +89,7 @@ void IncrementalStringBuilder::Extend() {
MaybeHandle<String> IncrementalStringBuilder::Finish() {
ShrinkCurrentPart();
- Accumulate();
+ Accumulate(current_part());
if (overflowed_) {
THROW_NEW_ERROR(isolate_, NewInvalidStringLengthError(), String);
}
@@ -103,9 +101,7 @@ void IncrementalStringBuilder::AppendString(Handle<String> string) {
ShrinkCurrentPart();
part_length_ = kInitialPartLength; // Allocate conservatively.
Extend(); // Attach current part and allocate new part.
- Handle<String> concat =
- factory()->NewConsString(accumulator(), string).ToHandleChecked();
- set_accumulator(concat);
-}
+ Accumulate(string);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
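
The refactor above funnels every concatenation through one
Accumulate(new_part) that tests String::kMaxLength first, swaps in the empty
string on overflow, and defers the RangeError until Finish(). A standalone
sketch of that discipline (std::string stands in for V8 handles and cons
strings; kMaxLength is illustrative):

  #include <string>

  class SketchBuilder {
   public:
    void Accumulate(const std::string& new_part) {
      if (accumulator_.length() + new_part.length() > kMaxLength) {
        // Set the flag and carry on; throwing is delayed until Finish().
        accumulator_.clear();
        overflowed_ = true;
      } else {
        accumulator_ += new_part;  // the real code builds a cons string
      }
    }

    bool Finish(std::string* out) {
      if (overflowed_) return false;  // the real code throws at this point
      *out = accumulator_;
      return true;
    }

   private:
    static const size_t kMaxLength = 1u << 28;  // illustrative bound
    std::string accumulator_;
    bool overflowed_ = false;
  };
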
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 43b690dba3..5314665329 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -384,7 +384,7 @@ class IncrementalStringBuilder {
}
// Add the current part to the accumulator.
- void Accumulate();
+ void Accumulate(Handle<String> new_part);
// Finish the current part and allocate a new part.
void Extend();
diff --git a/deps/v8/src/string-iterator.js b/deps/v8/src/string-iterator.js
index c19e808020..536430ee8f 100644
--- a/deps/v8/src/string-iterator.js
+++ b/deps/v8/src/string-iterator.js
@@ -2,16 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
-var GlobalObject = global.Object;
+// -------------------------------------------------------------------
+// Imports
+
var GlobalString = global.String;
-//-------------------------------------------------------------------
+var ArrayIteratorCreateResultObject;
+
+utils.Import(function(from) {
+ ArrayIteratorCreateResultObject = from.ArrayIteratorCreateResultObject;
+});
+
+// -------------------------------------------------------------------
var stringIteratorIteratedStringSymbol =
GLOBAL_PRIVATE("StringIterator#iteratedString");
@@ -31,12 +39,6 @@ function CreateStringIterator(string) {
}
-// 21.1.5.2.2 %StringIteratorPrototype%[@@iterator]
-function StringIteratorIterator() {
- return this;
-}
-
-
// 21.1.5.2.1 %StringIteratorPrototype%.next( )
function StringIteratorNext() {
var iterator = $toObject(this);
@@ -48,7 +50,7 @@ function StringIteratorNext() {
var s = GET_PRIVATE(iterator, stringIteratorIteratedStringSymbol);
if (IS_UNDEFINED(s)) {
- return $iteratorCreateResultObject(UNDEFINED, true);
+ return ArrayIteratorCreateResultObject(UNDEFINED, true);
}
var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol);
@@ -57,7 +59,7 @@ function StringIteratorNext() {
if (position >= length) {
SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol,
UNDEFINED);
- return $iteratorCreateResultObject(UNDEFINED, true);
+ return ArrayIteratorCreateResultObject(UNDEFINED, true);
}
var first = %_StringCharCodeAt(s, position);
@@ -74,7 +76,7 @@ function StringIteratorNext() {
SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position);
- return $iteratorCreateResultObject(resultString, false);
+ return ArrayIteratorCreateResultObject(resultString, false);
}
@@ -85,19 +87,16 @@ function StringPrototypeIterator() {
//-------------------------------------------------------------------
-%FunctionSetPrototype(StringIterator, new GlobalObject());
+%FunctionSetPrototype(StringIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(StringIterator, 'String Iterator');
-$installFunctions(StringIterator.prototype, DONT_ENUM, [
+utils.InstallFunctions(StringIterator.prototype, DONT_ENUM, [
'next', StringIteratorNext
]);
-$setFunctionName(StringIteratorIterator, symbolIterator);
-%AddNamedProperty(StringIterator.prototype, symbolIterator,
- StringIteratorIterator, DONT_ENUM);
%AddNamedProperty(StringIterator.prototype, symbolToStringTag,
"String Iterator", READ_ONLY | DONT_ENUM);
-$setFunctionName(StringPrototypeIterator, symbolIterator);
+utils.SetFunctionName(StringPrototypeIterator, symbolIterator);
%AddNamedProperty(GlobalString.prototype, symbolIterator,
StringPrototypeIterator, DONT_ENUM);
diff --git a/deps/v8/src/string-search.cc b/deps/v8/src/string-search.cc
index 0c18762750..837f938095 100644
--- a/deps/v8/src/string-search.cc
+++ b/deps/v8/src/string-search.cc
@@ -16,4 +16,5 @@ namespace internal {
// good_suffix_shift_table()
// suffix_table()
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index d53cdc092e..5f61e0da1d 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -18,6 +18,18 @@ char* HeapStringAllocator::allocate(unsigned bytes) {
}
+char* FixedStringAllocator::allocate(unsigned bytes) {
+ CHECK_LE(bytes, length_);
+ return buffer_;
+}
+
+
+char* FixedStringAllocator::grow(unsigned* old) {
+ *old = length_;
+ return buffer_;
+}
+
+
bool StringStream::Put(char c) {
if (full()) return false;
DCHECK(length_ < capacity_);
@@ -170,7 +182,7 @@ void StringStream::PrintObject(Object* o) {
} else if (o->IsNumber() || o->IsOddball()) {
return;
}
- if (o->IsHeapObject()) {
+ if (o->IsHeapObject() && object_print_mode_ == kPrintObjectVerbose) {
HeapObject* ho = HeapObject::cast(o);
DebugObjectCache* debug_object_cache = ho->GetIsolate()->
string_stream_debug_object_cache();
@@ -284,7 +296,8 @@ void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
#ifdef DEBUG
bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
- return isolate->string_stream_debug_object_cache()->length() == 0;
+ return object_print_mode_ == kPrintObjectConcise ||
+ isolate->string_stream_debug_object_cache()->length() == 0;
}
#endif
@@ -403,6 +416,7 @@ void StringStream::PrintByteArray(ByteArray* byte_array) {
void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
+ if (object_print_mode_ == kPrintObjectConcise) return;
DebugObjectCache* debug_object_cache =
isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
@@ -562,4 +576,5 @@ char* HeapStringAllocator::grow(unsigned* bytes) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index b8828ee620..cc50bb7150 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -35,6 +35,21 @@ class HeapStringAllocator final : public StringAllocator {
};
+class FixedStringAllocator final : public StringAllocator {
+ public:
+ FixedStringAllocator(char* buffer, unsigned length)
+ : buffer_(buffer), length_(length) {}
+ ~FixedStringAllocator() override {}
+ char* allocate(unsigned bytes) override;
+ char* grow(unsigned* bytes) override;
+
+ private:
+ char* buffer_;
+ unsigned length_;
+ DISALLOW_COPY_AND_ASSIGN(FixedStringAllocator);
+};
+
+
class FmtElm final {
public:
FmtElm(int value) : type_(INT) { // NOLINT
@@ -77,11 +92,14 @@ class FmtElm final {
class StringStream final {
public:
- explicit StringStream(StringAllocator* allocator):
- allocator_(allocator),
- capacity_(kInitialCapacity),
- length_(0),
- buffer_(allocator_->allocate(kInitialCapacity)) {
+ enum ObjectPrintMode { kPrintObjectConcise, kPrintObjectVerbose };
+ StringStream(StringAllocator* allocator,
+ ObjectPrintMode object_print_mode = kPrintObjectConcise)
+ : allocator_(allocator),
+ object_print_mode_(object_print_mode),
+ capacity_(kInitialCapacity),
+ length_(0),
+ buffer_(allocator_->allocate(kInitialCapacity)) {
buffer_[0] = 0;
}
@@ -134,7 +152,7 @@ class StringStream final {
void PrintMentionedObjectCache(Isolate* isolate);
static void ClearMentionedObjectCache(Isolate* isolate);
#ifdef DEBUG
- static bool IsMentionedObjectCacheClear(Isolate* isolate);
+ bool IsMentionedObjectCacheClear(Isolate* isolate);
#endif
static const int kInitialCapacity = 16;
@@ -143,6 +161,7 @@ class StringStream final {
void PrintObject(Object* obj);
StringAllocator* allocator_;
+ ObjectPrintMode object_print_mode_;
unsigned capacity_;
unsigned length_; // does not include terminating 0-character
char* buffer_;
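
Together with the FixedStringAllocator declared above, the new constructor
lets a StringStream print into caller-owned storage: allocate() hands out the
fixed buffer and grow() declines to enlarge it, so output is truncated rather
than reallocated. A hedged usage sketch (buffer size and message are
illustrative):

  // Sketch only; assumes string-stream.h is included.
  void PrintToFixedBuffer() {
    char buffer[128];
    FixedStringAllocator allocator(buffer, sizeof(buffer));
    StringStream stream(&allocator);  // object_print_mode defaults to concise
    stream.Add("deopt reason: %s", "wrong map");
    // grow() keeps returning the same 128-byte buffer, so Put()/Add() simply
    // stop once it is full.
  }
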
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 03979fded3..3ddd6d26ce 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -2,16 +2,35 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $stringCharAt;
-var $stringIndexOf;
-var $stringSubstring;
-
-(function(global, shared, exports) {
+(function(global, utils) {
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
+var InternalArray = utils.InternalArray;
+var InternalPackedArray = utils.InternalPackedArray;
+
+var ArrayIndexOf;
+var ArrayJoin;
+var MathMax;
+var MathMin;
+var RegExpExec;
+var RegExpExecNoTests;
+var RegExpLastMatchInfo;
+
+utils.Import(function(from) {
+ ArrayIndexOf = from.ArrayIndexOf;
+ ArrayJoin = from.ArrayJoin;
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
+ RegExpExec = from.RegExpExec;
+ RegExpExecNoTests = from.RegExpExecNoTests;
+ RegExpLastMatchInfo = from.RegExpLastMatchInfo;
+});
//-------------------------------------------------------------------
@@ -153,21 +172,18 @@ function StringMatchJS(regexp) {
// value is discarded.
var lastIndex = regexp.lastIndex;
TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
- if (!regexp.global) return $regexpExecNoTests(regexp, subject, 0);
- var result = %StringMatch(subject, regexp, $regexpLastMatchInfo);
+ if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
+ var result = %StringMatch(subject, regexp, RegExpLastMatchInfo);
if (result !== null) $regexpLastMatchInfoOverride = null;
regexp.lastIndex = 0;
return result;
}
// Non-regexp argument.
regexp = new GlobalRegExp(regexp);
- return $regexpExecNoTests(regexp, subject, 0);
+ return RegExpExecNoTests(regexp, subject, 0);
}
-var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
-
-
// ECMA-262 v6, section 21.1.3.12
//
// For now we do nothing, as proper normalization requires big tables.
@@ -177,16 +193,20 @@ function StringNormalizeJS(form) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
var form = form ? TO_STRING_INLINE(form) : 'NFC';
- var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
+
+ var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
+ var normalizationForm =
+ %_CallFunction(NORMALIZATION_FORMS, form, ArrayIndexOf);
if (normalizationForm === -1) {
- throw MakeRangeError(kNormalizationForm, NORMALIZATION_FORMS.join(', '));
+ throw MakeRangeError(kNormalizationForm,
+ %_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
}
return %_ValueOf(this);
}
-// This has the same size as the $regexpLastMatchInfo array, and can be used
+// This has the same size as the RegExpLastMatchInfo array, and can be used
// for functions that expect that structure to be returned. It is used when
// the needle is a string rather than a regexp. In this case we can't update
// lastMatchArray without erroneously affecting the properties on the global
@@ -228,7 +248,7 @@ function StringReplace(search, replace) {
if (!search.global) {
// Non-global regexp search, string replace.
- var match = $regexpExec(search, subject, 0);
+ var match = RegExpExec(search, subject, 0);
if (match == null) {
search.lastIndex = 0
return subject;
@@ -237,7 +257,7 @@ function StringReplace(search, replace) {
return %_SubString(subject, 0, match[CAPTURE0]) +
%_SubString(subject, match[CAPTURE1], subject.length)
}
- return ExpandReplacement(replace, subject, $regexpLastMatchInfo,
+ return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
%_SubString(subject, 0, match[CAPTURE0])) +
%_SubString(subject, match[CAPTURE1], subject.length);
}
@@ -246,17 +266,17 @@ function StringReplace(search, replace) {
search.lastIndex = 0;
if ($regexpLastMatchInfoOverride == null) {
return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, $regexpLastMatchInfo);
+ subject, search, replace, RegExpLastMatchInfo);
} else {
// We use this hack to detect whether StringReplaceRegExpWithString
// found at least one hit. In that case we need to remove any
// override.
- var saved_subject = $regexpLastMatchInfo[LAST_SUBJECT_INDEX];
- $regexpLastMatchInfo[LAST_SUBJECT_INDEX] = 0;
+ var saved_subject = RegExpLastMatchInfo[LAST_SUBJECT_INDEX];
+ RegExpLastMatchInfo[LAST_SUBJECT_INDEX] = 0;
var answer = %StringReplaceGlobalRegExpWithString(
- subject, search, replace, $regexpLastMatchInfo);
- if (%_IsSmi($regexpLastMatchInfo[LAST_SUBJECT_INDEX])) {
- $regexpLastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
+ subject, search, replace, RegExpLastMatchInfo);
+ if (%_IsSmi(RegExpLastMatchInfo[LAST_SUBJECT_INDEX])) {
+ RegExpLastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
} else {
$regexpLastMatchInfoOverride = null;
}
@@ -405,7 +425,7 @@ function CaptureString(string, lastCaptureInfo, index) {
// TODO(lrn): This array will survive indefinitely if replace is never
// called again. However, it will be empty, since the contents are cleared
// in the finally block.
-var reusableReplaceArray = new InternalArray(16);
+var reusableReplaceArray = new InternalArray(4);
// Helper function for replacing regular expressions with the result of a
// function application in String.prototype.replace.
@@ -422,7 +442,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
}
var res = %RegExpExecMultiple(regexp,
subject,
- $regexpLastMatchInfo,
+ RegExpLastMatchInfo,
resultArray);
regexp.lastIndex = 0;
if (IS_NULL(res)) {
@@ -431,7 +451,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
return subject;
}
var len = res.length;
- if (NUMBER_OF_CAPTURES($regexpLastMatchInfo) == 2) {
+ if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
// If the number of captures is two then there are no explicit captures in
// the regexp, just the implicit capture that captures the whole match. In
// this case we can simplify quite a bit and end up with something faster.
@@ -485,7 +505,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = $regexpExec(regexp, subject, 0);
+ var matchInfo = RegExpExec(regexp, subject, 0);
if (IS_NULL(matchInfo)) {
regexp.lastIndex = 0;
return subject;
@@ -533,7 +553,7 @@ function StringSearch(re) {
} else {
regexp = new GlobalRegExp(re);
}
- var match = $regexpExec(regexp, TO_STRING_INLINE(this), 0);
+ var match = RegExpExec(regexp, TO_STRING_INLINE(this), 0);
if (match) {
return match[CAPTURE0];
}
@@ -619,7 +639,7 @@ function StringSplitJS(separator, limit) {
function StringSplitOnRegExp(subject, separator, limit, length) {
if (length === 0) {
- if ($regexpExec(separator, subject, 0, 0) != null) {
+ if (RegExpExec(separator, subject, 0, 0) != null) {
return [];
}
return [subject];
@@ -638,7 +658,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
break;
}
- var matchInfo = $regexpExec(separator, subject, startIndex);
+ var matchInfo = RegExpExec(separator, subject, startIndex);
if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
result[result.length] = %_SubString(subject, currentIndex, length);
break;
@@ -831,98 +851,101 @@ function StringFromCharCode(code) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
- return TO_STRING_INLINE(str).replace(/"/g, "&quot;");
+ return %_CallFunction(TO_STRING_INLINE(str), /"/g, "&quot;", StringReplace);
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2
function StringAnchor(name) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor");
- return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>";
+ return "<a name=\"" + HtmlEscape(name) + "\">" + TO_STRING_INLINE(this) +
+ "</a>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.3
function StringBig() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.big");
- return "<big>" + this + "</big>";
+ return "<big>" + TO_STRING_INLINE(this) + "</big>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.4
function StringBlink() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink");
- return "<blink>" + this + "</blink>";
+ return "<blink>" + TO_STRING_INLINE(this) + "</blink>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.5
function StringBold() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold");
- return "<b>" + this + "</b>";
+ return "<b>" + TO_STRING_INLINE(this) + "</b>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.6
function StringFixed() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed");
- return "<tt>" + this + "</tt>";
+ return "<tt>" + TO_STRING_INLINE(this) + "</tt>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.7
function StringFontcolor(color) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor");
- return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
+ return "<font color=\"" + HtmlEscape(color) + "\">" + TO_STRING_INLINE(this) +
+ "</font>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.8
function StringFontsize(size) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize");
- return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
+ return "<font size=\"" + HtmlEscape(size) + "\">" + TO_STRING_INLINE(this) +
+ "</font>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.9
function StringItalics() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics");
- return "<i>" + this + "</i>";
+ return "<i>" + TO_STRING_INLINE(this) + "</i>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.10
function StringLink(s) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.link");
- return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>";
+ return "<a href=\"" + HtmlEscape(s) + "\">" + TO_STRING_INLINE(this) + "</a>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.11
function StringSmall() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.small");
- return "<small>" + this + "</small>";
+ return "<small>" + TO_STRING_INLINE(this) + "</small>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.12
function StringStrike() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike");
- return "<strike>" + this + "</strike>";
+ return "<strike>" + TO_STRING_INLINE(this) + "</strike>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.13
function StringSub() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub");
- return "<sub>" + this + "</sub>";
+ return "<sub>" + TO_STRING_INLINE(this) + "</sub>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.14
function StringSup() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup");
- return "<sup>" + this + "</sup>";
+ return "<sup>" + TO_STRING_INLINE(this) + "</sup>";
}
// ES6 draft 01-20-14, section 21.1.3.13
@@ -963,7 +986,7 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
var s_len = s.length;
- var start = $min($max(pos, 0), s_len);
+ var start = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
if (ss_len + start > s_len) {
return false;
@@ -993,7 +1016,7 @@ function StringEndsWith(searchString /* position */) { // length == 1
}
}
- var end = $min($max(pos, 0), s_len);
+ var end = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
var start = end - ss_len;
if (start < 0) {
@@ -1022,7 +1045,7 @@ function StringIncludes(searchString /* position */) { // length == 1
}
var s_len = s.length;
- var start = $min($max(pos, 0), s_len);
+ var start = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
if (ss_len + start > s_len) {
return false;
@@ -1115,14 +1138,14 @@ function StringRaw(callSite) {
GlobalString.prototype, "constructor", GlobalString, DONT_ENUM);
// Set up the non-enumerable functions on the String object.
-$installFunctions(GlobalString, DONT_ENUM, [
+utils.InstallFunctions(GlobalString, DONT_ENUM, [
"fromCharCode", StringFromCharCode,
"fromCodePoint", StringFromCodePoint,
"raw", StringRaw
]);
// Set up the non-enumerable functions on the String prototype object.
-$installFunctions(GlobalString.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"valueOf", StringValueOf,
"toString", StringToString,
"charAt", StringCharAtJS,
@@ -1167,8 +1190,18 @@ $installFunctions(GlobalString.prototype, DONT_ENUM, [
"sup", StringSup
]);
-$stringCharAt = StringCharAtJS;
-$stringIndexOf = StringIndexOfJS;
-$stringSubstring = StringSubstring;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.StringCharAt = StringCharAtJS;
+ to.StringIndexOf = StringIndexOfJS;
+ to.StringLastIndexOf = StringLastIndexOfJS;
+ to.StringMatch = StringMatchJS;
+ to.StringReplace = StringReplace;
+ to.StringSplit = StringSplitJS;
+ to.StringSubstr = StringSubstr;
+ to.StringSubstring = StringSubstring;
+});
})
diff --git a/deps/v8/src/strings-storage.cc b/deps/v8/src/strings-storage.cc
index 6b19339ee7..533fa8959c 100644
--- a/deps/v8/src/strings-storage.cc
+++ b/deps/v8/src/strings-storage.cc
@@ -119,5 +119,5 @@ HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index 2b48af3cc6..ec26845f51 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -419,4 +419,5 @@ double Strtod(Vector<const char> buffer, int exponent) {
return BignumStrtod(trimmed, exponent, guess);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index ffbe847ca6..8ac7fe7011 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -12,15 +12,24 @@
var $symbolToString;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalObject = global.Object;
var GlobalSymbol = global.Symbol;
+var ObjectGetOwnPropertyKeys;
+
+utils.Import(function(from) {
+ ObjectGetOwnPropertyKeys = from.ObjectGetOwnPropertyKeys;
+});
+
// -------------------------------------------------------------------
function SymbolConstructor(x) {
@@ -62,7 +71,7 @@ function SymbolFor(key) {
function SymbolKeyFor(symbol) {
- if (!IS_SYMBOL(symbol)) throw MakeTypeError("not_a_symbol", [symbol]);
+ if (!IS_SYMBOL(symbol)) throw MakeTypeError(kSymbolKeyFor, symbol);
return %SymbolRegistry().keyFor[symbol];
}
@@ -73,7 +82,7 @@ function ObjectGetOwnPropertySymbols(obj) {
// TODO(arv): Proxies use a shared trap for String and Symbol keys.
- return $objectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_STRING);
+ return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_STRING);
}
//-------------------------------------------------------------------
@@ -81,7 +90,7 @@ function ObjectGetOwnPropertySymbols(obj) {
%SetCode(GlobalSymbol, SymbolConstructor);
%FunctionSetPrototype(GlobalSymbol, new GlobalObject());
-$installConstants(GlobalSymbol, [
+utils.InstallConstants(GlobalSymbol, [
// TODO(rossberg): expose when implemented.
// "hasInstance", symbolHasInstance,
// "isConcatSpreadable", symbolIsConcatSpreadable,
@@ -93,7 +102,7 @@ $installConstants(GlobalSymbol, [
"unscopables", symbolUnscopables
]);
-$installFunctions(GlobalSymbol, DONT_ENUM, [
+utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
"for", SymbolFor,
"keyFor", SymbolKeyFor
]);
@@ -103,12 +112,12 @@ $installFunctions(GlobalSymbol, DONT_ENUM, [
%AddNamedProperty(
GlobalSymbol.prototype, symbolToStringTag, "Symbol", DONT_ENUM | READ_ONLY);
-$installFunctions(GlobalSymbol.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM, [
"toString", SymbolToString,
"valueOf", SymbolValueOf
]);
-$installFunctions(GlobalObject, DONT_ENUM, [
+utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"getOwnPropertySymbols", ObjectGetOwnPropertySymbols
]);
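
Alongside the `utils` migration, the symbol.js hunk swaps the string-keyed error lookup (`MakeTypeError("not_a_symbol", [symbol])`) for a numeric template constant (`MakeTypeError(kSymbolKeyFor, symbol)`). A hedged sketch of that mechanism; the template table contents here are illustrative, not V8's actual message strings:

```js
// Hypothetical sketch of template-id based error construction.
// kSymbolKeyFor is a numeric index into a message-template table,
// replacing the old string-keyed lookup.
const kSymbolKeyFor = 0;
const kConstructorNotFunction = 1;

const templates = [
  (arg) => `${String(arg)} is not a symbol`,
  (arg) => `Constructor ${String(arg)} requires 'new'`,
];

function MakeTypeErrorSketch(templateId, arg) {
  return new TypeError(templates[templateId](arg));
}

// Usage mirroring SymbolKeyFor's guard:
function symbolKeyForSketch(symbol) {
  if (typeof symbol !== "symbol") {
    throw MakeTypeErrorSketch(kSymbolKeyFor, symbol);
  }
  return Symbol.keyFor(symbol);
}

console.log(symbolKeyForSketch(Symbol.for("x")));  // "x"
```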
diff --git a/deps/v8/src/templates.js b/deps/v8/src/templates.js
index ff94683fb1..b7e1527fc6 100644
--- a/deps/v8/src/templates.js
+++ b/deps/v8/src/templates.js
@@ -6,15 +6,23 @@
var $getTemplateCallSite;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
-var callSiteCache = new global.Map;
-var mapGetFn = global.Map.prototype.get;
-var mapSetFn = global.Map.prototype.set;
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalMap = global.Map;
+var InternalArray = utils.InternalArray;
+
+// -------------------------------------------------------------------
+
+var callSiteCache = new GlobalMap;
+var mapGetFn = GlobalMap.prototype.get;
+var mapSetFn = GlobalMap.prototype.set;
function SameCallSiteElements(rawStrings, other) {
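
The templates.js hunk keeps capturing `Map.prototype.get`/`set` at bootstrap time, now via the imported `GlobalMap`. The point of the capture is tamper resistance, as this runnable sketch shows:

```js
// Sketch of the capture-at-bootstrap pattern: grab the Map methods
// once, before user code runs, then invoke them with .call so later
// monkey-patching of Map.prototype cannot observe or corrupt the cache.
const callSiteCache = new Map();
const mapGetFn = Map.prototype.get;
const mapSetFn = Map.prototype.set;

function cacheLookup(key) {
  return mapGetFn.call(callSiteCache, key);
}

function cacheStore(key, value) {
  mapSetFn.call(callSiteCache, key, value);
}

// Even if user code does this afterwards...
Map.prototype.get = function() { throw new Error("tampered!"); };

cacheStore("site", 42);
console.log(cacheLookup("site"));  // 42, tampering not observed
```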
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.cc b/deps/v8/src/third_party/fdlibm/fdlibm.cc
index b8bc243f4d..ea3efd35be 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.cc
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.cc
@@ -290,5 +290,5 @@ int rempio2(double x, double* y) {
}
return n;
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.js b/deps/v8/src/third_party/fdlibm/fdlibm.js
index 82f50cc514..a8935565b7 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.js
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.js
@@ -26,15 +26,26 @@
var kMath;
var rempio2result;
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalMath = global.Math;
-//-------------------------------------------------------------------
+var MathAbs;
+var MathExp;
+
+utils.Import(function(from) {
+ MathAbs = from.MathAbs;
+ MathExp = from.MathExp;
+});
+
+// -------------------------------------------------------------------
define INVPIO2 = kMath[0];
define PIO2_1 = kMath[1];
@@ -87,7 +98,7 @@ macro REMPIO2(X)
}
} else if (ix <= 0x413921fb) {
// |X| ~<= 2^19*(pi/2), medium size
- var t = $abs(X);
+ var t = MathAbs(X);
n = (t * INVPIO2 + 0.5) | 0;
var r = t - n * PIO2_1;
var w = n * PIO2_1T;
@@ -269,7 +280,7 @@ function KernelTan(x, y, returnTan) {
if (ix < 0x3e300000) { // |x| < 2^-28
if (((ix | %_DoubleLo(x)) | (returnTan + 1)) == 0) {
      // x == 0 && returnTan == -1
- return 1 / $abs(x);
+ return 1 / MathAbs(x);
} else {
if (returnTan == 1) {
return x;
@@ -757,7 +768,7 @@ function MathSinh(x) {
x = x * 1; // Convert to number.
var h = (x < 0) ? -0.5 : 0.5;
// |x| in [0, 22]. return sign(x)*0.5*(E+E/(E+1))
- var ax = $abs(x);
+ var ax = MathAbs(x);
if (ax < 22) {
// For |x| < 2^-28, sinh(x) = x
if (ax < TWO_M28) return x;
@@ -766,11 +777,11 @@ function MathSinh(x) {
return h * (t + t / (t + 1));
}
// |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
- if (ax < LOG_MAXD) return h * $exp(ax);
+ if (ax < LOG_MAXD) return h * MathExp(ax);
// |x| in [log(maxdouble), overflowthreshold]
// overflowthreshold = 710.4758600739426
if (ax <= KSINH_OVERFLOW) {
- var w = $exp(0.5 * ax);
+ var w = MathExp(0.5 * ax);
var t = h * w;
return t * w;
}
@@ -808,7 +819,7 @@ function MathCosh(x) {
var ix = %_DoubleHi(x) & 0x7fffffff;
// |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
if (ix < 0x3fd62e43) {
- var t = MathExpm1($abs(x));
+ var t = MathExpm1(MathAbs(x));
var w = 1 + t;
// For |x| < 2^-55, cosh(x) = 1
if (ix < 0x3c800000) return w;
@@ -816,14 +827,14 @@ function MathCosh(x) {
}
  // |x| in [0.5*log2, 22], return (exp(|x|)+1/exp(|x|))/2
if (ix < 0x40360000) {
- var t = $exp($abs(x));
+ var t = MathExp(MathAbs(x));
return 0.5 * t + 0.5 / t;
}
// |x| in [22, log(maxdouble)], return half*exp(|x|)
- if (ix < 0x40862e42) return 0.5 * $exp($abs(x));
+ if (ix < 0x40862e42) return 0.5 * MathExp(MathAbs(x));
// |x| in [log(maxdouble), overflowthreshold]
- if ($abs(x) <= KCOSH_OVERFLOW) {
- var w = $exp(0.5 * $abs(x));
+ if (MathAbs(x) <= KCOSH_OVERFLOW) {
+ var w = MathExp(0.5 * MathAbs(x));
var t = 0.5 * w;
return t * w;
}
@@ -926,7 +937,7 @@ define TWO53 = 9007199254740992;
function MathLog2(x) {
x = x * 1; // Convert to number.
- var ax = $abs(x);
+ var ax = MathAbs(x);
var hx = %_DoubleHi(x);
var lx = %_DoubleLo(x);
var ix = hx & 0x7fffffff;
@@ -1012,7 +1023,7 @@ function MathLog2(x) {
//-------------------------------------------------------------------
-$installFunctions(GlobalMath, DONT_ENUM, [
+utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"cos", MathCos,
"sin", MathSin,
"tan", MathTan,
@@ -1024,7 +1035,7 @@ $installFunctions(GlobalMath, DONT_ENUM, [
"expm1", MathExpm1
]);
-%SetInlineBuiltinFlag(MathSin);
-%SetInlineBuiltinFlag(MathCos);
+%SetForceInlineFlag(MathSin);
+%SetForceInlineFlag(MathCos);
})
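
The sinh/cosh kernels edited above split the domain to avoid overflow: moderate |x| uses expm1 to dodge cancellation, large |x| uses sinh(x) ~ sign(x)*exp(|x|)/2, and near the overflow threshold exp(|x|/2) is computed once and squared in two steps. A runnable sketch of the same region logic; the thresholds mirror the constants the diff names (TWO_M28, LOG_MAXD, KSINH_OVERFLOW), with approximate values:

```js
// Sketch of fdlibm-style sinh region handling. Exact constant values
// here are illustrative approximations of the named fdlibm constants.
const TWO_M28 = Math.pow(2, -28);
const LOG_MAXD = Math.log(Number.MAX_VALUE);   // ~709.78
const KSINH_OVERFLOW = 710.4758600739426;

function sinhSketch(x) {
  const h = x < 0 ? -0.5 : 0.5;
  const ax = Math.abs(x);
  if (ax < 22) {
    if (ax < TWO_M28) return x;                // sinh(x) ~ x for tiny x
    const t = Math.expm1(ax);                  // avoids cancellation
    if (ax < 1) return h * (2 * t - t * t / (t + 1));
    return h * (t + t / (t + 1));
  }
  if (ax < LOG_MAXD) return h * Math.exp(ax);  // exp alone is safe
  if (ax <= KSINH_OVERFLOW) {
    const w = Math.exp(0.5 * ax);              // square in two steps so
    return h * w * w;                          // the intermediate fits
  }
  return x * Infinity;                         // overflow, keep the sign
}

console.log(sinhSketch(1), Math.sinh(1));      // close agreement
```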
diff --git a/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h b/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h
deleted file mode 100644
index 85d51b79af..0000000000
--- a/deps/v8/src/third_party/kernel/tools/perf/util/jitdump.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef JITDUMP_H
-#define JITDUMP_H
-
-#include <sys/time.h>
-#include <time.h>
-#include <stdint.h>
-
-/* JiTD */
-#define JITHEADER_MAGIC 0x4A695444
-#define JITHEADER_MAGIC_SW 0x4454694A
-
-#define PADDING_8ALIGNED(x) ((((x) + 7) & 7) ^ 7)
-
-#define JITHEADER_VERSION 1
-
-struct jitheader {
- uint32_t magic; /* characters "jItD" */
- uint32_t version; /* header version */
- uint32_t total_size; /* total size of header */
- uint32_t elf_mach; /* elf mach target */
- uint32_t pad1; /* reserved */
- uint32_t pid; /* JIT process id */
- uint64_t timestamp; /* timestamp */
-};
-
-enum jit_record_type {
- JIT_CODE_LOAD = 0,
- JIT_CODE_MOVE = 1,
- JIT_CODE_DEBUG_INFO = 2,
- JIT_CODE_CLOSE = 3,
- JIT_CODE_MAX
-};
-
-/* record prefix (mandatory in each record) */
-struct jr_prefix {
- uint32_t id;
- uint32_t total_size;
- uint64_t timestamp;
-};
-
-struct jr_code_load {
- struct jr_prefix p;
-
- uint32_t pid;
- uint32_t tid;
- uint64_t vma;
- uint64_t code_addr;
- uint64_t code_size;
- uint64_t code_index;
-};
-
-struct jr_code_close {
- struct jr_prefix p;
-};
-
-struct jr_code_move {
- struct jr_prefix p;
-
- uint32_t pid;
- uint32_t tid;
- uint64_t vma;
- uint64_t old_code_addr;
- uint64_t new_code_addr;
- uint64_t code_size;
- uint64_t code_index;
-};
-
-struct jr_code_debug_info {
- struct jr_prefix p;
-
- uint64_t code_addr;
- uint64_t nr_entry;
-};
-
-union jr_entry {
- struct jr_code_debug_info info;
- struct jr_code_close close;
- struct jr_code_load load;
- struct jr_code_move move;
- struct jr_prefix prefix;
-};
-
-#endif /* !JITDUMP_H */
diff --git a/deps/v8/src/third_party/valgrind/LICENSE b/deps/v8/src/third_party/valgrind/LICENSE
new file mode 100644
index 0000000000..0be0037ca2
--- /dev/null
+++ b/deps/v8/src/third_party/valgrind/LICENSE
@@ -0,0 +1,54 @@
+----------------------------------------------------------------
+
+Notice that the following BSD-style license applies to this one
+file (valgrind.h) only. The rest of Valgrind is licensed under the
+terms of the GNU General Public License, version 2, unless
+otherwise indicated. See the COPYING file in the source
+distribution for details.
+
+----------------------------------------------------------------
+
+This file is part of Valgrind, a dynamic binary instrumentation
+framework.
+
+Copyright (C) 2000-2010 Julian Seward. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------
+
+Notice that the above BSD-style license applies to this one file
+(valgrind.h) only. The entire rest of Valgrind is licensed under
+the terms of the GNU General Public License, version 2. See the
+COPYING file in the source distribution for details.
+
+----------------------------------------------------------------
diff --git a/deps/v8/src/third_party/vtune/LICENSE b/deps/v8/src/third_party/vtune/LICENSE
new file mode 100644
index 0000000000..bd00b4c87d
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/LICENSE
@@ -0,0 +1,59 @@
+All files in this directory are provided by the following license if not stated
+otherwise in the individual file:
+====================
+This file is provided under a dual BSD/GPLv2 license. When using or
+redistributing this file, you may do so under either license.
+
+GPL LICENSE SUMMARY
+
+Copyright (c) 2005-2012 Intel Corporation. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of version 2 of the GNU General Public License as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+The full GNU General Public License is included in this distribution
+in the file called LICENSE.GPL.
+
+Contact Information:
+http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+BSD LICENSE
+
+Copyright (c) 2005-2012 Intel Corporation. All rights reserved.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+====================
diff --git a/deps/v8/src/token.cc b/deps/v8/src/token.cc
index db8827102b..73e883f4bd 100644
--- a/deps/v8/src/token.cc
+++ b/deps/v8/src/token.cc
@@ -37,4 +37,5 @@ const char Token::token_type[] = {
#undef KT
#undef KK
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 9fe9e86564..09884d5066 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -106,9 +106,10 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
}
// We're gonna need a bigger TransitionArray.
- Handle<TransitionArray> result = Allocate(
- map->GetIsolate(), new_nof,
- Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
+ Handle<TransitionArray> result =
+ Allocate(map->GetIsolate(), new_nof,
+ Map::SlackForArraySize(false, number_of_transitions,
+ kMaxNumberOfTransitions));
// The map's transition array may have shrunk during the allocation above as
// it was weakly traversed, though it is guaranteed not to disappear. Trim the
@@ -223,6 +224,7 @@ Handle<String> TransitionArray::ExpectedTransitionKey(Handle<Map> map) {
// static
bool TransitionArray::CanHaveMoreTransitions(Handle<Map> map) {
+ if (map->is_dictionary_map()) return false;
Object* raw_transitions = map->raw_transitions();
if (IsFullTransitionArray(raw_transitions)) {
TransitionArray* transitions = TransitionArray::cast(raw_transitions);
@@ -233,17 +235,19 @@ bool TransitionArray::CanHaveMoreTransitions(Handle<Map> map) {
// static
-Handle<Map> TransitionArray::PutPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype,
- Handle<Map> target_map) {
+void TransitionArray::PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map) {
DCHECK(HeapObject::cast(*prototype)->map()->IsMap());
// Don't cache prototype transition if this map is either shared, or a map of
// a prototype.
- if (map->is_prototype_map()) return map;
- if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return map;
+ if (map->is_prototype_map()) return;
+ if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return;
const int header = kProtoTransitionHeaderSize;
+ Handle<WeakCell> target_cell = Map::WeakCellForMap(target_map);
+
Handle<FixedArray> cache(GetPrototypeTransitions(*map));
int capacity = cache->length() - header;
int transitions = NumberOfPrototypeTransitions(*cache) + 1;
@@ -251,7 +255,7 @@ Handle<Map> TransitionArray::PutPrototypeTransition(Handle<Map> map,
if (transitions > capacity) {
// Grow array by factor 2 up to MaxCachedPrototypeTransitions.
int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
- if (new_capacity == capacity) return map;
+ if (new_capacity == capacity) return;
cache = FixedArray::CopySize(cache, header + new_capacity);
if (capacity < 0) {
@@ -267,10 +271,8 @@ Handle<Map> TransitionArray::PutPrototypeTransition(Handle<Map> map,
int last = NumberOfPrototypeTransitions(*cache);
int entry = header + last;
- cache->set(entry, *target_map);
+ cache->set(entry, *target_cell);
SetNumberOfPrototypeTransitions(*cache, last + 1);
-
- return map;
}
@@ -281,8 +283,12 @@ Handle<Map> TransitionArray::GetPrototypeTransition(Handle<Map> map,
FixedArray* cache = GetPrototypeTransitions(*map);
int number_of_transitions = NumberOfPrototypeTransitions(cache);
for (int i = 0; i < number_of_transitions; i++) {
- Map* target = Map::cast(cache->get(kProtoTransitionHeaderSize + i));
- if (target->prototype() == *prototype) return handle(target);
+ WeakCell* target_cell =
+ WeakCell::cast(cache->get(kProtoTransitionHeaderSize + i));
+ if (!target_cell->cleared() &&
+ Map::cast(target_cell->value())->prototype() == *prototype) {
+ return handle(Map::cast(target_cell->value()));
+ }
}
return Handle<Map>();
}
@@ -436,8 +442,9 @@ void TransitionArray::TraverseTransitionTreeInternal(Map* map,
FixedArray* proto_trans = transitions->GetPrototypeTransitions();
for (int i = 0; i < NumberOfPrototypeTransitions(proto_trans); ++i) {
int index = TransitionArray::kProtoTransitionHeaderSize + i;
- TraverseTransitionTreeInternal(Map::cast(proto_trans->get(index)),
- callback, data);
+ WeakCell* cell = WeakCell::cast(proto_trans->get(index));
+ TraverseTransitionTreeInternal(Map::cast(cell->value()), callback,
+ data);
}
}
for (int i = 0; i < transitions->number_of_transitions(); ++i) {
@@ -513,4 +520,5 @@ int TransitionArray::Search(PropertyKind kind, Name* name,
}
return SearchDetails(transition, kind, attributes, out_insertion_index);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
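
The transitions.cc hunks change the prototype transition cache to store a `WeakCell` per target map instead of a raw `Map` pointer, so a cached target can be collected without the cache keeping it alive; every lookup must therefore tolerate a cleared cell. A conceptual JavaScript mirror of that read path, with `WeakRef` standing in for `WeakCell` (the cache shape here is hypothetical):

```js
// Conceptual mirror of the WeakCell-based prototype transition cache.
// WeakRef plays the role of v8::internal::WeakCell: the cache holds a
// weak slot, and a lookup must tolerate an already-collected target.
const protoTransitions = [];  // array of WeakRef<{prototype, map}>

function putPrototypeTransition(prototype, targetMap) {
  protoTransitions.push(new WeakRef({ prototype, map: targetMap }));
}

function getPrototypeTransition(prototype) {
  for (const ref of protoTransitions) {
    const entry = ref.deref();           // undefined once collected,
    if (entry === undefined) continue;   // like WeakCell::cleared()
    if (entry.prototype === prototype) return entry.map;
  }
  return null;                           // Handle<Map>() analogue
}

const proto = { kind: "proto" };
putPrototypeTransition(proto, { id: "target-map" });
console.log(getPrototypeTransition(proto));  // { id: 'target-map' }
```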
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 1cb91a222e..b0aab9502e 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -92,9 +92,8 @@ class TransitionArray: public FixedArray {
// 0: finger - index of the first free cell in the cache
// 1 + i: target map
static const int kMaxCachedPrototypeTransitions = 256;
- static Handle<Map> PutPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype,
- Handle<Map> target_map);
+ static void PutPrototypeTransition(Handle<Map> map, Handle<Object> prototype,
+ Handle<Map> target_map);
static Handle<Map> GetPrototypeTransition(Handle<Map> map,
Handle<Object> prototype);
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 9b64082d35..7fa51d802a 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
int TypeFeedbackVector::ic_metadata_length() const {
- return FLAG_vector_ics ? VectorICComputer::word_count(ICSlots()) : 0;
+ return VectorICComputer::word_count(ICSlots());
}
@@ -30,12 +30,6 @@ Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
}
-Handle<Object> TypeFeedbackVector::MonomorphicArraySentinel(
- Isolate* isolate, ElementsKind elements_kind) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
-}
-
-
Object* TypeFeedbackVector::RawUninitializedSentinel(Heap* heap) {
return heap->uninitialized_symbol();
}
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 3cf81f8c5f..adf0a5078a 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/code-stubs.h"
#include "src/ic/ic.h"
#include "src/ic/ic-state.h"
#include "src/objects.h"
@@ -22,6 +23,12 @@ TypeFeedbackVector::VectorICKind TypeFeedbackVector::FromCodeKind(
return KindLoadIC;
case Code::KEYED_LOAD_IC:
return KindKeyedLoadIC;
+ case Code::STORE_IC:
+ DCHECK(FLAG_vector_stores);
+ return KindStoreIC;
+ case Code::KEYED_STORE_IC:
+ DCHECK(FLAG_vector_stores);
+ return KindKeyedStoreIC;
default:
// Shouldn't get here.
UNREACHABLE();
@@ -40,6 +47,12 @@ Code::Kind TypeFeedbackVector::FromVectorICKind(VectorICKind kind) {
return Code::LOAD_IC;
case KindKeyedLoadIC:
return Code::KEYED_LOAD_IC;
+ case KindStoreIC:
+ DCHECK(FLAG_vector_stores);
+ return Code::STORE_IC;
+ case KindKeyedStoreIC:
+ DCHECK(FLAG_vector_stores);
+ return Code::KEYED_STORE_IC;
case KindUnused:
break;
}
@@ -49,11 +62,6 @@ Code::Kind TypeFeedbackVector::FromVectorICKind(VectorICKind kind) {
Code::Kind TypeFeedbackVector::GetKind(FeedbackVectorICSlot slot) const {
- if (!FLAG_vector_ics) {
- // We only have CALL_ICs
- return Code::CALL_IC;
- }
-
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
VectorICKind b = VectorICComputer::decode(data, slot.ToInt());
@@ -62,11 +70,6 @@ Code::Kind TypeFeedbackVector::GetKind(FeedbackVectorICSlot slot) const {
void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot, Code::Kind kind) {
- if (!FLAG_vector_ics) {
- // Nothing to do if we only have CALL_ICs
- return;
- }
-
VectorICKind b = FromCodeKind(kind);
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
@@ -87,8 +90,7 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
const Spec* spec) {
const int slot_count = spec->slots();
const int ic_slot_count = spec->ic_slots();
- const int index_count =
- FLAG_vector_ics ? VectorICComputer::word_count(ic_slot_count) : 0;
+ const int index_count = VectorICComputer::word_count(ic_slot_count);
const int length = slot_count + (ic_slot_count * elements_per_ic_slot()) +
index_count + kReservedIndexCount;
if (length == kReservedIndexCount) {
@@ -118,10 +120,8 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
}
Handle<TypeFeedbackVector> vector = Handle<TypeFeedbackVector>::cast(array);
- if (FLAG_vector_ics) {
- for (int i = 0; i < ic_slot_count; i++) {
- vector->SetKind(FeedbackVectorICSlot(i), spec->GetKind(i));
- }
+ for (int i = 0; i < ic_slot_count; i++) {
+ vector->SetKind(FeedbackVectorICSlot(i), spec->GetKind(i));
}
return vector;
}
@@ -139,8 +139,6 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
bool TypeFeedbackVector::SpecDiffersFrom(
const ZoneFeedbackVectorSpec* other_spec) const {
- if (!FLAG_vector_ics) return false;
-
if (other_spec->slots() != Slots() || other_spec->ic_slots() != ICSlots()) {
return true;
}
@@ -213,6 +211,14 @@ void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
} else if (kind == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus nexus(this, slot);
nexus.Clear(host);
+ } else if (kind == Code::STORE_IC) {
+ DCHECK(FLAG_vector_stores);
+ StoreICNexus nexus(this, slot);
+ nexus.Clear(host);
+ } else if (kind == Code::KEYED_STORE_IC) {
+ DCHECK(FLAG_vector_stores);
+ KeyedStoreICNexus nexus(this, slot);
+ nexus.Clear(host);
}
}
}
@@ -258,15 +264,40 @@ void FeedbackNexus::InstallHandlers(Handle<FixedArray> array,
}
+void FeedbackNexus::ConfigureUninitialized() {
+ SetFeedback(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+}
+
+
+void FeedbackNexus::ConfigurePremonomorphic() {
+ SetFeedback(*TypeFeedbackVector::PremonomorphicSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+}
+
+
+void FeedbackNexus::ConfigureMegamorphic() {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+
InlineCacheState LoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback == *vector()->UninitializedSentinel(isolate)) {
+ if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
return UNINITIALIZED;
- } else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
+ } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
return MEGAMORPHIC;
- } else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
+ } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
return PREMONOMORPHIC;
} else if (feedback->IsFixedArray()) {
// Determine state purely by our structure, don't check if the maps are
@@ -285,11 +316,61 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback == *vector()->UninitializedSentinel(isolate)) {
+ if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
+ return PREMONOMORPHIC;
+ } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ } else if (feedback->IsFixedArray()) {
+ // Determine state purely by our structure, don't check if the maps are
+ // cleared.
+ return POLYMORPHIC;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ } else if (feedback->IsName()) {
+ Object* extra = GetFeedbackExtra();
+ FixedArray* extra_array = FixedArray::cast(extra);
+ return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ }
+
+ return UNINITIALIZED;
+}
+
+
+InlineCacheState StoreICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+
+ if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
return UNINITIALIZED;
- } else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
+ } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
return PREMONOMORPHIC;
- } else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
+ } else if (feedback->IsFixedArray()) {
+ // Determine state purely by our structure, don't check if the maps are
+ // cleared.
+ return POLYMORPHIC;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+
+ return UNINITIALIZED;
+}
+
+
+InlineCacheState KeyedStoreICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+
+ if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
+ return PREMONOMORPHIC;
+ } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
return MEGAMORPHIC;
} else if (feedback->IsFixedArray()) {
// Determine state purely by our structure, don't check if the maps are
@@ -311,26 +392,32 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
InlineCacheState CallICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- DCHECK(!FLAG_vector_ics ||
- GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate));
+ DCHECK(GetFeedbackExtra() ==
+ *TypeFeedbackVector::UninitializedSentinel(isolate) ||
+ GetFeedbackExtra()->IsSmi());
- if (feedback == *vector()->MegamorphicSentinel(isolate)) {
+ if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
return GENERIC;
} else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
return MONOMORPHIC;
}
- CHECK(feedback == *vector()->UninitializedSentinel(isolate));
+ CHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate));
return UNINITIALIZED;
}
-void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
+int CallICNexus::ExtractCallCount() {
+ Object* call_count = GetFeedbackExtra();
+ if (call_count->IsSmi()) {
+ int value = Smi::cast(call_count)->value() / 2;
+ return value;
+ }
+ return -1;
+}
-void CallICNexus::ConfigureGeneric() {
- SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
-}
+void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
void CallICNexus::ConfigureMonomorphicArray() {
@@ -340,63 +427,52 @@ void CallICNexus::ConfigureMonomorphicArray() {
GetIsolate()->factory()->NewAllocationSite();
SetFeedback(*new_site);
}
-}
-
-
-void CallICNexus::ConfigureUninitialized() {
- SetFeedback(*vector()->UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(kCallCountIncrement), SKIP_WRITE_BARRIER);
}
void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
SetFeedback(*new_cell);
+ SetFeedbackExtra(Smi::FromInt(kCallCountIncrement), SKIP_WRITE_BARRIER);
}
-void KeyedLoadICNexus::ConfigureMegamorphic() {
- Isolate* isolate = GetIsolate();
- SetFeedback(*vector()->MegamorphicSentinel(isolate), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*vector()->UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
-}
-
-
-void LoadICNexus::ConfigureMegamorphic() {
- SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
-}
-
-
-void LoadICNexus::ConfigurePremonomorphic() {
- SetFeedback(*vector()->PremonomorphicSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
+ Handle<Code> handler) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ SetFeedback(*cell);
+ SetFeedbackExtra(*handler);
}
-void KeyedLoadICNexus::ConfigurePremonomorphic() {
- Isolate* isolate = GetIsolate();
- SetFeedback(*vector()->PremonomorphicSentinel(isolate), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*vector()->UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
+ Handle<Map> receiver_map,
+ Handle<Code> handler) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ if (name.is_null()) {
+ SetFeedback(*cell);
+ SetFeedbackExtra(*handler);
+ } else {
+ SetFeedback(*name);
+ Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ array->set(0, *cell);
+ array->set(1, *handler);
+ }
}
-void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
- Handle<Code> handler) {
+void StoreICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
+ Handle<Code> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
SetFeedback(*cell);
SetFeedbackExtra(*handler);
}
-void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
- Handle<Map> receiver_map,
- Handle<Code> handler) {
+void KeyedStoreICNexus::ConfigureMonomorphic(Handle<Name> name,
+ Handle<Map> receiver_map,
+ Handle<Code> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
if (name.is_null()) {
SetFeedback(*cell);
@@ -416,7 +492,7 @@ void LoadICNexus::ConfigurePolymorphic(MapHandleList* maps,
int receiver_count = maps->length();
Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
InstallHandlers(array, maps, handlers);
- SetFeedbackExtra(*vector()->UninitializedSentinel(isolate),
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
}
@@ -429,7 +505,37 @@ void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
Handle<FixedArray> array;
if (name.is_null()) {
array = EnsureArrayOfSize(receiver_count * 2);
- SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()),
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ } else {
+ SetFeedback(*name);
+ array = EnsureExtraArrayOfSize(receiver_count * 2);
+ }
+
+ InstallHandlers(array, maps, handlers);
+}
+
+
+void StoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
+ CodeHandleList* handlers) {
+ Isolate* isolate = GetIsolate();
+ int receiver_count = maps->length();
+ Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
+ InstallHandlers(array, maps, handlers);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+
+void KeyedStoreICNexus::ConfigurePolymorphic(Handle<Name> name,
+ MapHandleList* maps,
+ CodeHandleList* handlers) {
+ int receiver_count = maps->length();
+ DCHECK(receiver_count > 1);
+ Handle<FixedArray> array;
+ if (name.is_null()) {
+ array = EnsureArrayOfSize(receiver_count * 2);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
} else {
SetFeedback(*name);
@@ -559,5 +665,24 @@ Name* KeyedLoadICNexus::FindFirstName() const {
}
return NULL;
}
+
+
+Name* KeyedStoreICNexus::FindFirstName() const {
+ Object* feedback = GetFeedback();
+ if (feedback->IsString()) {
+ return Name::cast(feedback);
+ }
+ return NULL;
+}
+
+
+void StoreICNexus::Clear(Code* host) {
+ StoreIC::Clear(GetIsolate(), host, this);
+}
+
+
+void KeyedStoreICNexus::Clear(Code* host) {
+ KeyedStoreIC::Clear(GetIsolate(), host, this);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
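
All of the new `StateFromFeedback` implementations above decode the IC state purely from the shape of the stored feedback: a sentinel object means UNINITIALIZED, PREMONOMORPHIC, or MEGAMORPHIC; a weak cell means MONOMORPHIC; a fixed array means POLYMORPHIC. A runnable sketch of that decision tree, with symbols standing in for the sentinel objects and plain shape checks standing in for the heap-object type checks:

```js
// Sketch of the StateFromFeedback decision tree shared by the new
// store-IC nexuses. Sentinels are modelled as unique symbols; the
// WeakCell and FixedArray checks become shape checks.
const UninitializedSentinel = Symbol("uninitialized");
const PremonomorphicSentinel = Symbol("premonomorphic");
const MegamorphicSentinel = Symbol("megamorphic");

function stateFromFeedback(feedback) {
  if (feedback === UninitializedSentinel) return "UNINITIALIZED";
  if (feedback === PremonomorphicSentinel) return "PREMONOMORPHIC";
  if (feedback === MegamorphicSentinel) return "MEGAMORPHIC";
  if (Array.isArray(feedback)) return "POLYMORPHIC";      // FixedArray case
  if (feedback instanceof WeakRef) return "MONOMORPHIC";  // WeakCell case
  return "UNINITIALIZED";
}

console.log(stateFromFeedback(MegamorphicSentinel));      // MEGAMORPHIC
console.log(stateFromFeedback(new WeakRef({ map: 1 })));  // MONOMORPHIC
console.log(stateFromFeedback([/* map, handler */]));     // POLYMORPHIC
```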
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index f4887a3e60..a6f72210fc 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -29,7 +29,7 @@ class FeedbackVectorSpec {
int ic_slots() const { return has_ic_slot_ ? 1 : 0; }
Code::Kind GetKind(int ic_slot) const {
- DCHECK(FLAG_vector_ics && has_ic_slot_ && ic_slot == 0);
+ DCHECK(has_ic_slot_ && ic_slot == 0);
return ic_kind_;
}
@@ -46,9 +46,7 @@ class ZoneFeedbackVectorSpec {
: slots_(0), ic_slots_(0), ic_slot_kinds_(zone) {}
ZoneFeedbackVectorSpec(Zone* zone, int slots, int ic_slots)
- : slots_(slots),
- ic_slots_(ic_slots),
- ic_slot_kinds_(FLAG_vector_ics ? ic_slots : 0, zone) {}
+ : slots_(slots), ic_slots_(ic_slots), ic_slot_kinds_(ic_slots, zone) {}
int slots() const { return slots_; }
void increase_slots(int count) { slots_ += count; }
@@ -56,16 +54,14 @@ class ZoneFeedbackVectorSpec {
int ic_slots() const { return ic_slots_; }
void increase_ic_slots(int count) {
ic_slots_ += count;
- if (FLAG_vector_ics) ic_slot_kinds_.resize(ic_slots_);
+ ic_slot_kinds_.resize(ic_slots_);
}
void SetKind(int ic_slot, Code::Kind kind) {
- DCHECK(FLAG_vector_ics);
ic_slot_kinds_[ic_slot] = kind;
}
Code::Kind GetKind(int ic_slot) const {
- DCHECK(FLAG_vector_ics);
return static_cast<Code::Kind>(ic_slot_kinds_.at(ic_slot));
}
@@ -100,7 +96,7 @@ class TypeFeedbackVector : public FixedArray {
static const int kWithTypesIndex = 1;
static const int kGenericCountIndex = 2;
- static int elements_per_ic_slot() { return FLAG_vector_ics ? 2 : 1; }
+ static int elements_per_ic_slot() { return 2; }
int first_ic_slot_index() const {
DCHECK(length() >= kReservedIndexCount);
@@ -218,11 +214,6 @@ class TypeFeedbackVector : public FixedArray {
// The object that indicates a premonomorphic state.
static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(
- Isolate* isolate, ElementsKind elements_kind);
-
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static inline Object* RawUninitializedSentinel(Heap* heap);
@@ -232,10 +223,12 @@ class TypeFeedbackVector : public FixedArray {
KindUnused = 0x0,
KindCallIC = 0x1,
KindLoadIC = 0x2,
- KindKeyedLoadIC = 0x3
+ KindKeyedLoadIC = 0x3,
+ KindStoreIC = 0x4,
+ KindKeyedStoreIC = 0x5,
};
- static const int kVectorICKindBits = 2;
+ static const int kVectorICKindBits = 3;
static VectorICKind FromCodeKind(Code::Kind kind);
static Code::Kind FromVectorICKind(VectorICKind kind);
void SetKind(FeedbackVectorICSlot slot, Code::Kind kind);
@@ -301,6 +294,10 @@ class FeedbackNexus {
virtual bool FindHandlers(CodeHandleList* code_list, int length = -1) const;
virtual Name* FindFirstName() const { return NULL; }
+ virtual void ConfigureUninitialized();
+ virtual void ConfigurePremonomorphic();
+ virtual void ConfigureMegamorphic();
+
Object* GetFeedback() const { return vector()->Get(slot()); }
Object* GetFeedbackExtra() const {
DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
@@ -341,6 +338,10 @@ class FeedbackNexus {
class CallICNexus : public FeedbackNexus {
public:
+ // Monomorphic call ICs store call counts. Platform code needs to increment
+ // the count appropriately (i.e., by 2).
+ static const int kCallCountIncrement = 2;
+
CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK(vector->GetKind(slot) == Code::CALL_IC);
@@ -352,8 +353,6 @@ class CallICNexus : public FeedbackNexus {
void Clear(Code* host);
- void ConfigureUninitialized();
- void ConfigureGeneric();
void ConfigureMonomorphicArray();
void ConfigureMonomorphic(Handle<JSFunction> function);
@@ -366,10 +365,11 @@ class CallICNexus : public FeedbackNexus {
MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const override {
return MaybeHandle<Code>();
}
- virtual bool FindHandlers(CodeHandleList* code_list,
- int length = -1) const override {
+ bool FindHandlers(CodeHandleList* code_list, int length = -1) const override {
return length == 0;
}
+
+ int ExtractCallCount();
};
@@ -386,8 +386,6 @@ class LoadICNexus : public FeedbackNexus {
void Clear(Code* host);
- void ConfigureMegamorphic();
- void ConfigurePremonomorphic();
void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
void ConfigurePolymorphic(MapHandleList* maps, CodeHandleList* handlers);
@@ -409,8 +407,53 @@ class KeyedLoadICNexus : public FeedbackNexus {
void Clear(Code* host);
- void ConfigureMegamorphic();
- void ConfigurePremonomorphic();
+ // name can be a null handle for element loads.
+ void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
+ Handle<Code> handler);
+ // name can be null.
+ void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
+ CodeHandleList* handlers);
+
+ InlineCacheState StateFromFeedback() const override;
+ Name* FindFirstName() const override;
+};
+
+
+class StoreICNexus : public FeedbackNexus {
+ public:
+ StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->GetKind(slot) == Code::STORE_IC);
+ }
+ StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->GetKind(slot) == Code::STORE_IC);
+ }
+
+ void Clear(Code* host);
+
+ void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
+
+ void ConfigurePolymorphic(MapHandleList* maps, CodeHandleList* handlers);
+
+ InlineCacheState StateFromFeedback() const override;
+};
+
+
+class KeyedStoreICNexus : public FeedbackNexus {
+ public:
+ KeyedStoreICNexus(Handle<TypeFeedbackVector> vector,
+ FeedbackVectorICSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->GetKind(slot) == Code::KEYED_STORE_IC);
+ }
+ KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK(vector->GetKind(slot) == Code::KEYED_STORE_IC);
+ }
+
+ void Clear(Code* host);
+
  // name can be a null handle for element stores.
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
Handle<Code> handler);
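
The header above also introduces the call-count bookkeeping for monomorphic call ICs: the feedback "extra" slot holds the count scaled by `kCallCountIncrement` (2), and `ExtractCallCount` recovers it with a division, returning -1 when the slot holds no small integer. A sketch of just that arithmetic contract (the generated-code increment itself is out of scope here):

```js
// Sketch of the call-count contract: the feedback "extra" slot holds
// the call count scaled by kCallCountIncrement, so extraction divides
// and returns -1 when the slot does not hold a small integer.
const kCallCountIncrement = 2;

function configureMonomorphicCount(slot) {
  slot.extra = kCallCountIncrement;           // first call recorded
}

function incrementCallCount(slot) {           // what platform code does
  slot.extra += kCallCountIncrement;
}

function extractCallCount(slot) {
  if (Number.isInteger(slot.extra)) {
    return slot.extra / kCallCountIncrement;
  }
  return -1;                                  // no count recorded
}

const slot = { extra: undefined };
console.log(extractCallCount(slot));          // -1
configureMonomorphicCount(slot);
incrementCallCount(slot);
console.log(extractCallCount(slot));          // 2 (two calls seen)
```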
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 4ad66f8557..ba983d63a8 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -317,15 +317,6 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
}
-void TypeFeedbackOracle::PropertyReceiverTypes(TypeFeedbackId id,
- Handle<Name> name,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
- CollectReceiverTypes(id, name, flags, receiver_types);
-}
-
-
bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
bool all_strings = receiver_types->length() > 0;
for (int i = 0; i < receiver_types->length(); i++) {
@@ -335,18 +326,6 @@ bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
}
-void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
- TypeFeedbackId id,
- SmallMapList* receiver_types,
- bool* is_string,
- IcCheckType* key_type) {
- receiver_types->Clear();
- CollectReceiverTypes(id, receiver_types);
- *is_string = HasOnlyStringMaps(receiver_types);
- GetLoadKeyType(id, key_type);
-}
-
-
void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
@@ -451,7 +430,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(T* obj, SmallMapList* types) {
}
-byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
+uint16_t TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
Handle<Object> object = GetInfo(id);
return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
}
@@ -540,4 +519,5 @@ void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 76a45dc847..965fca3010 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -42,14 +42,8 @@ class TypeFeedbackOracle: public ZoneObject {
IcCheckType* key_type);
void GetLoadKeyType(TypeFeedbackId id, IcCheckType* key_type);
- void PropertyReceiverTypes(TypeFeedbackId id, Handle<Name> name,
- SmallMapList* receiver_types);
void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedPropertyReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types,
- bool* is_string,
- IcCheckType* key_type);
void KeyedPropertyReceiverTypes(FeedbackVectorICSlot slot,
SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type);
@@ -84,7 +78,7 @@ class TypeFeedbackOracle: public ZoneObject {
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
- byte ToBooleanTypes(TypeFeedbackId id);
+ uint16_t ToBooleanTypes(TypeFeedbackId id);
// Get type information for arithmetic operations and compares.
void BinaryType(TypeFeedbackId id,
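
The `byte` to `uint16_t` widening of `ToBooleanTypes` suggests the to-boolean bitset outgrew eight flags. A hedged sketch of the kind of bitset involved; the flag names below are illustrative, not V8's actual `ToBooleanStub::Types` members:

```js
// Sketch of a to-boolean input bitset: each observed input kind sets
// one bit, and a ninth tracked kind no longer fits in a uint8_t.
const ToBooleanHint = {
  Undefined:     1 << 0,
  Boolean:       1 << 1,
  Null:          1 << 2,
  SmallInteger:  1 << 3,
  Receiver:      1 << 4,
  String:        1 << 5,
  Symbol:        1 << 6,
  HeapNumber:    1 << 7,
  SimdValue:     1 << 8,   // hypothetical ninth bit: exceeds a byte
};

let seen = 0;
function recordToBooleanInput(hint) { seen |= hint; }

recordToBooleanInput(ToBooleanHint.String);
recordToBooleanInput(ToBooleanHint.SimdValue);
console.log(seen, seen > 0xff);  // 288 true -- needs 16 bits
```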
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index baf8edb991..ce52cdf15c 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalDataView = global.DataView;
@@ -32,6 +35,16 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
+var MathMax;
+var MathMin;
+
+utils.Import(function(from) {
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
+});
+
+var InternalArray = utils.InternalArray;
+
// --------------- Typed Arrays ---------------------
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
@@ -77,7 +90,7 @@ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
|| (newLength > %_MaxSmi())) {
throw MakeRangeError(kInvalidTypedArrayLength);
}
- %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength, true);
}
function NAMEConstructByLength(obj, length) {
@@ -89,9 +102,9 @@ function NAMEConstructByLength(obj, length) {
var byteLength = l * ELEMENT_SIZE;
if (byteLength > %_TypedArrayMaxSizeInHeap()) {
var buffer = new GlobalArrayBuffer(byteLength);
- %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength, true);
} else {
- %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength, true);
}
}
@@ -102,7 +115,15 @@ function NAMEConstructByArrayLike(obj, arrayLike) {
if (l > %_MaxSmi()) {
throw MakeRangeError(kInvalidTypedArrayLength);
}
- if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
+ var initialized = false;
+ var byteLength = l * ELEMENT_SIZE;
+ if (byteLength <= %_TypedArrayMaxSizeInHeap()) {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength, false);
+ } else {
+ initialized =
+ %TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l);
+ }
+ if (!initialized) {
for (var i = 0; i < l; i++) {
      // It is crucial that we let any exceptions from arrayLike[i]
// propagate outside the function.
@@ -111,15 +132,40 @@ function NAMEConstructByArrayLike(obj, arrayLike) {
}
}
+function NAMEConstructByIterable(obj, iterable, iteratorFn) {
+ var list = new InternalArray();
+ // Reading the Symbol.iterator property of iterable twice would be
+ // observable with getters, so instead, we call the function which
+ // was already looked up, and wrap it in another iterable. The
+ // __proto__ of the new iterable is set to null to avoid any chance
+ // of modifications to Object.prototype being observable here.
+ var iterator = %_CallFunction(iterable, iteratorFn);
+ var newIterable = {
+ __proto__: null
+ };
+ // TODO(littledan): Computed properties don't work yet in nosnap.
+ // Rephrase when they do.
+ newIterable[symbolIterator] = function() { return iterator; }
+ for (var value of newIterable) {
+ list.push(value);
+ }
+ NAMEConstructByArrayLike(obj, list);
+}
+
function NAMEConstructor(arg1, arg2, arg3) {
if (%_IsConstructCall()) {
- if (IS_ARRAYBUFFER(arg1)) {
+ if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
} else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
NAMEConstructByLength(this, arg1);
} else {
- NAMEConstructByArrayLike(this, arg1);
+ var iteratorFn = arg1[symbolIterator];
+ if (IS_UNDEFINED(iteratorFn) || iteratorFn === $arrayValues) {
+ NAMEConstructByArrayLike(this, arg1);
+ } else {
+ NAMEConstructByIterable(this, arg1, iteratorFn);
+ }
}
} else {
throw MakeTypeError(kConstructorNotFunction, "NAME")
@@ -165,16 +211,16 @@ function NAMESubArray(begin, end) {
var srcLength = %_TypedArrayGetLength(this);
if (beginInt < 0) {
- beginInt = $max(0, srcLength + beginInt);
+ beginInt = MathMax(0, srcLength + beginInt);
} else {
- beginInt = $min(srcLength, beginInt);
+ beginInt = MathMin(srcLength, beginInt);
}
var endInt = IS_UNDEFINED(end) ? srcLength : end;
if (endInt < 0) {
- endInt = $max(0, srcLength + endInt);
+ endInt = MathMax(0, srcLength + endInt);
} else {
- endInt = $min(endInt, srcLength);
+ endInt = MathMin(endInt, srcLength);
}
if (endInt < beginInt) {
endInt = beginInt;
@@ -293,7 +339,7 @@ function TypedArraySet(obj, offset) {
}
function TypedArrayGetToStringTag() {
- if (!%IsTypedArray(this)) return;
+ if (!%_IsTypedArray(this)) return;
var name = %_ClassOf(this);
if (IS_UNDEFINED(name)) return;
return name;
@@ -312,16 +358,16 @@ macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%AddNamedProperty(GlobalNAME.prototype,
"BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
- $installGetter(GlobalNAME.prototype, "buffer", NAME_GetBuffer);
- $installGetter(GlobalNAME.prototype, "byteOffset", NAME_GetByteOffset,
- DONT_ENUM | DONT_DELETE);
- $installGetter(GlobalNAME.prototype, "byteLength", NAME_GetByteLength,
- DONT_ENUM | DONT_DELETE);
- $installGetter(GlobalNAME.prototype, "length", NAME_GetLength,
- DONT_ENUM | DONT_DELETE);
- $installGetter(GlobalNAME.prototype, symbolToStringTag,
- TypedArrayGetToStringTag);
- $installFunctions(GlobalNAME.prototype, DONT_ENUM, [
+ utils.InstallGetter(GlobalNAME.prototype, "buffer", NAME_GetBuffer);
+ utils.InstallGetter(GlobalNAME.prototype, "byteOffset", NAME_GetByteOffset,
+ DONT_ENUM | DONT_DELETE);
+ utils.InstallGetter(GlobalNAME.prototype, "byteLength", NAME_GetByteLength,
+ DONT_ENUM | DONT_DELETE);
+ utils.InstallGetter(GlobalNAME.prototype, "length", NAME_GetLength,
+ DONT_ENUM | DONT_DELETE);
+ utils.InstallGetter(GlobalNAME.prototype, symbolToStringTag,
+ TypedArrayGetToStringTag);
+ utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
"subarray", NAMESubArray,
"set", TypedArraySet
]);
@@ -333,6 +379,7 @@ TYPED_ARRAYS(SETUP_TYPED_ARRAY)
function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (%_IsConstructCall()) {
+ // TODO(binji): support SharedArrayBuffers?
if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
if (!IS_UNDEFINED(byteOffset)) {
byteOffset = $toPositiveInteger(byteOffset, kInvalidDataViewOffset);
@@ -427,11 +474,13 @@ DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
%AddNamedProperty(GlobalDataView.prototype, symbolToStringTag, "DataView",
READ_ONLY|DONT_ENUM);
-$installGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
-$installGetter(GlobalDataView.prototype, "byteOffset", DataViewGetByteOffset);
-$installGetter(GlobalDataView.prototype, "byteLength", DataViewGetByteLength);
+utils.InstallGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
+utils.InstallGetter(GlobalDataView.prototype, "byteOffset",
+ DataViewGetByteOffset);
+utils.InstallGetter(GlobalDataView.prototype, "byteLength",
+ DataViewGetByteLength);
-$installFunctions(GlobalDataView.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
"getInt8", DataViewGetInt8JS,
"setInt8", DataViewSetInt8JS,
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index e9a9acb66e..1c6b84e2dc 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -235,6 +235,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
@@ -248,8 +249,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_WEAK_SET_TYPE:
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
- case JS_GLOBAL_OBJECT_TYPE:
- return kGlobalObject;
case JS_FUNCTION_TYPE:
return kOtherObject; // TODO(rossberg): there should be a Function type.
case JS_REGEXP_TYPE:
@@ -1367,4 +1366,5 @@ template TypeImpl<HeapTypeConfig>::TypeHandle
TypeImpl<HeapTypeConfig>::Convert<Type>(
TypeImpl<ZoneTypeConfig>::TypeHandle, TypeImpl<HeapTypeConfig>::Region*);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 8f80624532..8d63908015 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -208,7 +208,7 @@ namespace internal {
V(InternalizedString, 1u << 13 | REPRESENTATION(kTaggedPointer)) \
V(OtherString, 1u << 14 | REPRESENTATION(kTaggedPointer)) \
V(Undetectable, 1u << 15 | REPRESENTATION(kTaggedPointer)) \
- V(GlobalObject, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
+ /* Unused semantic bit 1u << 16 in case you are looking for a bit. */ \
V(OtherObject, 1u << 17 | REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
V(Internal, 1u << 19 | REPRESENTATION(kTagged | kUntagged)) \
@@ -221,18 +221,22 @@ namespace internal {
V(Integral32, kSigned32 | kUnsigned32) \
V(PlainNumber, kIntegral32 | kOtherNumber) \
V(OrderedNumber, kPlainNumber | kMinusZero) \
+ V(MinusZeroOrNaN, kMinusZero | kNaN) \
V(Number, kOrderedNumber | kNaN) \
V(String, kInternalizedString | kOtherString) \
V(UniqueName, kSymbol | kInternalizedString) \
V(Name, kSymbol | kString) \
+ V(BooleanOrNumber, kBoolean | kNumber) \
+ V(NullOrUndefined, kNull | kUndefined) \
V(NumberOrString, kNumber | kString) \
- V(PlainPrimitive, kNumberOrString | kBoolean | kNull | kUndefined) \
+ V(NumberOrUndefined, kNumber | kUndefined) \
+ V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
V(Primitive, kSymbol | kPlainPrimitive) \
- V(DetectableObject, kGlobalObject | kOtherObject) \
- V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(DetectableReceiver, kOtherObject | kProxy) \
V(Detectable, kDetectableReceiver | kNumber | kName) \
- V(Object, kDetectableObject | kUndetectable) \
+ V(Object, kOtherObject | kUndetectable) \
V(Receiver, kObject | kProxy) \
+ V(ReceiverOrUndefined, kReceiver | kUndefined) \
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
kReceiver) \
@@ -410,6 +414,14 @@ class TypeImpl : public Config::Base {
function->InitParameter(2, param2);
return function;
}
+ static TypeHandle Function(TypeHandle result, int arity, TypeHandle* params,
+ Region* region) {
+ FunctionHandle function = Function(result, Any(region), arity, region);
+ for (int i = 0; i < arity; ++i) {
+ function->InitParameter(i, params[i]);
+ }
+ return function;
+ }
static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg);
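
The types.h edits above rework the bitset type lattice: each leaf type is one bit, named unions are bitwise ORs, and the `GlobalObject` bit is retired while new unions such as `MinusZeroOrNaN` and `NullOrUndefined` are added as one-line combinations. A tiny sketch of how such a lattice behaves, using a few of the leaf bits from the diff (bit positions here are illustrative):

```js
// Sketch of the bitset type lattice: unions are ORs, and subtyping
// reduces to bit containment.
const kNull      = 1 << 0;
const kUndefined = 1 << 1;
const kMinusZero = 1 << 2;
const kNaN       = 1 << 3;

const kNullOrUndefined = kNull | kUndefined;   // new union in the diff
const kMinusZeroOrNaN  = kMinusZero | kNaN;    // new union in the diff

// Is(a, b) holds iff every bit of a is also set in b.
const Is = (a, b) => (a & ~b) === 0;

console.log(Is(kNull, kNullOrUndefined));            // true
console.log(Is(kNullOrUndefined, kMinusZeroOrNaN));  // false
```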
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index ab015717e8..2f10328f09 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -105,7 +105,9 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
ZoneList<Variable*> local_vars(locals, zone());
ZoneList<Variable*> context_vars(scope->ContextLocalCount(), zone());
- scope->CollectStackAndContextLocals(&local_vars, &context_vars);
+ ZoneList<Variable*> global_vars(scope->ContextGlobalCount(), zone());
+ scope->CollectStackAndContextLocals(&local_vars, &context_vars,
+ &global_vars);
for (int i = 0; i < locals; i++) {
PrintObserved(local_vars.at(i),
frame->GetExpression(i),
@@ -346,9 +348,7 @@ void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
-void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
- expr->InitializeSharedInfo(Handle<Code>(info_->closure()->shared()->code()));
-}
+void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {}
void AstTyper::VisitClassLiteral(ClassLiteral* expr) {}
@@ -492,35 +492,20 @@ void AstTyper::VisitThrow(Throw* expr) {
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
FeedbackVectorICSlot slot(FeedbackVectorICSlot::Invalid());
- TypeFeedbackId id(TypeFeedbackId::None());
- if (FLAG_vector_ics) {
- slot = expr->PropertyFeedbackSlot();
- expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
- } else {
- id = expr->PropertyFeedbackId();
- expr->set_inline_cache_state(oracle()->LoadInlineCacheState(id));
- }
+ slot = expr->PropertyFeedbackSlot();
+ expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
if (!expr->IsUninitialized()) {
if (expr->key()->IsPropertyName()) {
Literal* lit_key = expr->key()->AsLiteral();
DCHECK(lit_key != NULL && lit_key->value()->IsString());
Handle<String> name = Handle<String>::cast(lit_key->value());
- if (FLAG_vector_ics) {
- oracle()->PropertyReceiverTypes(slot, name, expr->GetReceiverTypes());
- } else {
- oracle()->PropertyReceiverTypes(id, name, expr->GetReceiverTypes());
- }
+ oracle()->PropertyReceiverTypes(slot, name, expr->GetReceiverTypes());
} else {
bool is_string;
IcCheckType key_type;
- if (FLAG_vector_ics) {
- oracle()->KeyedPropertyReceiverTypes(slot, expr->GetReceiverTypes(),
- &is_string, &key_type);
- } else {
- oracle()->KeyedPropertyReceiverTypes(id, expr->GetReceiverTypes(),
- &is_string, &key_type);
- }
+ oracle()->KeyedPropertyReceiverTypes(slot, expr->GetReceiverTypes(),
+ &is_string, &key_type);
expr->set_is_string_access(is_string);
expr->set_key_type(key_type);
}
@@ -769,14 +754,17 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
}
-void AstTyper::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void AstTyper::VisitSpread(Spread* expr) { RECURSE(Visit(expr->expression())); }
void AstTyper::VisitThisFunction(ThisFunction* expr) {
}
-void AstTyper::VisitSuperReference(SuperReference* expr) {}
+void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
+
+
+void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
@@ -804,4 +792,5 @@ void AstTyper::VisitExportDeclaration(ExportDeclaration* declaration) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 1f4b8b6262..4566a7cc96 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -5,14 +5,18 @@
// This file contains support for URI manipulations written in
// JavaScript.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalObject = global.Object;
var GlobalArray = global.Array;
+var InternalArray = utils.InternalArray;
// -------------------------------------------------------------------
// Define internal helper functions.
@@ -160,11 +164,12 @@ function URIDecodeOctets(octets, result, index) {
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
+ uri = TO_STRING_INLINE(uri);
var uriLength = uri.length;
var array = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
- var cc1 = uri.charCodeAt(k);
+ var cc1 = %_StringCharCodeAt(uri, k);
if (unescape(cc1)) {
array[index++] = cc1;
} else {
@@ -174,7 +179,7 @@ function Encode(uri, unescape) {
} else {
k++;
if (k == uriLength) throw MakeURIError();
- var cc2 = uri.charCodeAt(k);
+ var cc2 = %_StringCharCodeAt(uri, k);
if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw MakeURIError();
index = URIEncodePair(cc1, cc2, array, index);
}
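
The cc2 range check above rejects anything that is not a UTF-16 trail surrogate before URIEncodePair combines the pair. A standalone C++ sketch of that validation and the standard UTF-16 pairing arithmetic (the helper name is mine; the JS native throws MakeURIError() where this throws):

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    // Combine a UTF-16 surrogate pair into a code point; throws where the
    // JS native would throw MakeURIError(). Standard UTF-16 arithmetic.
    uint32_t CombineSurrogatePair(uint16_t cc1, uint16_t cc2) {
      if (cc1 < 0xD800 || cc1 > 0xDBFF) throw std::runtime_error("bad lead");
      if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw std::runtime_error("bad trail");
      return 0x10000 + ((cc1 - 0xD800) << 10) + (cc2 - 0xDC00);
    }

    int main() {
      // 0xD83D 0xDE00 is the pair for U+1F600.
      printf("%X\n", CombineSurrogatePair(0xD83D, 0xDE00));  // prints 1F600
      return 0;
    }
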
@@ -190,6 +195,7 @@ function Encode(uri, unescape) {
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
+ uri = TO_STRING_INLINE(uri);
var uriLength = uri.length;
var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
var index = 0;
@@ -197,15 +203,18 @@ function Decode(uri, reserved) {
// Optimistically assume one-byte string.
for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
+ var code = %_StringCharCodeAt(uri, k);
if (code == 37) { // '%'
if (k + 2 >= uriLength) throw MakeURIError();
- var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
+ var cc = URIHexCharsToCharCode(%_StringCharCodeAt(uri, k+1),
+ %_StringCharCodeAt(uri, k+2));
if (cc >> 7) break; // Assumption wrong, two-byte string.
if (reserved(cc)) {
%_OneByteSeqStringSetChar(index++, 37, one_byte); // '%'.
- %_OneByteSeqStringSetChar(index++, uri.charCodeAt(k+1), one_byte);
- %_OneByteSeqStringSetChar(index++, uri.charCodeAt(k+2), one_byte);
+ %_OneByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k+1),
+ one_byte);
+ %_OneByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k+2),
+ one_byte);
} else {
%_OneByteSeqStringSetChar(index++, cc, one_byte);
}
@@ -224,10 +233,11 @@ function Decode(uri, reserved) {
index = 0;
for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
+ var code = %_StringCharCodeAt(uri, k);
if (code == 37) { // '%'
if (k + 2 >= uriLength) throw MakeURIError();
- var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
+ var cc = URIHexCharsToCharCode(%_StringCharCodeAt(uri, ++k),
+ %_StringCharCodeAt(uri, ++k));
if (cc >> 7) {
var n = 0;
while (((cc << ++n) & 0x80) != 0) { }
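The while (((cc << ++n) & 0x80) != 0) loop counts the leading one bits of the first percent-decoded byte to find the UTF-8 sequence length n. A standalone equivalent, assuming cc is the already-decoded byte value (0-255):

    #include <cstdint>
    #include <cstdio>

    // n == 2 for 110xxxxx, 3 for 1110xxxx, 4 for 11110xxx: shift the byte
    // left until bit 7 (0x80) of the shifted value clears.
    int Utf8SequenceLength(uint8_t cc) {
      int n = 0;
      while (((cc << ++n) & 0x80) != 0) {
      }
      return n;
    }

    int main() {
      printf("%d %d %d\n", Utf8SequenceLength(0xC2), Utf8SequenceLength(0xE2),
             Utf8SequenceLength(0xF0));  // prints 2 3 4
      return 0;
    }
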
@@ -236,15 +246,17 @@ function Decode(uri, reserved) {
octets[0] = cc;
if (k + 3 * (n - 1) >= uriLength) throw MakeURIError();
for (var i = 1; i < n; i++) {
- if (uri.charAt(++k) != '%') throw MakeURIError();
- octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
- uri.charCodeAt(++k));
+ if (uri[++k] != '%') throw MakeURIError();
+ octets[i] = URIHexCharsToCharCode(%_StringCharCodeAt(uri, ++k),
+ %_StringCharCodeAt(uri, ++k));
}
index = URIDecodeOctets(octets, two_byte, index);
} else if (reserved(cc)) {
%_TwoByteSeqStringSetChar(index++, 37, two_byte); // '%'.
- %_TwoByteSeqStringSetChar(index++, uri.charCodeAt(k - 1), two_byte);
- %_TwoByteSeqStringSetChar(index++, uri.charCodeAt(k), two_byte);
+ %_TwoByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k - 1),
+ two_byte);
+ %_TwoByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k),
+ two_byte);
} else {
%_TwoByteSeqStringSetChar(index++, cc, two_byte);
}
@@ -356,7 +368,7 @@ function URIEncodeComponent(component) {
// Set up non-enumerable URI functions on the global object and set
// their names.
-$installFunctions(global, DONT_ENUM, [
+utils.InstallFunctions(global, DONT_ENUM, [
"escape", URIEscapeJS,
"unescape", URIUnescapeJS,
"decodeURI", URIDecode,
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 5370e386ad..9f502bde3e 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -435,4 +435,5 @@ bool DoubleToBoolean(double d) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index dbb80e7522..582c576993 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -340,7 +340,7 @@ inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
hash = hash ^ (hash >> 4);
hash = hash * 2057; // hash = (hash + (hash << 3)) + (hash << 11);
hash = hash ^ (hash >> 16);
- return hash;
+ return hash & 0x3fffffff;
}
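
The new & 0x3fffffff clamps the result to 30 bits; a plausible reading (my inference, not stated in the diff) is that this keeps every computed hash a valid Smi even on 32-bit targets, where Smi payloads are 31 bits including the sign. A standalone sketch reproducing the full mixing sequence (the leading steps are taken from the surrounding function as I recall it and are an assumption here):

    #include <cassert>
    #include <cstdint>

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key;
      hash = hash ^ seed;
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
      hash = hash ^ (hash >> 16);
      return hash & 0x3fffffff;  // never exceeds 2^30 - 1
    }

    int main() {
      for (uint32_t key = 0; key < 100000; ++key) {
        assert(ComputeIntegerHash(key, 0xDECAFBAD) < (1u << 30));
      }
      return 0;
    }
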
@@ -1047,17 +1047,19 @@ template <int dummy_parameter>
class VectorSlot {
public:
explicit VectorSlot(int id) : id_(id) {}
+
int ToInt() const { return id_; }
static VectorSlot Invalid() { return VectorSlot(kInvalidSlot); }
bool IsInvalid() const { return id_ == kInvalidSlot; }
VectorSlot next() const {
- DCHECK(id_ != kInvalidSlot);
+ DCHECK_NE(kInvalidSlot, id_);
return VectorSlot(id_ + 1);
}
- bool operator==(const VectorSlot& other) const { return id_ == other.id_; }
+ bool operator==(VectorSlot that) const { return this->id_ == that.id_; }
+ bool operator!=(VectorSlot that) const { return !(*this == that); }
private:
static const int kInvalidSlot = -1;
@@ -1066,6 +1068,12 @@ class VectorSlot {
};
+template <int dummy_parameter>
+size_t hash_value(VectorSlot<dummy_parameter> slot) {
+ return slot.ToInt();
+}
+
+
typedef VectorSlot<0> FeedbackVectorSlot;
typedef VectorSlot<1> FeedbackVectorICSlot;
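
A free hash_value overload is presumably the hook V8's base::hash machinery picks up by argument-dependent lookup; the practical effect is that feedback slots can key hashed containers. A stand-in sketch with the standard library (the struct below is a simplification, not the real template):

    #include <cstddef>
    #include <unordered_map>

    struct Slot { int id; };
    inline size_t hash_value(Slot s) { return static_cast<size_t>(s.id); }
    inline bool operator==(Slot a, Slot b) { return a.id == b.id; }

    // Adapter so std::unordered_map can reuse the hash_value overload.
    struct SlotHash {
      size_t operator()(Slot s) const { return hash_value(s); }
    };

    int main() {
      std::unordered_map<Slot, int, SlotHash> uses;
      uses[Slot{3}] = 1;  // slots can now key hashed containers
      return uses.count(Slot{3}) == 1 ? 0 : 1;
    }
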
@@ -1690,7 +1698,7 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index) {
d = stream->GetNext() - '0';
if (d < 0 || d > 9) return false;
// Check that the new result is below the 32 bit limit.
- if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
+ if (result > 429496729U - ((d + 3) >> 3)) return false;
result = (result * 10) + d;
}
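
The replaced guard is not just branch-free, it also tightens a boundary: (d + 3) >> 3 is 0 for d <= 4 and 1 for d >= 5, while the old (d > 5) test treated d == 5 as safe. At result == 429496729 a trailing 5 yields 4294967295 == 2^32 - 1, which is the maximum array length and therefore not a valid index, so it must be rejected. A standalone check:

    #include <cstdint>
    #include <cstdio>

    int main() {
      for (uint32_t d = 0; d <= 9; ++d) {
        uint32_t old_limit = 429496729U - ((d > 5) ? 1 : 0);
        uint32_t new_limit = 429496729U - ((d + 3) >> 3);
        // The two limits differ only at d == 5.
        printf("d=%u old_limit=%u new_limit=%u\n", d, old_limit, new_limit);
      }
      return 0;
    }
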
@@ -1708,6 +1716,41 @@ inline uintptr_t GetCurrentStackPosition() {
return limit;
}
+static inline double ReadDoubleValue(const void* p) {
+#ifndef V8_TARGET_ARCH_MIPS
+ return *reinterpret_cast<const double*>(p);
+#else // V8_TARGET_ARCH_MIPS
+ // Prevent compiler from using load-double (mips ldc1) on a (possibly)
+ // non-64-bit aligned address.
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ const uint32_t* ptr = reinterpret_cast<const uint32_t*>(p);
+ c.u[0] = *ptr;
+ c.u[1] = *(ptr + 1);
+ return c.d;
+#endif // V8_TARGET_ARCH_MIPS
+}
+
+
+static inline void WriteDoubleValue(void* p, double value) {
+#ifndef V8_TARGET_ARCH_MIPS
+ *(reinterpret_cast<double*>(p)) = value;
+#else // V8_TARGET_ARCH_MIPS
+ // Prevent compiler from using store-double (mips sdc1) on a (possibly)
+ // non-64-bit aligned address.
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.d = value;
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(p);
+ *ptr = c.u[0];
+ *(ptr + 1) = c.u[1];
+#endif // V8_TARGET_ARCH_MIPS
+}
+
} // namespace internal
} // namespace v8
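
The union trick above forces two 32-bit transfers so the compiler cannot emit ldc1/sdc1 against a possibly unaligned pointer. On other targets the same portability problem is usually solved with memcpy, which optimizing compilers lower to plain loads and stores; a hedged standalone equivalent (not the V8 code):

    #include <cstdio>
    #include <cstring>

    // memcpy-based unaligned read/write: defined behavior on every target.
    static double ReadDoubleUnaligned(const void* p) {
      double d;
      std::memcpy(&d, p, sizeof(d));
      return d;
    }

    static void WriteDoubleUnaligned(void* p, double value) {
      std::memcpy(p, &value, sizeof(value));
    }

    int main() {
      unsigned char buf[sizeof(double) + 1] = {0};
      WriteDoubleUnaligned(buf + 1, 3.5);            // deliberately misaligned
      printf("%f\n", ReadDoubleUnaligned(buf + 1));  // prints 3.500000
      return 0;
    }
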
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 49a104f0aa..fcffebb344 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -143,4 +143,5 @@ void V8::SetSnapshotBlob(StartupData* snapshot_blob) {
CHECK(false);
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index ce54c758ed..72cb8caa29 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -2,153 +2,43 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $delete;
var $functionSourceString;
-var $getIterator;
-var $getMethod;
var $globalEval;
-var $installConstants;
-var $installFunctions;
-var $installGetter;
-var $isFinite;
-var $isNaN;
-var $newFunctionString;
-var $numberIsNaN;
-var $objectDefineProperties;
-var $objectDefineProperty;
-var $objectFreeze;
+var $objectDefineOwnProperty;
var $objectGetOwnPropertyDescriptor;
-var $objectGetOwnPropertyKeys;
-var $objectHasOwnProperty;
-var $objectIsFrozen;
-var $objectIsSealed;
-var $objectLookupGetter;
-var $objectLookupSetter;
-var $objectToString;
-var $overrideFunction;
-var $ownPropertyKeys;
-var $setFunctionName;
-var $setUpLockedPrototype;
var $toCompletePropertyDescriptor;
-var $toNameArray;
-(function(global, shared, exports) {
+(function(global, utils) {
%CheckIsBootstrapping();
+// ----------------------------------------------------------------------------
+// Imports
+
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
var GlobalFunction = global.Function;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
-
-// ----------------------------------------------------------------------------
-
-// ES6 - 9.2.11 SetFunctionName
-function SetFunctionName(f, name, prefix) {
- if (IS_SYMBOL(name)) {
- name = "[" + %SymbolDescription(name) + "]";
- }
- if (IS_UNDEFINED(prefix)) {
- %FunctionSetName(f, name);
- } else {
- %FunctionSetName(f, prefix + " " + name);
- }
-}
-
-
-// Helper function used to install functions on objects.
-function InstallFunctions(object, attributes, functions) {
- %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
- for (var i = 0; i < functions.length; i += 2) {
- var key = functions[i];
- var f = functions[i + 1];
- SetFunctionName(f, key);
- %FunctionRemovePrototype(f);
- %AddNamedProperty(object, key, f, attributes);
- %SetNativeFlag(f);
- }
- %ToFastProperties(object);
-}
-
-
-function OverrideFunction(object, name, f) {
- ObjectDefineProperty(object, name, { value: f,
- writeable: true,
- configurable: true,
- enumerable: false });
- SetFunctionName(f, name);
- %FunctionRemovePrototype(f);
- %SetNativeFlag(f);
-}
-
-
-// Helper function to install a getter-only accessor property.
-function InstallGetter(object, name, getter, attributes) {
- if (typeof attributes == "undefined") {
- attributes = DONT_ENUM;
- }
- SetFunctionName(getter, name, "get");
- %FunctionRemovePrototype(getter);
- %DefineAccessorPropertyUnchecked(object, name, getter, null, attributes);
- %SetNativeFlag(getter);
-}
-
-
-// Helper function to install a getter/setter accessor property.
-function InstallGetterSetter(object, name, getter, setter) {
- SetFunctionName(getter, name, "get");
- SetFunctionName(setter, name, "set");
- %FunctionRemovePrototype(getter);
- %FunctionRemovePrototype(setter);
- %DefineAccessorPropertyUnchecked(object, name, getter, setter, DONT_ENUM);
- %SetNativeFlag(getter);
- %SetNativeFlag(setter);
-}
-
-
-// Helper function for installing constant properties on objects.
-function InstallConstants(object, constants) {
- %OptimizeObjectForAddingMultipleProperties(object, constants.length >> 1);
- var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
- for (var i = 0; i < constants.length; i += 2) {
- var name = constants[i];
- var k = constants[i + 1];
- %AddNamedProperty(object, name, k, attributes);
- }
- %ToFastProperties(object);
-}
-
-
-// Prevents changes to the prototype of a built-in function.
-// The "prototype" property of the function object is made non-configurable,
-// and the prototype object is made non-extensible. The latter prevents
-// changing the __proto__ property.
-function SetUpLockedPrototype(constructor, fields, methods) {
- %CheckIsBootstrapping();
- var prototype = constructor.prototype;
- // Install functions first, because this function is used to initialize
- // PropertyDescriptor itself.
- var property_count = (methods.length >> 1) + (fields ? fields.length : 0);
- if (property_count >= 4) {
- %OptimizeObjectForAddingMultipleProperties(prototype, property_count);
- }
- if (fields) {
- for (var i = 0; i < fields.length; i++) {
- %AddNamedProperty(prototype, fields[i],
- UNDEFINED, DONT_ENUM | DONT_DELETE);
- }
- }
- for (var i = 0; i < methods.length; i += 2) {
- var key = methods[i];
- var f = methods[i + 1];
- %AddNamedProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
- %SetNativeFlag(f);
- }
- %InternalSetPrototype(prototype, null);
- %ToFastProperties(prototype);
-}
-
+var InternalArray = utils.InternalArray;
+var SetFunctionName = utils.SetFunctionName;
+
+var MathAbs;
+var ProxyDelegateCallAndConstruct;
+var ProxyDerivedHasOwnTrap;
+var ProxyDerivedKeysTrap;
+var StringIndexOf;
+
+utils.Import(function(from) {
+ MathAbs = from.MathAbs;
+ StringIndexOf = from.StringIndexOf;
+});
+
+utils.ImportFromExperimental(function(from) {
+ ProxyDelegateCallAndConstruct = from.ProxyDelegateCallAndConstruct;
+ ProxyDerivedHasOwnTrap = from.ProxyDerivedHasOwnTrap;
+ ProxyDerivedKeysTrap = from.ProxyDerivedKeysTrap;
+});
// ----------------------------------------------------------------------------
@@ -226,7 +116,7 @@ function GlobalEval(x) {
// Set up global object.
var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
-InstallConstants(global, [
+utils.InstallConstants(global, [
// ECMA 262 - 15.1.1.1.
"NaN", NAN,
// ECMA-262 - 15.1.1.2.
@@ -236,7 +126,7 @@ InstallConstants(global, [
]);
// Set up non-enumerable function on the global object.
-InstallFunctions(global, DONT_ENUM, [
+utils.InstallFunctions(global, DONT_ENUM, [
"isNaN", GlobalIsNaN,
"isFinite", GlobalIsFinite,
"parseInt", GlobalParseInt,
@@ -284,15 +174,18 @@ function ObjectValueOf() {
// ECMA-262 - 15.2.4.5
-function ObjectHasOwnProperty(V) {
- if (%_IsJSProxy(this)) {
+function ObjectHasOwnProperty(value) {
+ var name = $toName(value);
+ var object = TO_OBJECT_INLINE(this);
+
+ if (%_IsJSProxy(object)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(V)) return false;
+ if (IS_SYMBOL(value)) return false;
- var handler = %GetHandler(this);
- return CallTrap1(handler, "hasOwn", $proxyDerivedHasOwnTrap, $toName(V));
+ var handler = %GetHandler(object);
+ return CallTrap1(handler, "hasOwn", ProxyDerivedHasOwnTrap, name);
}
- return %HasOwnProperty(TO_OBJECT_INLINE(this), $toName(V));
+ return %HasOwnProperty(object, name);
}
@@ -373,7 +266,7 @@ function ObjectKeys(obj) {
obj = TO_OBJECT_INLINE(obj);
if (%_IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "keys", $proxyDerivedKeysTrap);
+ var names = CallTrap0(handler, "keys", ProxyDerivedKeysTrap);
return ToNameArray(names, "keys", false);
}
return %OwnKeys(obj);
@@ -530,7 +423,7 @@ function PropertyDescriptor() {
this.hasSetter_ = false;
}
-SetUpLockedPrototype(PropertyDescriptor, [
+utils.SetUpLockedPrototype(PropertyDescriptor, [
"value_",
"hasValue_",
"writable_",
@@ -793,14 +686,19 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
// Step 10a
if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- if (!current.isWritable() && desc.isWritable()) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
+ var currentIsWritable = current.isWritable();
+ if (currentIsWritable != desc.isWritable()) {
+ if (!currentIsWritable || IS_STRONG(obj)) {
+ if (should_throw) {
+ throw currentIsWritable
+ ? MakeTypeError(kStrongRedefineDisallowed, obj, p)
+ : MakeTypeError(kRedefineDisallowed, p);
+ } else {
+ return false;
+ }
}
}
- if (!current.isWritable() && desc.hasValue() &&
+ if (!currentIsWritable && desc.hasValue() &&
!$sameValue(desc.getValue(), current.getValue())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
@@ -904,72 +802,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
// ES5 section 15.4.5.1.
function DefineArrayProperty(obj, p, desc, should_throw) {
- // Note that the length of an array is not actually stored as part of the
- // property, hence we use generated code throughout this function instead of
- // DefineObjectProperty() to modify its value.
-
- // Step 3 - Special handling for length property.
- if (p === "length") {
- var length = obj.length;
- var old_length = length;
- if (!desc.hasValue()) {
- return DefineObjectProperty(obj, "length", desc, should_throw);
- }
- var new_length = $toUint32(desc.getValue());
- if (new_length != $toNumber(desc.getValue())) {
- throw MakeRangeError(kArrayLengthOutOfRange);
- }
- var length_desc = GetOwnPropertyJS(obj, "length");
- if (new_length != length && !length_desc.isWritable()) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- var threw = false;
-
- var emit_splice = %IsObserved(obj) && new_length !== old_length;
- var removed;
- if (emit_splice) {
- $observeBeginPerformSplice(obj);
- removed = [];
- if (new_length < old_length)
- removed.length = old_length - new_length;
- }
-
- while (new_length < length--) {
- var index = $toString(length);
- if (emit_splice) {
- var deletedDesc = GetOwnPropertyJS(obj, index);
- if (deletedDesc && deletedDesc.hasValue())
- removed[length - new_length] = deletedDesc.getValue();
- }
- if (!Delete(obj, index, false)) {
- new_length = length + 1;
- threw = true;
- break;
- }
- }
- threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
- if (emit_splice) {
- $observeEndPerformSplice(obj);
- $observeEnqueueSpliceRecord(obj,
- new_length < old_length ? new_length : old_length,
- removed,
- new_length > old_length ? new_length - old_length : 0);
- }
- if (threw) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- return true;
- }
-
- // Step 4 - Special handling for array index.
+ // Step 3 - Special handling for array index.
if (!IS_SYMBOL(p)) {
var index = $toUint32(p);
var emit_splice = false;
@@ -1023,6 +856,17 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
}
+function DefineOwnPropertyFromAPI(obj, p, value, desc) {
+ return DefineOwnProperty(obj, p, ToPropertyDescriptor({
+ value: value,
+ writable: desc[0],
+ enumerable: desc[1],
+ configurable: desc[2]
+ }),
+ false);
+}
+
+
// ES6 section 19.1.2.9
function ObjectGetPrototypeOf(obj) {
return %_GetPrototype(TO_OBJECT_INLINE(obj));
@@ -1067,7 +911,7 @@ function ToNameArray(obj, trap, includeSymbols) {
if (%HasOwnProperty(names, s)) {
throw MakeTypeError(kProxyRepeatedPropName, trap, s);
}
- array[index] = s;
+ array[realLength] = s;
++realLength;
names[s] = 0;
}
@@ -1274,7 +1118,7 @@ function ProxyFix(obj) {
if (%IsJSFunctionProxy(obj)) {
var callTrap = %GetCallTrap(obj);
var constructTrap = %GetConstructTrap(obj);
- var code = $proxyDelegateCallAndConstruct(callTrap, constructTrap);
+ var code = ProxyDelegateCallAndConstruct(callTrap, constructTrap);
%Fix(obj); // becomes a regular function
%SetCode(obj, code);
// TODO(rossberg): What about length and other properties? Not specified.
@@ -1323,7 +1167,10 @@ function ObjectSealJS(obj) {
function ObjectFreezeJS(obj) {
if (!IS_SPEC_OBJECT(obj)) return obj;
var isProxy = %_IsJSProxy(obj);
- if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
+ // TODO(conradw): Investigate modifying the fast path to accommodate strong
+ // objects.
+ if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj) ||
+ IS_STRONG(obj)) {
if (isProxy) {
ProxyFix(obj);
}
@@ -1452,7 +1299,7 @@ function ObjectConstructor(x) {
DONT_ENUM);
// Set up non-enumerable functions on the Object.prototype object.
-InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
"toString", ObjectToString,
"toLocaleString", ObjectToLocaleString,
"valueOf", ObjectValueOf,
@@ -1464,11 +1311,11 @@ InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
"__defineSetter__", ObjectDefineSetter,
"__lookupSetter__", ObjectLookupSetter
]);
-InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
+utils.InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
ObjectSetProto);
// Set up non-enumerable functions in the Object object.
-InstallFunctions(GlobalObject, DONT_ENUM, [
+utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"keys", ObjectKeys,
"create", ObjectCreate,
"defineProperty", ObjectDefineProperty,
@@ -1533,7 +1380,7 @@ function BooleanValueOf() {
%AddNamedProperty(GlobalBoolean.prototype, "constructor", GlobalBoolean,
DONT_ENUM);
-InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
"toString", BooleanToString,
"valueOf", BooleanValueOf
]);
@@ -1693,7 +1540,7 @@ function NumberIsSafeInteger(number) {
if (NumberIsFinite(number)) {
var integral = TO_INTEGER(number);
if (integral == number) {
- return $abs(integral) <= GlobalNumber.MAX_SAFE_INTEGER;
+ return MathAbs(integral) <= GlobalNumber.MAX_SAFE_INTEGER;
}
}
return false;
@@ -1710,7 +1557,7 @@ function NumberIsSafeInteger(number) {
%AddNamedProperty(GlobalNumber.prototype, "constructor", GlobalNumber,
DONT_ENUM);
-InstallConstants(GlobalNumber, [
+utils.InstallConstants(GlobalNumber, [
// ECMA-262 section 15.7.3.1.
"MAX_VALUE", 1.7976931348623157e+308,
// ECMA-262 section 15.7.3.2.
@@ -1730,7 +1577,7 @@ InstallConstants(GlobalNumber, [
]);
// Set up non-enumerable functions on the Number prototype object.
-InstallFunctions(GlobalNumber.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalNumber.prototype, DONT_ENUM, [
"toString", NumberToStringJS,
"toLocaleString", NumberToLocaleString,
"valueOf", NumberValueOf,
@@ -1740,7 +1587,7 @@ InstallFunctions(GlobalNumber.prototype, DONT_ENUM, [
]);
// Harmony Number constructor additions
-InstallFunctions(GlobalNumber, DONT_ENUM, [
+utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
"isFinite", NumberIsFinite,
"isInteger", NumberIsInteger,
"isNaN", NumberIsNaN,
@@ -1749,7 +1596,7 @@ InstallFunctions(GlobalNumber, DONT_ENUM, [
"parseFloat", GlobalParseFloat
]);
-%SetInlineBuiltinFlag(NumberIsNaN);
+%SetForceInlineFlag(NumberIsNaN);
// ----------------------------------------------------------------------------
@@ -1856,6 +1703,10 @@ function FunctionBind(this_arg) { // Length is 1.
var result = %FunctionBindArguments(boundFunction, this,
this_arg, new_length);
+ var name = this.name;
+ var bound_name = IS_STRING(name) ? name : "";
+ SetFunctionName(result, bound_name, "bound");
+
// We already have caller and arguments properties on functions,
// which are non-configurable. It therefore makes no sense to
// try to redefine these as defined by the spec. The spec says
@@ -1878,7 +1729,7 @@ function NewFunctionString(args, function_token) {
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (%_CallFunction(p, ')', $stringIndexOf) != -1) {
+ if (%_CallFunction(p, ')', StringIndexOf) != -1) {
throw MakeSyntaxError(kParenthesisInArgString);
}
// If the formal parameters include an unbalanced block comment, the
@@ -1908,7 +1759,7 @@ function FunctionConstructor(arg1) { // length == 1
%AddNamedProperty(GlobalFunction.prototype, "constructor", GlobalFunction,
DONT_ENUM);
-InstallFunctions(GlobalFunction.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalFunction.prototype, DONT_ENUM, [
"bind", FunctionBind,
"toString", FunctionToString
]);
@@ -1932,36 +1783,35 @@ function GetIterator(obj, method) {
return iterator;
}
-//----------------------------------------------------------------------------
+// ----------------------------------------------------------------------------
+// Exports
-$delete = Delete;
$functionSourceString = FunctionSourceString;
-$getIterator = GetIterator;
-$getMethod = GetMethod;
$globalEval = GlobalEval;
-$installConstants = InstallConstants;
-$installFunctions = InstallFunctions;
-$installGetter = InstallGetter;
-$isFinite = GlobalIsFinite;
-$isNaN = GlobalIsNaN;
-$newFunctionString = NewFunctionString;
-$numberIsNaN = NumberIsNaN;
-$objectDefineProperties = ObjectDefineProperties;
-$objectDefineProperty = ObjectDefineProperty;
-$objectFreeze = ObjectFreezeJS;
+$objectDefineOwnProperty = DefineOwnPropertyFromAPI;
$objectGetOwnPropertyDescriptor = ObjectGetOwnPropertyDescriptor;
-$objectGetOwnPropertyKeys = ObjectGetOwnPropertyKeys;
-$objectHasOwnProperty = ObjectHasOwnProperty;
-$objectIsFrozen = ObjectIsFrozen;
-$objectIsSealed = ObjectIsSealed;
-$objectLookupGetter = ObjectLookupGetter;
-$objectLookupSetter = ObjectLookupSetter;
-$objectToString = ObjectToString;
-$overrideFunction = OverrideFunction;
-$ownPropertyKeys = OwnPropertyKeys;
-$setFunctionName = SetFunctionName;
-$setUpLockedPrototype = SetUpLockedPrototype;
$toCompletePropertyDescriptor = ToCompletePropertyDescriptor;
-$toNameArray = ToNameArray;
+
+utils.ObjectDefineProperties = ObjectDefineProperties;
+utils.ObjectDefineProperty = ObjectDefineProperty;
+
+utils.Export(function(to) {
+ to.Delete = Delete;
+ to.GetIterator = GetIterator;
+ to.GetMethod = GetMethod;
+ to.IsFinite = GlobalIsFinite;
+ to.IsNaN = GlobalIsNaN;
+ to.NewFunctionString = NewFunctionString;
+ to.NumberIsNaN = NumberIsNaN;
+ to.ObjectDefineProperty = ObjectDefineProperty;
+ to.ObjectFreeze = ObjectFreezeJS;
+ to.ObjectGetOwnPropertyKeys = ObjectGetOwnPropertyKeys;
+ to.ObjectHasOwnProperty = ObjectHasOwnProperty;
+ to.ObjectIsFrozen = ObjectIsFrozen;
+ to.ObjectIsSealed = ObjectIsSealed;
+ to.ObjectToString = ObjectToString;
+ to.OwnPropertyKeys = OwnPropertyKeys;
+ to.ToNameArray = ToNameArray;
+});
})
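
The Exports hunk replaces writable $-prefixed globals with registration through the utils.Export/utils.Import callbacks, so natives files hand each other functions without mutable global state. A loose C++ analog of the pattern (all names here are mine; the real mechanism is the JS shown above):

    #include <functional>

    // A container filled by a callback instead of exposing mutable globals.
    struct Exports {
      bool (*IsNaN)(double) = nullptr;
    };

    void Export(Exports* to, const std::function<void(Exports*)>& fill) {
      fill(to);
    }

    int main() {
      Exports utils;
      Export(&utils, [](Exports* to) {
        to->IsNaN = [](double d) { return d != d; };  // captureless lambda
      });
      return utils.IsNaN(0.0 / 0.0) ? 0 : 1;  // NaN != NaN, so returns 0
    }
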
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index c0b8bd7843..18a45abd73 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -39,7 +39,7 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
name_(name),
mode_(mode),
kind_(kind),
- location_(UNALLOCATED),
+ location_(VariableLocation::UNALLOCATED),
index_(-1),
initializer_position_(RelocInfo::kNoPosition),
has_strong_mode_reference_(false),
@@ -58,9 +58,15 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return (IsDynamicVariableMode(mode_) ||
- (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)))
- && scope_ != NULL && scope_->is_script_scope();
+ return IsDynamicVariableMode(mode_) || IsStaticGlobalObjectProperty();
+}
+
+
+bool Variable::IsStaticGlobalObjectProperty() const {
+ // Temporaries are never global, they must always be allocated in the
+ // activation frame.
+ return (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)) &&
+ scope_ != NULL && scope_->is_script_scope() && !is_this();
}
@@ -71,4 +77,5 @@ int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
return x - y;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 384a885954..deebc5f80c 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -20,34 +20,7 @@ class ClassVariable;
class Variable: public ZoneObject {
public:
- enum Kind { NORMAL, FUNCTION, CLASS, THIS, NEW_TARGET, ARGUMENTS };
-
- enum Location {
- // Before and during variable allocation, a variable whose location is
- // not yet determined. After allocation, a variable looked up as a
- // property on the global object (and possibly absent). name() is the
- // variable name, index() is invalid.
- UNALLOCATED,
-
- // A slot in the parameter section on the stack. index() is the
- // parameter index, counting left-to-right. The receiver is index -1;
- // the first parameter is index 0.
- PARAMETER,
-
- // A slot in the local section on the stack. index() is the variable
- // index in the stack frame, starting at 0.
- LOCAL,
-
- // An indexed slot in a heap context. index() is the variable index in
- // the context object on the heap, starting at 0. scope() is the
- // corresponding scope.
- CONTEXT,
-
- // A named slot in a heap context. name() is the variable name in the
- // context object on the heap, with lookup starting at the current
- // context. index() is invalid.
- LOOKUP
- };
+ enum Kind { NORMAL, FUNCTION, CLASS, THIS, ARGUMENTS };
Variable(Scope* scope, const AstRawString* name, VariableMode mode, Kind kind,
InitializationFlag initialization_flag,
@@ -86,13 +59,20 @@ class Variable: public ZoneObject {
return !is_this() && name().is_identical_to(n);
}
- bool IsUnallocated() const { return location_ == UNALLOCATED; }
- bool IsParameter() const { return location_ == PARAMETER; }
- bool IsStackLocal() const { return location_ == LOCAL; }
+ bool IsUnallocated() const {
+ return location_ == VariableLocation::UNALLOCATED;
+ }
+ bool IsParameter() const { return location_ == VariableLocation::PARAMETER; }
+ bool IsStackLocal() const { return location_ == VariableLocation::LOCAL; }
bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
- bool IsContextSlot() const { return location_ == CONTEXT; }
- bool IsLookupSlot() const { return location_ == LOOKUP; }
+ bool IsContextSlot() const { return location_ == VariableLocation::CONTEXT; }
+ bool IsGlobalSlot() const { return location_ == VariableLocation::GLOBAL; }
+ bool IsUnallocatedOrGlobalSlot() const {
+ return IsUnallocated() || IsGlobalSlot();
+ }
+ bool IsLookupSlot() const { return location_ == VariableLocation::LOOKUP; }
bool IsGlobalObjectProperty() const;
+ bool IsStaticGlobalObjectProperty() const;
bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
@@ -103,9 +83,18 @@ class Variable: public ZoneObject {
bool is_function() const { return kind_ == FUNCTION; }
bool is_class() const { return kind_ == CLASS; }
bool is_this() const { return kind_ == THIS; }
- bool is_new_target() const { return kind_ == NEW_TARGET; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
+ // For script scopes, the "this" binding is provided by a ScriptContext added
+ // to the global's ScriptContextTable. This binding might not statically
+ // resolve to a Variable::THIS binding, instead being DYNAMIC_LOCAL. However,
+ // any variable named "this" does indeed refer to a Variable::THIS binding;
+ // the grammar ensures that this is the case. So wherever a "this" binding
+ // might be provided by the global, use HasThisName instead of is_this().
+ bool HasThisName(Isolate* isolate) const {
+ return is_this() || *name() == *isolate->factory()->this_string();
+ }
+
ClassVariable* AsClassVariable() {
DCHECK(is_class());
return reinterpret_cast<ClassVariable*>(this);
@@ -125,13 +114,13 @@ class Variable: public ZoneObject {
local_if_not_shadowed_ = local;
}
- Location location() const { return location_; }
+ VariableLocation location() const { return location_; }
int index() const { return index_; }
InitializationFlag initialization_flag() const {
return initialization_flag_;
}
- void AllocateTo(Location location, int index) {
+ void AllocateTo(VariableLocation location, int index) {
location_ = location;
index_ = index;
}
@@ -162,7 +151,7 @@ class Variable: public ZoneObject {
const AstRawString* name_;
VariableMode mode_;
Kind kind_;
- Location location_;
+ VariableLocation location_;
int index_;
int initializer_position_;
// Tracks whether the variable is bound to a VariableProxy which is in strong
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 895c61b4ec..4f3128b918 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -69,14 +69,34 @@ class Vector {
return Vector<T>(result, length_);
}
- void Sort(int (*cmp)(const T*, const T*)) {
- std::sort(start(), start() + length(), RawComparer(cmp));
+ template <typename CompareFunction>
+ void Sort(CompareFunction cmp, size_t s, size_t l) {
+ std::sort(start() + s, start() + s + l, RawComparer<CompareFunction>(cmp));
+ }
+
+ template <typename CompareFunction>
+ void Sort(CompareFunction cmp) {
+ std::sort(start(), start() + length(), RawComparer<CompareFunction>(cmp));
}
void Sort() {
std::sort(start(), start() + length());
}
+ template <typename CompareFunction>
+ void StableSort(CompareFunction cmp, size_t s, size_t l) {
+ std::stable_sort(start() + s, start() + s + l,
+ RawComparer<CompareFunction>(cmp));
+ }
+
+ template <typename CompareFunction>
+ void StableSort(CompareFunction cmp) {
+ std::stable_sort(start(), start() + length(),
+ RawComparer<CompareFunction>(cmp));
+ }
+
+ void StableSort() { std::stable_sort(start(), start() + length()); }
+
void Truncate(int length) {
DCHECK(length <= length_);
length_ = length;
@@ -122,15 +142,16 @@ class Vector {
T* start_;
int length_;
+ template <typename CookedComparer>
class RawComparer {
public:
- explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {}
+ explicit RawComparer(CookedComparer cmp) : cmp_(cmp) {}
bool operator()(const T& a, const T& b) {
return cmp_(&a, &b) < 0;
}
private:
- int (*cmp_)(const T*, const T*);
+ CookedComparer cmp_;
};
};
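
The templated RawComparer generalizes the old fixed int (*)(const T*, const T*) signature: any callable returning a negative/zero/positive three-way result can now drive std::sort and std::stable_sort, including over a sub-range. A standalone sketch of the adapter:

    #include <algorithm>
    #include <cstdio>

    // Wrap a three-way comparer taking pointers into the boolean
    // strict-weak-ordering predicate that std::sort expects.
    template <typename T, typename CookedComparer>
    class RawComparer {
     public:
      explicit RawComparer(CookedComparer cmp) : cmp_(cmp) {}
      bool operator()(const T& a, const T& b) { return cmp_(&a, &b) < 0; }

     private:
      CookedComparer cmp_;
    };

    int main() {
      int data[] = {3, 5, 1, 2, 4};
      auto cmp = [](const int* a, const int* b) { return *a - *b; };
      // Sort only the sub-range [1, 4), mirroring Sort(cmp, s, l).
      std::sort(data + 1, data + 4, RawComparer<int, decltype(cmp)>(cmp));
      for (int v : data) printf("%d ", v);  // prints: 3 1 2 5 4
      return 0;
    }
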
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index eaef96d44d..eae80c85cc 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -79,4 +79,5 @@ void Version::GetSONAME(Vector<char> str) {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/weak-collection.js b/deps/v8/src/weak-collection.js
index f9863c1b20..75350931ed 100644
--- a/deps/v8/src/weak-collection.js
+++ b/deps/v8/src/weak-collection.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, shared, exports) {
+(function(global, utils) {
"use strict";
@@ -43,7 +43,9 @@ function WeakMapGet(key) {
'WeakMap.prototype.get', this);
}
if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- return %WeakCollectionGet(this, key);
+ var hash = $getExistingHash(key);
+ if (IS_UNDEFINED(hash)) return UNDEFINED;
+ return %WeakCollectionGet(this, key, hash);
}
@@ -52,10 +54,8 @@ function WeakMapSet(key, value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.set', this);
}
- if (!IS_SPEC_OBJECT(key)) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionSet(this, key, value);
+ if (!IS_SPEC_OBJECT(key)) throw MakeTypeError(kInvalidWeakMapKey);
+ return %WeakCollectionSet(this, key, value, $getHash(key));
}
@@ -65,7 +65,9 @@ function WeakMapHas(key) {
'WeakMap.prototype.has', this);
}
if (!IS_SPEC_OBJECT(key)) return false;
- return %WeakCollectionHas(this, key);
+ var hash = $getExistingHash(key);
+ if (IS_UNDEFINED(hash)) return false;
+ return %WeakCollectionHas(this, key, hash);
}
@@ -75,7 +77,9 @@ function WeakMapDelete(key) {
'WeakMap.prototype.delete', this);
}
if (!IS_SPEC_OBJECT(key)) return false;
- return %WeakCollectionDelete(this, key);
+ var hash = $getExistingHash(key);
+ if (IS_UNDEFINED(hash)) return false;
+ return %WeakCollectionDelete(this, key, hash);
}
@@ -90,7 +94,7 @@ function WeakMapDelete(key) {
DONT_ENUM | READ_ONLY);
// Set up the non-enumerable functions on the WeakMap prototype object.
-$installFunctions(GlobalWeakMap.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalWeakMap.prototype, DONT_ENUM, [
"get", WeakMapGet,
"set", WeakMapSet,
"has", WeakMapHas,
@@ -124,10 +128,8 @@ function WeakSetAdd(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.add', this);
}
- if (!IS_SPEC_OBJECT(value)) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionSet(this, value, true);
+ if (!IS_SPEC_OBJECT(value)) throw MakeTypeError(kInvalidWeakSetValue);
+ return %WeakCollectionSet(this, value, true, $getHash(value));
}
@@ -137,7 +139,9 @@ function WeakSetHas(value) {
'WeakSet.prototype.has', this);
}
if (!IS_SPEC_OBJECT(value)) return false;
- return %WeakCollectionHas(this, value);
+ var hash = $getExistingHash(value);
+ if (IS_UNDEFINED(hash)) return false;
+ return %WeakCollectionHas(this, value, hash);
}
@@ -147,7 +151,9 @@ function WeakSetDelete(value) {
'WeakSet.prototype.delete', this);
}
if (!IS_SPEC_OBJECT(value)) return false;
- return %WeakCollectionDelete(this, value);
+ var hash = $getExistingHash(value);
+ if (IS_UNDEFINED(hash)) return false;
+ return %WeakCollectionDelete(this, value, hash);
}
@@ -162,7 +168,7 @@ function WeakSetDelete(value) {
DONT_ENUM | READ_ONLY);
// Set up the non-enumerable functions on the WeakSet prototype object.
-$installFunctions(GlobalWeakSet.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalWeakSet.prototype, DONT_ENUM, [
"add", WeakSetAdd,
"has", WeakSetHas,
"delete", WeakSetDelete
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 1a20109612..90deaba4fe 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -267,14 +267,12 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, const Operand& rm,
}
-Address Assembler::target_address_at(Address pc,
- ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-void Assembler::set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index cafa402fd6..cb93ab878b 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -3883,6 +3883,12 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dq(uint64_t data) {
+ EnsureSpace ensure_space(this);
+ emitq(data);
+}
+
+
void Assembler::dq(Label* label) {
EnsureSpace ensure_space(this);
if (label->is_bound()) {
@@ -3921,20 +3927,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
@@ -3954,6 +3946,7 @@ bool RelocInfo::IsInConstantPool() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 27f04419a3..15c531960f 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -535,15 +535,12 @@ class Assembler : public AssemblerBase {
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
- static inline Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool);
- static inline void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) ;
+ static inline Address target_address_at(Address pc, Address constant_pool);
+ static inline void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
static inline void set_target_address_at(Address pc,
@@ -551,7 +548,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -707,6 +704,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -1628,16 +1628,19 @@ class Assembler : public AssemblerBase {
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, const SourcePosition position);
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(uint64_t data);
+ void dp(uintptr_t data) { dq(data); }
void dq(Label* label);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index cff6e6d791..fa957a1b67 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -136,6 +136,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -156,12 +157,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(rbx);
}
- // Store a smi-tagged arguments count on the stack.
+ // Preserve the incoming parameters on the stack.
__ Integer32ToSmi(rax, rax);
__ Push(rax);
-
- // Push the function to invoke on the stack.
__ Push(rdi);
+ if (use_new_target) {
+ __ Push(rdx);
+ }
Label rt_call, normal_new, allocated, count_incremented;
__ cmpp(rdx, rdi);
@@ -353,17 +355,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdi: FixedArray
// rax: start of next object
// rdx: number of elements
- { Label loop, entry;
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movp(Operand(rcx, 0), rdx);
- __ addp(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpp(rcx, rax);
- __ j(below, &loop);
- }
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ InitializeFieldsWithFiller(rcx, rax, rdx);
// Store the initialized FixedArray into the properties field of
// the JSObject
@@ -396,7 +390,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ movp(rcx, Operand(rsp, kPointerSize*2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ movp(rcx, Operand(rsp, offset));
__ Cmp(rcx, masm->isolate()->factory()->undefined_value());
__ j(equal, &count_incremented);
// rcx is an AllocationSite. We are creating a memento from it, so we
@@ -407,13 +402,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- // Retrieve the function from the stack.
+ // Restore the parameters.
+ if (use_new_target) {
+ __ Pop(rdx);
+ }
__ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
__ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
+ // Push new.target onto the construct frame. This is stored just below the
+ // receiver on the stack.
+ if (use_new_target) {
+ __ Push(rdx);
+ }
+
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
@@ -445,7 +449,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -470,9 +476,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&use_receiver);
__ movp(rax, Operand(rsp, 0));
- // Restore the arguments count and leave the construct frame.
+ // Restore the arguments count and leave the construct frame. The arguments
+ // count is stored below the receiver and the new.target.
__ bind(&exit);
- __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ movp(rbx, Operand(rsp, offset));
// Leave construct frame.
}
@@ -489,12 +497,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@@ -535,8 +548,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ decp(rcx);
__ j(greater_equal, &loop);
- __ incp(rax); // Pushed new.target.
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -561,8 +572,9 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// Restore context from the frame.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movp(rbx, Operand(rsp, 0)); // Get arguments count.
- } // Leave construct frame.
+ // Get arguments count, skipping over new.target.
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ } // Leave construct frame.
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
@@ -1113,6 +1125,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int limitOffset) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
// Copy all arguments from the array to the stack.
Label entry, loop;
@@ -1122,7 +1136,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ movp(receiver, Operand(rbp, argumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ Move(slot, Smi::FromInt(index));
+ __ Move(vector, feedback_vector);
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
@@ -1642,6 +1663,41 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
+
+ // If the function is strong, we need to throw an error.
+ Label no_strong_error;
+ __ movp(kScratchRegister,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrongModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrongModeBitWithinByte));
+ __ j(equal, &no_strong_error, Label::kNear);
+
+ // What we really care about is the required number of arguments.
+
+ if (kPointerSize == kInt32Size) {
+ __ movp(
+ kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kLengthOffset));
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ } else {
+ // See comment near kLengthOffset in src/objects.h
+ __ movsxlq(
+ kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kLengthOffset));
+ __ shrq(kScratchRegister, Immediate(1));
+ }
+
+ __ cmpp(rax, kScratchRegister);
+ __ j(greater_equal, &no_strong_error, Label::kNear);
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Copy receiver and all actual arguments.
@@ -1752,6 +1808,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 92b186e9c6..f467ea357f 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -98,15 +98,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- rax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ rax.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ Push(descriptor.GetEnvironmentParameterRegister(i));
+ __ Push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@@ -527,9 +527,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
- DCHECK(!FLAG_vector_ics ||
- !AreAliased(r8, r9, VectorLoadICDescriptor::VectorRegister(),
- VectorLoadICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(r8, r9, LoadWithVectorDescriptor::VectorRegister(),
+ LoadDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
r9, &miss);
@@ -540,7 +539,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The key is in rdx and the parameter count is in rax.
DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
@@ -607,9 +605,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Registers used over the whole function:
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
-
- CHECK(!has_new_target());
-
Factory* factory = isolate()->factory();
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
@@ -679,7 +674,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&instantiate, Label::kNear);
- const int kAliasedIndex = Context::ALIASED_ARGUMENTS_MAP_INDEX;
+ const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
__ bind(&has_mapped_parameters);
__ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
__ bind(&instantiate);
@@ -823,7 +818,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
- CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -848,9 +842,10 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// rsp[0] : return address
- // rsp[8] : index of rest parameter
- // rsp[16] : number of parameters
- // rsp[24] : receiver displacement
+ // rsp[8] : language mode
+ // rsp[16] : index of rest parameter
+ // rsp[24] : number of parameters
+ // rsp[32] : receiver displacement
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -860,7 +855,7 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(1), rcx);
__ SmiToInteger64(rcx, rcx);
@@ -869,7 +864,7 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ movp(args.GetArgumentOperand(0), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@@ -913,9 +908,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = rdi;
Register result = rax;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -961,19 +955,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- // If the constructor was [[Call]]ed, the call will not push a new.target
- // onto the stack. In that case the arguments array we construct is bogus,
- // bu we do not care as the constructor throws immediately.
- __ Cmp(rcx, Smi::FromInt(0));
- Label skip_decrement;
- __ j(equal, &skip_decrement);
- // Subtract 1 from smi-tagged arguments count.
- __ SmiToInteger32(rcx, rcx);
- __ decl(rcx);
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&skip_decrement);
- }
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ leap(rdx, Operand(rdx, rcx, times_pointer_size,
@@ -1533,7 +1514,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects, done;
+ Label runtime_call, check_unequal_objects, done;
Condition cc = GetCondition();
Factory* factory = isolate()->factory();
@@ -1566,12 +1547,17 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc));
- __ ret(0);
- __ bind(&check_for_nan);
+ if (is_strong(strength())) {
+ // In strong mode, this comparison must throw, so call the runtime.
+ __ j(equal, &runtime_call, Label::kFar);
+ } else {
+ Label check_for_nan;
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Set(rax, NegativeComparisonResult(cc));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
}
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
@@ -1582,12 +1568,20 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
factory->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
if (cc != equal) {
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
// Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
+ __ cmpb(rcx, Immediate(static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE)));
+ __ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
- __ CmpObjectType(rax, SYMBOL_TYPE, rcx);
- __ j(equal, &not_identical, Label::kNear);
+ __ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
+ __ j(equal, &runtime_call, Label::kFar);
+ if (is_strong(strength())) {
+ // We have already tested for smis and heap numbers, so if both
+ // arguments are not strings we must proceed to the slow case.
+ __ testb(rcx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &runtime_call, Label::kFar);
+ }
}
__ Set(rax, EQUAL);
__ ret(0);
@@ -1734,7 +1728,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label not_both_objects, return_unequal;
+ Label return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
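// Illustrative sketch (not part of the patch): the test above leans on
// x64 V8's pointer tagging, where a smi has tag bit 0 clear and a heap
// object has it set. Adding two tagged words exposes the mix in bit 0:
// smi + heap object leaves it set (0 + 1), while heap object + heap
// object clears it (1 + 1 carries out of the bit). A standalone model
// with hypothetical plain-integer "tagged words":
#include <cassert>
#include <cstdint>
void SmiTagAdditionModel() {
  const uintptr_t kSmiTagMask_ = 1;
  uintptr_t smi = 42u << 1;         // tag bit 0 clear (models a smi)
  uintptr_t heap_a = 0x1000u | 1u;  // tag bit 0 set (models a heap object)
  uintptr_t heap_b = 0x2000u | 1u;
  assert(((smi + heap_a) & kSmiTagMask_) != 0);     // at least one smi
  assert(((heap_a + heap_b) & kSmiTagMask_) == 0);  // both heap objects
}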
@@ -1742,11 +1736,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
+ __ j(not_zero, &runtime_call, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects, Label::kNear);
+ __ j(below, &runtime_call, Label::kNear);
__ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects, Label::kNear);
+ __ j(below, &runtime_call, Label::kNear);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(zero, &return_unequal, Label::kNear);
@@ -1760,8 +1754,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Return non-equal by returning the non-zero object pointer in rax,
// or return equal if we fell through to here.
__ ret(0);
- __ bind(&not_both_objects);
}
+ __ bind(&runtime_call);
// Push arguments below the return address to prepare jump to builtin.
__ PopReturnAddressTo(rcx);
@@ -1773,7 +1767,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == equal) {
builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- builtin = Builtins::COMPARE;
+ builtin =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
__ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
@@ -2135,6 +2130,11 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
factory->allocation_site_map());
__ j(not_equal, &miss);
+ // Increment the call count for monomorphic function calls.
+ __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(CallICNexus::kCallCountIncrement));
+
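// Illustrative model (not part of the patch): call ICs in this revision
// keep two feedback-vector entries per slot — element `slot` holds the
// monomorphic target (or a sentinel), and element `slot + 1` holds a smi
// call count. The SmiAddConstant above bumps that second element; a plain
// C++ analogue, with hypothetical names and an assumed increment of 1:
#include <cstddef>
#include <vector>
struct CallFeedbackModel {
  static const long kCallCountIncrement = 1;  // assumed value
  std::vector<long> elements;                 // smi counts as plain ints
  void IncrementCallCount(size_t slot) {
    elements[slot + 1] += kCallCountIncrement;
  }
};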
__ movp(rbx, rcx);
__ movp(rdx, rdi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
@@ -2196,6 +2196,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(rdi, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(CallICNexus::kCallCountIncrement));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2266,6 +2271,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Update stats.
__ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
+ // Initialize the call counter.
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(CallICNexus::kCallCountIncrement));
+
// Store the function. Use a stub since we need a frame for allocation.
// rbx - vector
// rdx - slot (needs to be in smi form)
@@ -2928,9 +2938,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Push(VectorLoadICDescriptor::VectorRegister());
- __ Push(VectorLoadICDescriptor::SlotRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(LoadWithVectorDescriptor::VectorRegister());
+ __ Push(LoadDescriptor::SlotRegister());
}
__ Push(object_);
__ Push(index_); // Consumed by runtime conversion function.
@@ -2947,9 +2957,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ movp(index_, rax);
}
__ Pop(object_);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ Pop(VectorLoadICDescriptor::SlotRegister());
- __ Pop(VectorLoadICDescriptor::VectorRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(LoadDescriptor::SlotRegister());
+ __ Pop(LoadWithVectorDescriptor::VectorRegister());
}
// Reload the instance type.
__ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -3598,7 +3608,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4354,15 +4364,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4441,21 +4451,19 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // rdx
- Register name = VectorLoadICDescriptor::NameRegister(); // rcx
- Register vector = VectorLoadICDescriptor::VectorRegister(); // rbx
- Register slot = VectorLoadICDescriptor::SlotRegister(); // rax
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
+ Register name = LoadWithVectorDescriptor::NameRegister(); // rcx
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
Register feedback = rdi;
Register integer_slot = r8;
Register receiver_map = r9;
@@ -4496,21 +4504,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // rdx
- Register key = VectorLoadICDescriptor::NameRegister(); // rcx
- Register vector = VectorLoadICDescriptor::VectorRegister(); // rbx
- Register slot = VectorLoadICDescriptor::SlotRegister(); // rax
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
+ Register key = LoadWithVectorDescriptor::NameRegister(); // rcx
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
Register feedback = rdi;
Register integer_slot = r8;
Register receiver_map = r9;
@@ -4543,7 +4551,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4566,6 +4574,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, rbx);
CallICStub stub(isolate(), state());
@@ -5366,6 +5426,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index ceee95488f..0fa7dc4848 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -724,6 +724,7 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 59a187f14c..0224b23a82 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -39,6 +39,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index ee2b5c526a..1749760d37 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -159,53 +159,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-x64.cc).
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0, false);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-x64.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x64.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC store call (from ic-x64.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- rax : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), 0, false);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-x64.cc).
// ----------- S t a t e -------------
@@ -304,6 +257,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 02e9d2e66f..cd3324c7e4 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -346,7 +346,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
+ // No embedded constant pool support.
UNREACHABLE();
}
@@ -354,6 +354,7 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index c70684d99f..9a651c5152 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1390,7 +1390,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(",%d", (*current) & 3);
current += 1;
} else if (third_byte == 0x16) {
- get_modrm(*current, &mod, &regop, &rm);
+ get_modrm(*current, &mod, &rm, &regop);
AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 114945b49f..11db5b9ed6 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 1f8612afae..0719baffbf 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -71,36 +71,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -5 * kPointerSize;
- static const int kConstructorOffset = kMinInt;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index cdd57074ce..ef8c15087f 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -93,10 +93,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-x64.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -115,7 +111,7 @@ void FullCodeGenerator::Generate() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ info->MayUseThis() && info->scope()->has_this_declaration()) {
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
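// Sketch (not part of the patch; the hunk is truncated here): the sloppy-
// mode fix-up that follows in this function compares the receiver slot
// against undefined and only then substitutes the global proxy, roughly
// along these lines (register choice assumed):
//   __ movp(rcx, args.GetReceiverOperand());
//   __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
//   __ j(not_equal, &ok, Label::kNear);
//   __ movp(rcx, GlobalObjectOperand());
//   __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
//   __ movp(args.GetReceiverOperand(), rcx);
//   __ bind(&ok);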
@@ -183,17 +179,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
// Argument to NewContext is the function, which is still in rdi.
if (info->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -209,8 +205,9 @@ void FullCodeGenerator::Generate() {
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
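// Worked example (not part of the patch): with the receiver now included
// at i == -1, the caller-SP-relative offset lands just above the last
// stack parameter. A compile-time check of the arithmetic, assuming
// kCallerSPOffset is 2 * kPointerSize (return address + saved fp):
constexpr int kPointerSize_ = 8;
constexpr int kCallerSPOffset_ = 2 * kPointerSize_;
constexpr int ParameterOffset(int num_parameters, int i) {
  return kCallerSPOffset_ + (num_parameters - 1 - i) * kPointerSize_;
}
// Two parameters: p1 at +16, p0 at +24, receiver (i == -1) at +32.
static_assert(ParameterOffset(2, 1) == 16, "last parameter");
static_assert(ParameterOffset(2, -1) == 32, "receiver above parameters");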
@@ -233,10 +230,48 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ // The write barrier clobbers the register again; keep it marked as such.
+ }
+ SetVar(this_function_var, rdi, rbx, rdx);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ Label non_adaptor_frame;
+ __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &non_adaptor_frame);
+ __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+
+ __ bind(&non_adaptor_frame);
+ __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+
+ Label non_construct_frame, done;
+ __ j(not_equal, &non_construct_frame);
+
+ // Construct frame
+ __ movp(rax,
+ Operand(rax, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ jmp(&done);
+
+ // Non-construct frame
+ __ bind(&non_construct_frame);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+
+ __ bind(&done);
+ SetVar(new_target_var, rax, rbx, rdx);
+ }
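// Illustrative model (not part of the patch): the block above walks one
// frame up, skips an arguments-adaptor frame if present, and yields the
// original constructor only when it lands on a construct frame; otherwise
// new.target is undefined. The real code distinguishes frames via the
// context and marker slots; this model collapses that into one marker:
#include <cstdint>
struct FrameModel {
  intptr_t marker;                // models the StackFrame type
  FrameModel* caller;
  intptr_t original_constructor;  // meaningful only for construct frames
};
enum : intptr_t { kAdaptor = 1, kConstruct = 2, kUndefinedSentinel = 0 };
inline intptr_t NewTargetFor(FrameModel* frame) {
  if (frame->marker == kAdaptor) frame = frame->caller;  // skip adaptor
  if (frame->marker == kConstruct) return frame->original_constructor;
  return kUndefinedSentinel;  // function was [[Call]]ed, not constructed
}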
// Possibly allocate RestParameters
int rest_index;
@@ -246,16 +281,13 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ Push(rdx);
__ Push(Smi::FromInt(num_parameters));
__ Push(Smi::FromInt(rest_index));
+ __ Push(Smi::FromInt(language_mode()));
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -294,7 +326,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
@@ -319,7 +351,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -440,7 +472,7 @@ void FullCodeGenerator::EmitReturnSequence() {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
@@ -449,9 +481,6 @@ void FullCodeGenerator::EmitReturnSequence() {
int no_frame_start = masm_->pc_offset();
int arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, rcx);
@@ -815,7 +844,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
@@ -823,8 +853,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
@@ -832,7 +862,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -843,7 +873,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ Push(rsi);
__ Push(variable->name());
@@ -873,25 +903,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ movp(StackOperand(variable), result_register());
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -909,7 +940,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ Push(rsi);
__ Push(variable->name());
@@ -926,20 +957,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::UNALLOCATED:
+ case VariableLocation::GLOBAL:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -1017,9 +1049,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1064,8 +1096,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
- SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1073,7 +1106,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
@@ -1177,7 +1210,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
__ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
__ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
@@ -1212,9 +1245,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ Push(rcx); // Enumerable.
__ Push(rbx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ Cmp(rax, Smi::FromInt(0));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, loop_statement.continue_label());
__ movp(rbx, rax);
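// Sketch of the new contract (not part of the patch): Runtime::kForInFilter
// replaces the FILTER_KEY builtin. It returns undefined when the key is no
// longer present on the enumerable — hence the CompareRoot against
// undefined above instead of the old Smi::FromInt(0) check — and otherwise
// returns the key to bind. With hypothetical types:
//   Object* ForInFilterModel(JSReceiver* receiver, Object* key) {
//     return HasProperty(receiver, key) ? key : undefined_value();
//   }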
@@ -1224,7 +1257,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ movp(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1284,40 +1317,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ movp(LoadDescriptor::ReceiverRegister(),
- Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->HomeObjectFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
-
- __ Cmp(rax, isolate()->factory()->undefined_value());
- Label done;
- __ j(not_equal, &done);
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ Move(StoreDescriptor::NameRegister(),
isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1372,19 +1381,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ Move(LoadDescriptor::NameRegister(), proxy->var()->name());
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
- CallLoadIC(mode);
+ // All extension objects were empty and it is safe to use the normal
+ // global load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1450,30 +1449,43 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ Move(LoadDescriptor::NameRegister(), var->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
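// Illustrative sketch (not part of the patch): why the load mode depends
// on typeof. `typeof x` must yield "undefined" rather than throw when `x`
// is unresolvable, while a plain read of `x` must throw a ReferenceError,
// so only the latter may use a CONTEXTUAL load. Mirroring the dispatch
// above with hypothetical enums:
enum class TypeofStateModel { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum class ContextualModeModel { CONTEXTUAL, NOT_CONTEXTUAL };
inline ContextualModeModel LoadModeFor(TypeofStateModel typeof_state) {
  return typeof_state == TypeofStateModel::NOT_INSIDE_TYPEOF
             ? ContextualModeModel::CONTEXTUAL       // plain `x`: may throw
             : ContextualModeModel::NOT_CONTEXTUAL;  // `typeof x`: never throws
}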
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
// Record position before possible IC call.
- SetSourcePosition(proxy->position());
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ Move(LoadDescriptor::NameRegister(), var->name());
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(rax);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
: "[ Stack slot");
if (var->binding_needs_init()) {
@@ -1541,16 +1553,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ Push(rsi); // Context.
__ Push(var->name());
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1625,7 +1641,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->ComputeFlags();
if (MustCreateObjectLiteralWithRuntime(expr)) {
@@ -1650,13 +1665,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in rax.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1683,7 +1697,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
__ Move(StoreDescriptor::NameRegister(), key->value());
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1691,6 +1710,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(StoreDescriptor::NameRegister(),
isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(), Operand(rsp, 0));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1702,7 +1724,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ Push(Smi::FromInt(SLOPPY)); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1736,9 +1759,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ Push(Smi::FromInt(NONE));
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
@@ -1771,7 +1798,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1813,6 +1841,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(rax);
}
+
+ // Verify that compilation exactly consumed the number of store ic slots that
+ // the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1854,8 +1886,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1870,7 +1905,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_constant_fast_elements) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
__ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
@@ -1882,16 +1917,41 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ Move(rcx, Smi::FromInt(i));
+ __ Move(rcx, Smi::FromInt(array_index));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ // In case the array literal contains spread expressions it has two parts.
+ // The first part is the "static" array, which is set by literal index and
+ // handled above. The second part starts at the first spread expression
+ // (inclusive); these elements get appended to the array. Note that the
+ // number of elements an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ Drop(1); // literal index
+ __ Pop(rax);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ __ Push(rax);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
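// Illustrative model (not part of the patch): the two loops above in plain
// C++ terms — elements before the first spread are stored by index into
// the cloned backing store; everything from the first spread onward is
// appended, since its final index is unknown until the iterable runs.
// Spreads are collapsed to a single value here for brevity:
#include <vector>
struct ElementModel { bool is_spread; int value; };
inline std::vector<int> BuildArrayLiteralModel(
    const std::vector<ElementModel>& elems) {
  std::vector<int> out(elems.size());
  size_t i = 0;
  for (; i < elems.size() && !elems[i].is_spread; ++i)
    out[i] = elems[i].value;        // phase 1: store at the literal index
  out.resize(i);                    // drop the unfilled tail
  for (; i < elems.size(); ++i)
    out.push_back(elems[i].value);  // phase 2: append
  return out;
}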
if (result_saved) {
- __ addp(rsp, Immediate(kPointerSize)); // literal index
+ __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1903,9 +1963,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1922,8 +1983,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
if (expr->is_compound()) {
__ Push(MemOperand(rsp, kPointerSize));
@@ -1931,9 +1994,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
@@ -1988,7 +2052,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
@@ -2004,14 +2067,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
break;
@@ -2035,6 +2097,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2120,7 +2184,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ Pop(rax); // result
- EnterTryBlock(expr->index(), &l_catch);
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(rax); // result
__ jmp(&l_suspend);
@@ -2130,7 +2195,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ movp(rax, Operand(rsp, generator_object_depth));
__ Push(rax); // g
- __ Push(Smi::FromInt(expr->index())); // handler-index
+ __ Push(Smi::FromInt(handler_index)); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
@@ -2144,7 +2209,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2157,11 +2222,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result = receiver[f](arg);
__ bind(&l_call);
__ movp(load_receiver, Operand(rsp, kPointerSize));
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
@@ -2176,10 +2239,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Move(load_receiver, rax);
__ Push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->DoneFeedbackSlot()));
- }
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->DoneFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL); // rax=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2189,10 +2250,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ Pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->ValueFeedbackSlot()));
- }
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->ValueFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
break;
@@ -2326,51 +2385,44 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ Move(LoadDescriptor::NameRegister(), key->value());
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
@@ -2390,8 +2442,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movp(rax, rcx);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2435,7 +2487,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in rax.
DCHECK(lit != NULL);
__ Push(rax);
@@ -2467,7 +2520,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2503,8 +2557,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ Pop(rdx);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2512,17 +2566,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2532,13 +2587,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(rax);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; rax: home_object
Register scratch = rcx;
Register scratch2 = rdx;
@@ -2553,9 +2610,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ Push(rax);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = rcx;
Register scratch2 = rdx;
@@ -2578,6 +2635,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Move(StoreDescriptor::NameRegister(), rax);
__ Pop(StoreDescriptor::ReceiverRegister());
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2599,12 +2657,13 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ Move(StoreDescriptor::NameRegister(), var->name());
__ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2693,11 +2752,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ Pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2736,15 +2798,17 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
-
__ Pop(StoreDescriptor::NameRegister()); // Key.
__ Pop(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(rax));
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2753,6 +2817,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2762,9 +2828,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ movp(LoadDescriptor::ReceiverRegister(), rax);
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2775,9 +2841,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2830,19 +2896,19 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
+ SetExpressionPosition(prop);
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(rax);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ Push(rax);
__ Push(rax);
__ Push(Operand(rsp, kPointerSize * 2));
__ Push(key->value());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2850,7 +2916,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2891,16 +2958,16 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
- SuperReference* super_ref = prop->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ Push(rax);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ Push(rax);
__ Push(rax);
__ Push(Operand(rsp, kPointerSize * 2));
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2908,7 +2975,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2924,14 +2992,11 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
@@ -2959,10 +3024,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the enclosing function.
__ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- // Push the receiver of the enclosing function and do runtime call.
- StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
- __ Push(args.GetReceiverOperand());
-
// Push the language mode.
__ Push(Smi::FromInt(language_mode()));
@@ -2970,18 +3031,12 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
+ SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
Variable* this_var = super_ref->this_var()->var();
GetVar(rcx, this_var);
__ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
@@ -2991,7 +3046,48 @@ void FullCodeGenerator::EmitInitializeThisAfterSuper(
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
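A hedged reading of the hole check above: in a derived constructor, this stays uninitialized (the hole) until super() returns, so a non-hole value here means this was already initialized once and a ReferenceError is due. Roughly:

    // if (this_var != the_hole) throw ReferenceError(name);
    // this_var = <result of the super call>;  // via EmitVariableAssignment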
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in rax) and
+ // the object holding it (returned in rdx).
+ __ Push(context_register());
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(rax); // Function.
+ __ Push(rdx); // Receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ jmp(&call, Label::kNear);
+ __ bind(&done);
+ // Push function.
+ __ Push(rax);
+ // Pass undefined as the receiver, which is the WithBaseObject of a
+ // non-object environment record. If the callee is sloppy, it will patch
+ // it up to be the global receiver.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
}
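Both paths of the new helper converge on the same two-slot contract, sketched here for reference:

    //   rsp[kPointerSize] : callee (the function to call)
    //   rsp[0]            : receiver (the WithBaseObject, or undefined)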
@@ -3008,13 +3104,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
+ PushCalleeAndWithBaseObject(expr);
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
@@ -3026,15 +3120,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- __ movp(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ // Touch up the callee.
__ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3047,40 +3138,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in rax) and
- // the object holding it (returned in rdx).
- __ Push(context_register());
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(rax); // Function.
- __ Push(rdx); // Receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ Push(rax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
@@ -3092,10 +3150,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
- }
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3107,9 +3162,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
- }
__ PushRoot(Heap::kUndefinedValueRootIndex);
// Emit function call.
EmitCall(expr);
@@ -3131,7 +3184,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3143,7 +3196,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
@@ -3167,11 +3220,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
+
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
- EmitLoadSuperConstructor();
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3183,7 +3239,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
@@ -3209,7 +3265,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(rax);
}
@@ -3490,6 +3546,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_TYPED_ARRAY_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
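This intrinsic, like EmitIsDate below, follows full-codegen's standard inline-test shape; a sketch of the moving parts:

    // PrepareTest   : materialize if_true/if_false labels for the context
    // JumpIfSmi     : a smi can never be a heap object of the tested type
    // CmpObjectType : compare the map's instance type against the target
    // Split         : route equal/not_equal to if_true/if_false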
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3738,6 +3816,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3746,19 +3846,20 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = rax;
Register result = rax;
Register scratch = rcx;
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(object);
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ __ Check(equal, kOperandIsNotADate);
+ }
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
@@ -3767,7 +3868,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
@@ -3775,12 +3876,9 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
context()->Plug(rax);
}
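The cached-field fast path above hinges on a stamp comparison. Conceptually (a hedged sketch; GetDateField stands in for the get_date_field_function C entry):

    // if (date->cache_stamp == isolate->date_cache_stamp)
    //   result = date->field[index];         // cache still valid
    // else
    //   result = GetDateField(date, index);  // C call refills the cache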
@@ -4076,11 +4174,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(result_register(), new_target_var);
- __ Push(result_register());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+
+ // new.target
+ VisitForStackValue(args->at(0));
- EmitLoadSuperConstructor();
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -4099,8 +4201,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiToInteger64(rcx, rcx);
- // Subtract 1 from arguments count, for new.target.
- __ subp(rcx, Immediate(1));
__ movp(rax, rcx);
__ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
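Two coupled changes here: new.target and .this_function now arrive as explicit intrinsic arguments (args->at(0) and args->at(1)) rather than being read out of the declaration scope, and because new.target is no longer appended to the pushed argument list, the adaptor-frame length can be used as-is:

    // before: rcx = adaptor frame length; rcx -= 1   // drop new.target
    // after:  rcx = adaptor frame length             // used unmodified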
@@ -4532,11 +4632,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4545,8 +4648,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4562,7 +4665,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4574,13 +4678,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ Move(LoadDescriptor::NameRegister(), expr->name());
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4588,8 +4688,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4614,6 +4713,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4638,6 +4738,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(function, arg_count);
context()->Plug(rax);
}
@@ -4661,10 +4762,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ Push(GlobalObjectOperand());
__ Push(var->name());
__ Push(Smi::FromInt(SLOPPY));
@@ -4674,7 +4776,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-global variables is false. 'this' is
// not really a variable, though we implement it as one. The
// subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4767,10 +4869,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4791,8 +4892,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ Push(result_register());
__ Push(MemOperand(rsp, kPointerSize));
__ Push(result_register());
@@ -4801,9 +4903,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
__ Push(result_register());
__ Push(MemOperand(rsp, 2 * kPointerSize));
@@ -4881,10 +4983,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
-
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4912,19 +5015,21 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Call stub for +1/-1.
__ bind(&stub_call);
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), expr->binary_op(), language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
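+ // Editor's note (hedged reading): strong mode forbids implicit coercion,
+ // so the ToNumberStub call is skipped there and the ToNumberId bailout is
+ // instead recorded once the BinaryOpIC has produced the value:
+ //   sloppy/strict: ToNumberStub -> bailout(ToNumberId) -> BinaryOpIC
+ //   strong:        BinaryOpIC -> bailout(ToNumberId)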
// Store the value returned in rax.
switch (assign_type) {
case VARIABLE:
@@ -4932,7 +5037,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(rax);
}
@@ -4944,7 +5049,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
@@ -4953,7 +5058,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ Pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4991,7 +5101,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -5006,47 +5121,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ Move(LoadDescriptor::NameRegister(), proxy->name());
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(rax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ Push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5122,7 +5196,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5176,9 +5250,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -5292,6 +5365,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
__ Push(rdx);
+
+ ClearPendingMessage();
}
@@ -5316,6 +5391,21 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(rdx));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ Store(pending_message_obj, rdx);
+}
+
+
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
+}
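For cross-reference, the interface-descriptor hunk later in this diff pins the x64 slot register to rdi, so the call sequence this helper feeds looks like:

    EmitLoadStoreICSlot(expr->AssignmentSlot());
    // rdi now holds SmiFromSlot(slot), read by the IC via
    // VectorStoreICTrampolineDescriptor::SlotRegister().
    CallIC(ic);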
+
+
#undef __
@@ -5396,6 +5486,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index bb31cbd406..7602403508 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
const Register LoadDescriptor::ReceiverRegister() { return rdx; }
const Register LoadDescriptor::NameRegister() { return rcx; }
+const Register LoadDescriptor::SlotRegister() { return rax; }
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return rax; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return rbx; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
const Register StoreDescriptor::ReceiverRegister() { return rdx; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return rcx; }
const Register StoreDescriptor::ValueRegister() { return rax; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return rdi; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return rbx; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
@@ -58,110 +62,102 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return rcx; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rbx};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
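Every descriptor below receives the same mechanical rewrite, so one sketch covers them all: rsi (the context) becomes implicit, the Representation side-table moves out of the platform half, and Initialize is renamed to InitializePlatformSpecific. Assuming a hypothetical SomeDescriptor:

    void SomeDescriptor::InitializePlatformSpecific(
        CallInterfaceDescriptorData* data) {
      Register registers[] = {rbx};  // parameter registers only; no rsi
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }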
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdi};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rbx};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = {rsi, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rax, rbx, rcx};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {rax, rbx, rcx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rax, rbx, rcx, rdx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rax, rbx, rcx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rbx, rdx};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {rbx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rbx, rdx, rdi};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rbx, rdx, rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rcx, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rcx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdi};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdi, rdx};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {rdi, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdi, rdx, rbx};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {rdi, rdx, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
// rdx : (only if rbx is not the megamorphic symbol) slot in feedback
@@ -169,209 +165,182 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// rdi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {rsi, rax, rdi, rbx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rax, rdi, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rcx, rbx, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rcx, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rax, rbx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // register state
- // rsi -- context
- Register registers[] = {rsi};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// rax -- number of arguments
// rdi -- function
// rbx -- allocation site with elements kind
- Register registers[] = {rsi, rdi, rbx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rdi, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// The stack parameter count covers the constructor pointer and a single argument.
- Register registers[] = {rsi, rdi, rbx, rax};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {rdi, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
- // rsi -- context
// rax -- number of arguments
// rdi -- constructor function
- Register registers[] = {rsi, rdi};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// The stack parameter count covers the constructor pointer and a single argument.
- Register registers[] = {rsi, rdi, rax};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {rdi, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdx, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdx, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rcx, rdx, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {rcx, rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {rsi, rdx, rax};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rsi, // context
rcx, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rsi, // context
rcx, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rsi, // context
rdx, // receiver
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rsi, // context
rdi, // JSFunction
rax, // actual number of arguments
rbx, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rsi, // context
rdi, // callee
rbx, // call_data
rcx, // holder
rdx, // api_function_address
rax, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rsi, // context
rdi, // callee
rbx, // call_data
rcx, // holder
rdx, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rdi, // math rounding function
+ rdx, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index eb8274bfd8..c799da2b66 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -129,8 +129,8 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
+ !info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
@@ -213,8 +213,9 @@ bool LCodeGen::GeneratePrologue() {
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
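A quick check of the i == -1 convention above, with num_parameters == 2 as a worked example; the offset formula still lands on the receiver's slot:

    // i = 1  -> kCallerSPOffset + 0 * kPointerSize   (last parameter)
    // i = 0  -> kCallerSPOffset + 1 * kPointerSize
    // i = -1 -> kCallerSPOffset + 2 * kPointerSize   (the receiver)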
@@ -536,52 +537,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@@ -888,28 +854,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1782,18 +1731,19 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
Smi* index = instr->index();
- Label runtime, done, not_date_object;
DCHECK(object.is(result));
DCHECK(object.is(rax));
- Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
- __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(object);
+ __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
+ __ Check(equal, kOperandIsNotADate);
+ }
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
@@ -2104,8 +2054,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(rax));
DCHECK(ToRegister(instr->result()).is(rax));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2548,7 +2498,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2832,7 +2783,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2888,10 +2840,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(rax));
AllowDeferredHandleDereference vector_structure_check;
@@ -2904,6 +2855,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ Move(slot_register, Smi::FromInt(index));
+}
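Usage sketch, matching the two generic-store sites wired up below:

    if (instr->hydrogen()->HasVectorAndSlot()) {
      EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
    }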
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->global_object())
@@ -2911,11 +2876,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
__ Move(LoadDescriptor::NameRegister(), instr->name());
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3030,12 +2993,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
__ Move(LoadDescriptor::NameRegister(), instr->name());
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3176,7 +3138,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3331,9 +3294,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3562,24 +3525,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- Register scratch = rdi;
- DCHECK(!scratch.is(receiver) && !scratch.is(name));
-
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), Code::LOAD_IC,
- instr->hydrogen()->flags(), false,
- receiver, name, scratch, no_reg);
-
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
@@ -4279,10 +4224,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4412,7 +4361,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4541,6 +4491,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4548,6 +4502,109 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = rax;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
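+ // Grow only when the key is at or beyond the current capacity; otherwise
+ // fall through to the fast path below.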
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ jmp(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
+ __ j(less_equal, deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ cmpl(ToRegister(key), Immediate(constant_capacity));
+ __ j(greater_equal, deferred->entry());
+ } else {
+ __ cmpl(ToRegister(key), ToRegister(current_capacity));
+ __ j(greater_equal, deferred->entry());
+ }
+
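+ // Fast path: there is room already, so the result is simply the current
+ // elements backing store.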
+ if (instr->elements()->IsRegister()) {
+ __ movp(result, ToRegister(instr->elements()));
+ } else {
+ __ movp(result, ToOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = rax;
+ __ Move(result, Smi::FromInt(0));
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsConstantOperand()) {
+ LConstantOperand* constant_object =
+ LConstantOperand::cast(instr->object());
+ if (IsSmiConstant(constant_object)) {
+ Smi* immediate = ToSmi(constant_object);
+ __ Move(result, immediate);
+ } else {
+ Handle<Object> handle_value = ToHandle(constant_object);
+ __ Move(result, handle_value);
+ }
+ } else if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ movp(result, ToOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
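+ // The stub takes the key in rbx, tagged as a Smi.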
+ if (key->IsConstantOperand()) {
+ __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
+ } else {
+ __ Move(rbx, ToRegister(key));
+ __ Integer32ToSmi(rbx, rbx);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ Condition is_smi = __ CheckSmi(result);
+ DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
+}
+
+
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
@@ -5809,8 +5866,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DeoptimizeIf(equal, instr, Deoptimizer::kNull);
-
Condition cc = masm()->CheckSmi(rax);
DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
@@ -5956,6 +6011,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 5fb3173b06..8fc7cdce21 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -28,7 +28,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -95,6 +94,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -217,7 +217,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -306,6 +305,8 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
@@ -317,7 +318,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index d10e1a1e9e..79e7020816 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -314,6 +314,7 @@ void LGapResolver::EmitSwap(int index) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index b8ac5592de..3c150e21c9 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1113,10 +1113,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), rsi);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@@ -1126,20 +1134,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
@@ -1819,7 +1813,7 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2103,7 +2097,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2166,7 +2160,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
@@ -2266,7 +2260,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
@@ -2352,8 +2346,15 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(context, object, key, value);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result = new (zone())
+ LStoreKeyedGeneric(context, object, key, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2387,6 +2388,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
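+ // The deferred path calls GrowArrayElementsStub, so the instruction needs a
+ // pointer map as well as an environment to deoptimize with.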
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, rax);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool is_external_location = instr->access().IsExternalMemory() &&
@@ -2442,9 +2458,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
LStoreNamedGeneric* result =
- new(zone()) LStoreNamedGeneric(context, object, value);
+ new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2521,7 +2543,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2647,7 +2669,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2718,6 +2740,7 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index b7f2ab9b01..50e0595025 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -117,6 +117,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -150,7 +151,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -478,27 +478,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1173,6 +1152,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1538,7 +1519,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1857,8 +1838,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
- : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ : inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1866,6 +1851,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2169,17 +2158,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2220,22 +2214,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* object,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2294,6 +2290,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 0e70826df2..092b5bc83d 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -925,6 +925,8 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+ // Special-casing 0 here to use xorl seems to make things slower, so we don't
+ // do it.
Move(dst, source, Assembler::RelocInfoNone());
}
@@ -4008,6 +4010,7 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
movl(scratch, r0);
shrl(scratch, Immediate(16));
xorl(r0, scratch);
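+ // Mask to the low 30 bits so the hash always fits in a non-negative Smi.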
+ andl(r0, Immediate(0x3fffffff));
}
@@ -4567,7 +4570,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
addp(start_offset, Immediate(kPointerSize));
bind(&entry);
cmpp(start_offset, end_offset);
- j(less, &loop);
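+ // "below" is the unsigned comparison; the offsets are unsigned quantities.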
+ j(below, &loop);
}
@@ -5094,6 +5097,7 @@ void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index e953182090..f1eb072075 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1356,6 +1356,7 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 4f5768e684..62beab8ed3 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -412,6 +412,12 @@ void Assembler::emit(uint32_t x) {
}
+void Assembler::emit_q(uint64_t x) {
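+ // Store all eight bytes at once; ia32/x87 is little-endian, so this lays
+ // the value out low byte first.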
+ *reinterpret_cast<uint64_t*>(pc_) = x;
+ pc_ += sizeof(uint64_t);
+}
+
+
void Assembler::emit(Handle<Object> handle) {
AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
@@ -476,14 +482,12 @@ void Assembler::emit_w(const Immediate& x) {
}
-Address Assembler::target_address_at(Address pc,
- ConstantPoolArray* constant_pool) {
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-void Assembler::set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 43694ded6b..b7ba0cdf9b 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -388,6 +388,14 @@ void Assembler::mov_b(Register dst, const Operand& src) {
}
+void Assembler::mov_b(const Operand& dst, const Immediate& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC6);
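+ // C6 /0 is MOV r/m8, imm8; eax's register code (0) supplies the /0 opcode
+ // extension in emit_operand below.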
+ emit_operand(eax, dst);
+ EMIT(static_cast<int8_t>(src.x_));
+}
+
+
void Assembler::mov_b(const Operand& dst, int8_t imm8) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
@@ -430,6 +438,16 @@ void Assembler::mov_w(const Operand& dst, int16_t imm16) {
}
+void Assembler::mov_w(const Operand& dst, const Immediate& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
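+ // The 16-bit immediate is emitted low byte first, e.g.
+ // mov_w(Operand(eax, 0), Immediate(0x1234)) produces 66 C7 00 34 12.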
+ EMIT(static_cast<int8_t>(src.x_ & 0xff));
+ EMIT(static_cast<int8_t>(src.x_ >> 8));
+}
+
+
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
@@ -1698,6 +1716,20 @@ void Assembler::fsub_i(int i) {
}
+void Assembler::fsubr_d(const Operand& adr) {
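+ // DC /5 is FSUBR m64fp; ebp's register code (5) acts as the ModR/M opcode
+ // extension here, not as a register operand.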
+ EnsureSpace ensure_space(this);
+ EMIT(0xDC);
+ emit_operand(ebp, adr);
+}
+
+
+void Assembler::fsub_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDC);
+ emit_operand(esp, adr);
+}
+
+
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
@@ -1717,12 +1749,33 @@ void Assembler::fmul(int i) {
}
+void Assembler::fmul_d(const Operand& adr) {
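+ // DC /1 is FMUL m64fp (ecx's register code 1 is the opcode extension).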
+ EnsureSpace ensure_space(this);
+ EMIT(0xDC);
+ emit_operand(ecx, adr);
+}
+
+
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xF8, i);
}
+void Assembler::fdiv_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDC);
+ emit_operand(esi, adr);
+}
+
+
+void Assembler::fdivr_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDC);
+ emit_operand(edi, adr);
+}
+
+
void Assembler::fdiv_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xF0, i);
@@ -2028,6 +2081,12 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dq(uint64_t data) {
+ EnsureSpace ensure_space(this);
+ emit_q(data);
+}
+
+
void Assembler::dd(Label* label) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -2047,20 +2106,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
@@ -2086,6 +2131,7 @@ void LogGeneratedCodeCoverage(const char* file_line) {
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 67af72e0c2..d8c86abf7a 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -273,6 +273,14 @@ inline Condition CommuteCondition(Condition cc) {
}
+enum RoundingMode {
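+ // The values match the RC (rounding control) field encoding of the x87 FPU
+ // control word.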
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+};
+
+
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -495,15 +503,12 @@ class Assembler : public AssemblerBase {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc,
- ConstantPoolArray* constant_pool);
- inline static void set_target_address_at(Address pc,
- ConstantPoolArray* constant_pool,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ inline static Address target_address_at(Address pc, Address constant_pool);
+ inline static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
static inline void set_target_address_at(Address pc,
@@ -511,7 +516,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target);
}
@@ -593,6 +598,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -620,11 +628,14 @@ class Assembler : public AssemblerBase {
void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
+ void mov_b(const Operand& dst, const Immediate& src);
void mov_b(const Operand& dst, Register src);
void mov_w(Register dst, const Operand& src);
void mov_w(const Operand& dst, Register src);
void mov_w(const Operand& dst, int16_t imm16);
+ void mov_w(const Operand& dst, const Immediate& src);
+
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
@@ -886,15 +897,21 @@ class Assembler : public AssemblerBase {
void fadd_d(const Operand& adr);
void fsub(int i);
void fsub_i(int i);
+ void fsub_d(const Operand& adr);
+ void fsubr_d(const Operand& adr);
void fmul(int i);
+ void fmul_d(const Operand& adr);
void fmul_i(int i);
void fdiv(int i);
+ void fdiv_d(const Operand& adr);
+ void fdivr_d(const Operand& adr);
void fdiv_i(int i);
void fisub_s(const Operand& adr);
void faddp(int i = 1);
void fsubp(int i = 1);
+ void fsubr(int i = 1);
void fsubrp(int i = 1);
void fmulp(int i = 1);
void fdivp(int i = 1);
@@ -952,6 +969,8 @@ class Assembler : public AssemblerBase {
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(uint64_t data);
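+ // Pointer-sized data is 32 bits on this port, so dp() just forwards to dd().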
+ void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
// Check if there is less than kGap bytes available in the buffer.
@@ -978,11 +997,12 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- // Allocate a constant pool of the correct size for the generated code.
- Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
- // Generate the constant pool for the generated code.
- void PopulateConstantPool(ConstantPoolArray* constant_pool);
+ void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+ ConstantPoolEntry::Access access,
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
protected:
byte* addr_at(int pos) { return buffer_ + pos; }
@@ -1008,6 +1028,7 @@ class Assembler : public AssemblerBase {
TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
inline void emit_w(const Immediate& x);
+ inline void emit_q(uint64_t x);
// Emit the code-object-relative offset of the label's position
inline void emit_code_relative_offset(Label* label);
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 28ead0c654..55e648cab1 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -138,6 +138,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
+ bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -158,12 +159,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
}
- // Store a smi-tagged arguments count on the stack.
+ // Preserve the incoming parameters on the stack.
__ SmiTag(eax);
__ push(eax);
-
- // Push the function to invoke on the stack.
__ push(edi);
+ if (use_new_target) {
+ __ push(edx);
+ }
__ cmp(edx, edi);
Label normal_new;
@@ -358,17 +360,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// ebx: JSObject
// edi: FixedArray
// ecx: start of next object
- { Label loop, entry;
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, ecx);
- __ j(below, &loop);
- }
+ __ mov(edx, factory->undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ InitializeFieldsWithFiller(eax, ecx, edx);
// Store the initialized FixedArray into the properties field of
// the JSObject
@@ -399,7 +393,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
- __ mov(ecx, Operand(esp, kPointerSize * 2));
+ int offset = (use_new_target ? 3 : 2) * kPointerSize;
+ __ mov(ecx, Operand(esp, offset));
__ cmp(ecx, masm->isolate()->factory()->undefined_value());
__ j(equal, &count_incremented);
// ecx is an AllocationSite. We are creating a memento from it, so we
@@ -409,13 +404,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
- // Retrieve the function from the stack.
- __ pop(edi);
+ // Restore the parameters.
+ if (use_new_target) {
+ __ pop(edx); // new.target
+ }
+ __ pop(edi); // Constructor function.
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
__ SmiUntag(eax);
+ // Push new.target onto the construct frame. This is stored just below the
+ // receiver on the stack.
+ if (use_new_target) {
+ __ push(edx);
+ }
+
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
@@ -448,7 +452,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ // TODO(arv): Remove the "!use_new_target" before supporting optimization
+ // of functions that reference new.target
+ if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -473,9 +479,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&use_receiver);
__ mov(eax, Operand(esp, 0));
- // Restore the arguments count and leave the construct frame.
+ // Restore the arguments count and leave the construct frame. The arguments
+ // count is stored below the receiver and the new.target.
__ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
+ int offset = (use_new_target ? 2 : 1) * kPointerSize;
+ __ mov(ebx, Operand(esp, offset));
// Leave construct frame.
}
@@ -491,12 +499,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@@ -538,9 +551,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ dec(ecx);
__ j(greater_equal, &loop);
- __ inc(eax); // Pushed new.target.
-
-
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@@ -564,7 +574,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(ebx, Operand(esp, 0));
+ // Get arguments count, skipping over new.target.
+ __ mov(ebx, Operand(esp, kPointerSize));
}
__ pop(ecx); // Return address.
@@ -1052,13 +1063,22 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
__ mov(key, Operand(ebp, indexOffset));
__ jmp(&entry);
__ bind(&loop);
__ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+ int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
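+ // The vector-based KeyedLoadIC expects the slot (as a Smi) and the vector
+ // itself in fixed registers.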
+ __ mov(slot, Immediate(Smi::FromInt(index)));
+ __ mov(vector, Immediate(feedback_vector));
+ Handle<Code> ic =
+ KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
@@ -1577,6 +1597,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
+
+ // If the function is strong we need to throw an error.
+ Label no_strong_error;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
+ 1 << SharedFunctionInfo::kStrongModeBitWithinByte);
+ __ j(equal, &no_strong_error, Label::kNear);
+
+ // What we really care about is the required number of arguments.
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
+ __ SmiUntag(ecx);
+ __ cmp(eax, ecx);
+ __ j(greater_equal, &no_strong_error, Label::kNear);
+
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ }
+
+ __ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Copy receiver and all actual arguments.
@@ -1690,7 +1731,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
#undef __
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index f3d31cec20..875b798bda 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -106,15 +106,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetEnvironmentParameterCount();
+ int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
- eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ eax.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetEnvironmentParameterRegister(i));
+ __ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@@ -333,20 +333,15 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
+ // With careful management, we won't have to save slot and vector on
+ // the stack. Simply handle the possibly missing case first.
+ // TODO(mvstanton): this code can be more efficient.
+ __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(isolate()->factory()->the_hole_value()));
+ __ j(equal, &miss);
+ __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+ __ ret(0);
- if (FLAG_vector_ics) {
- // With careful management, we won't have to save slot and vector on
- // the stack. Simply handle the possibly missing case first.
- // TODO(mvstanton): this code can be more efficient.
- __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(isolate()->factory()->the_hole_value()));
- __ j(equal, &miss);
- __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
- __ ret(0);
- } else {
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
- ebx, &miss);
- }
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -393,9 +388,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
- DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+ result.is(LoadDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@@ -420,7 +414,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- CHECK(!has_new_target());
// The key is in edx and the parameter count is in eax.
DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
@@ -487,8 +480,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[8] : receiver displacement
// esp[12] : function
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -517,8 +508,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ebx = parameter count (tagged)
__ mov(ebx, Operand(esp, 1 * kPointerSize));
- CHECK(!has_new_target());
-
// Check if the calling frame is an arguments adaptor frame.
// TODO(rossberg): Factor out some of the bits that are shared with the other
// Generate* functions.
@@ -591,9 +580,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ jmp(&instantiate, Label::kNear);
__ bind(&has_mapped_parameters);
- __ mov(
- edi,
- Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX)));
+ __ mov(edi, Operand(edi, Context::SlotOffset(
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
__ bind(&instantiate);
// eax = address of new object (tagged)
@@ -759,18 +747,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- if (has_new_target()) {
- // If the constructor was [[Call]]ed, the call will not push a new.target
- // onto the stack. In that case the arguments array we construct is bogus,
- // bu we do not care as the constructor throws immediately.
- __ cmp(ecx, Immediate(Smi::FromInt(0)));
- Label skip_decrement;
- __ j(equal, &skip_decrement);
- // Subtract 1 from smi-tagged arguments count.
- __ sub(ecx, Immediate(2));
- __ bind(&skip_decrement);
- }
-
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
@@ -850,9 +826,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// esp[0] : return address
- // esp[4] : index of rest parameter
- // esp[8] : number of parameters
- // esp[12] : receiver displacement
+ // esp[4] : language mode
+ // esp[8] : index of rest parameter
+ // esp[12] : number of parameters
+ // esp[16] : receiver displacement
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -863,13 +840,13 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 2 * kPointerSize), ecx);
+ __ mov(Operand(esp, 3 * kPointerSize), ecx);
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 3 * kPointerSize), edx);
+ __ mov(Operand(esp, 4 * kPointerSize), edx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+ __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@@ -1341,7 +1318,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects;
+ Label runtime_call, check_unequal_objects;
Condition cc = GetCondition();
Label miss;
@@ -1375,26 +1352,39 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- Label check_for_nan;
__ cmp(edx, isolate()->factory()->undefined_value());
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
+ if (is_strong(strength())) {
+ // In strong mode, this comparison must throw, so call the runtime.
+ __ j(equal, &runtime_call, Label::kFar);
+ } else {
+ Label check_for_nan;
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
}
// Test for NaN. Compare heap numbers in a general way,
- // to hanlde NaNs correctly.
+ // to handle NaNs correctly.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(isolate()->factory()->heap_number_map()));
__ j(equal, &generic_heap_number_comparison, Label::kNear);
if (cc != equal) {
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
+ __ cmpb(ecx, static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE));
+ __ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
- __ CmpObjectType(eax, SYMBOL_TYPE, ecx);
- __ j(equal, &not_identical);
+ __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+ __ j(equal, &runtime_call, Label::kFar);
+ if (is_strong(strength())) {
+ // We have already tested for smis and heap numbers, so if both
+ // arguments are not strings we must proceed to the slow case.
+ __ test(ecx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &runtime_call, Label::kFar);
+ }
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -1551,7 +1541,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label not_both_objects;
Label return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
@@ -1560,11 +1549,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
+ __ j(not_zero, &runtime_call, Label::kNear);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects, Label::kNear);
+ __ j(below, &runtime_call, Label::kNear);
__ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects, Label::kNear);
+ __ j(below, &runtime_call, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
// The and of the undetectable flags is 1 if and only if they are equal.
@@ -1581,8 +1570,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
__ ret(0); // rax, rdx were pushed
- __ bind(&not_both_objects);
}
+ __ bind(&runtime_call);
// Push arguments below the return address.
__ pop(ecx);
@@ -1594,7 +1583,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == equal) {
builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
- builtin = Builtins::COMPARE;
+ builtin =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
__ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
}
@@ -1943,6 +1933,11 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
factory->allocation_site_map());
__ j(not_equal, &miss);
+ // Increment the call count for monomorphic function calls.
+ __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
__ mov(ebx, ecx);
__ mov(edx, edi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
@@ -2002,6 +1997,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(edi, &extra_checks_or_miss);
+ // Increment the call count for monomorphic function calls.
+ __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2073,6 +2073,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Update stats.
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+ // Initialize the call counter.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+
// Store the function. Use a stub since we need a frame for allocation.
// ebx - vector
// edx - slot
@@ -2652,9 +2657,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ push(VectorLoadICDescriptor::VectorRegister());
- __ push(VectorLoadICDescriptor::SlotRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ push(LoadWithVectorDescriptor::VectorRegister());
+ __ push(LoadDescriptor::SlotRegister());
}
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
@@ -2671,9 +2676,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ mov(index_, eax);
}
__ pop(object_);
- if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
- __ pop(VectorLoadICDescriptor::SlotRegister());
- __ pop(VectorLoadICDescriptor::VectorRegister());
+ if (embed_mode == PART_OF_IC_HANDLER) {
+ __ pop(LoadDescriptor::SlotRegister());
+ __ pop(LoadWithVectorDescriptor::VectorRegister());
}
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -3300,7 +3305,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4074,15 +4079,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawLoadStub stub(isolate(), state());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorRawKeyedLoadStub stub(isolate());
+ EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4199,21 +4204,19 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
- Register name = VectorLoadICDescriptor::NameRegister(); // ecx
- Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
- Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
+ Register name = LoadWithVectorDescriptor::NameRegister(); // ecx
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
Register scratch = edi;
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4248,21 +4251,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
- Register key = VectorLoadICDescriptor::NameRegister(); // ecx
- Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
- Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
+ Register key = LoadWithVectorDescriptor::NameRegister(); // ecx
+ Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
Register feedback = edi;
__ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4288,7 +4291,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &try_poly_name);
Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@@ -4306,6 +4309,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ VectorKeyedStoreICStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Label miss;
+
+ // TODO(mvstanton): Implement.
+ __ bind(&miss);
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, ebx);
CallICStub stub(isolate(), state());
@@ -5083,6 +5138,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index 1321461a14..f9f079f6f9 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -626,6 +626,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/cpu-x87.cc b/deps/v8/src/x87/cpu-x87.cc
index 03816dff6b..84e385dc47 100644
--- a/deps/v8/src/x87/cpu-x87.cc
+++ b/deps/v8/src/x87/cpu-x87.cc
@@ -39,6 +39,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/debug-x87.cc b/deps/v8/src/x87/debug-x87.cc
index 0bbee14eaa..d0fcc82eaa 100644
--- a/deps/v8/src/x87/debug-x87.cc
+++ b/deps/v8/src/x87/debug-x87.cc
@@ -178,53 +178,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-x87.cc).
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- RegList regs = receiver.bit() | name.bit();
- if (FLAG_vector_ics) {
- regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
- }
- Generate_DebugBreakCallHelper(masm, regs, 0, false);
-}
-
-
-void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-x87.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x87.cc).
- GenerateLoadICDebugBreak(masm);
-}
-
-
-void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC store call (from ic-x87.cc).
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Generate_DebugBreakCallHelper(
- masm, receiver.bit() | name.bit() | value.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- eax : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
-}
-
-
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-x87.cc).
// ----------- S t a t e -------------
@@ -285,8 +238,6 @@ void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
@@ -325,6 +276,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index edc08abe6f..533ce1abe6 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -462,7 +462,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No out-of-line constant pool support.
+ // No embedded constant pool support.
UNREACHABLE();
}
@@ -470,6 +470,7 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index 7e0a07503f..009bebbc27 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -726,6 +726,21 @@ int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
case 0:
mnem = "fadd_d";
break;
+ case 1:
+ mnem = "fmul_d";
+ break;
+ case 4:
+ mnem = "fsub_d";
+ break;
+ case 5:
+ mnem = "fsubr_d";
+ break;
+ case 6:
+ mnem = "fdiv_d";
+ break;
+ case 7:
+ mnem = "fdivr_d";
+ break;
default:
UnimplementedInstruction();
}
@@ -1292,7 +1307,7 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*data == 0x16) {
data++;
int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
+ get_modrm(*data, &mod, &rm, &regop);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
NameOfCPURegister(regop),
diff --git a/deps/v8/src/x87/frames-x87.cc b/deps/v8/src/x87/frames-x87.cc
index 6091b4599b..557794f3a2 100644
--- a/deps/v8/src/x87/frames-x87.cc
+++ b/deps/v8/src/x87/frames-x87.cc
@@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
index e3876bc722..78209258d6 100644
--- a/deps/v8/src/x87/frames-x87.h
+++ b/deps/v8/src/x87/frames-x87.h
@@ -79,36 +79,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kImplicitReceiverOffset = -5 * kPointerSize;
- static const int kConstructorOffset = kMinInt;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
diff --git a/deps/v8/src/x87/full-codegen-x87.cc b/deps/v8/src/x87/full-codegen-x87.cc
index f1abd5befe..7b1a35dcd7 100644
--- a/deps/v8/src/x87/full-codegen-x87.cc
+++ b/deps/v8/src/x87/full-codegen-x87.cc
@@ -93,10 +93,6 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-x87.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
- handler_table_ =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(function()->handler_count()), TENURED));
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -184,17 +180,17 @@ void FullCodeGenerator::Generate() {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info->scope()->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
// Argument to NewContext is the function, which is still in edi.
if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -210,8 +206,9 @@ void FullCodeGenerator::Generate() {
// Copy parameters into context if necessary.
int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -234,10 +231,48 @@ void FullCodeGenerator::Generate() {
}
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ // The write barrier clobbers the register again; keep it marked as such.
+ }
+ SetVar(this_function_var, edi, ebx, edx);
+ }
+
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ Label non_adaptor_frame;
+ __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &non_adaptor_frame);
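+ // Found an arguments adaptor frame; load its caller's frame pointer instead.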
+ __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
+
+ __ bind(&non_adaptor_frame);
+ __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+
+ Label non_construct_frame, done;
+ __ j(not_equal, &non_construct_frame);
+
+ // Construct frame
+ __ mov(eax,
+ Operand(eax, ConstructFrameConstants::kOriginalConstructorOffset));
+ __ jmp(&done);
+
+ // Non-construct frame
+ __ bind(&non_construct_frame);
+ __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
+
+ __ bind(&done);
+ SetVar(new_target_var, eax, ebx, edx);
+ }
+
// Possibly allocate RestParameters
int rest_index;
@@ -247,16 +282,13 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
- --num_parameters;
- ++rest_index;
- }
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ push(Immediate(Smi::FromInt(num_parameters)));
__ push(Immediate(Smi::FromInt(rest_index)));
+ __ push(Immediate(Smi::FromInt(language_mode())));
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
@@ -293,7 +325,7 @@ void FullCodeGenerator::Generate() {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type, has_new_target);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, eax, ebx, edx);
@@ -318,7 +350,7 @@ void FullCodeGenerator::Generate() {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
@@ -434,7 +466,7 @@ void FullCodeGenerator::EmitReturnSequence() {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
- SetSourcePosition(function()->end_position() - 1);
+ SetReturnPosition(function());
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
@@ -443,9 +475,6 @@ void FullCodeGenerator::EmitReturnSequence() {
__ pop(ebp);
int arg_count = info_->scope()->num_parameters() + 1;
- if (IsSubclassConstructor(info_->function()->kind())) {
- arg_count++;
- }
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
// Check that the size of the code used for returning is large enough
@@ -791,15 +820,16 @@ void FullCodeGenerator::VisitVariableDeclaration(
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
break;
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(StackOperand(variable),
@@ -807,7 +837,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
@@ -818,7 +848,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
@@ -848,25 +878,26 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ mov(StackOperand(variable), result_register());
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
@@ -879,7 +910,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
@@ -896,20 +927,21 @@ void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
- case Variable::UNALLOCATED:
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
// TODO(rossberg)
break;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
UNREACHABLE();
}
}
@@ -986,10 +1018,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ bind(&slow_case);
}
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ SetExpressionPosition(clause);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
+ strength(language_mode())).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1034,9 +1065,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ SetStatementPosition(stmt, SKIP_BREAK);
- SetStatementPosition(stmt);
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
Label loop, exit;
ForIn loop_statement(this, stmt);
@@ -1044,7 +1075,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
- SetExpressionPosition(stmt->enumerable());
+ SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
@@ -1139,7 +1170,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- SetExpressionPosition(stmt->each());
+ SetExpressionAsStatementPosition(stmt->each());
__ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
__ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
@@ -1171,9 +1202,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kForInFilter, 2);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ test(eax, eax);
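+ // Runtime::kForInFilter returns undefined for properties that were filtered out.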
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, loop_statement.continue_label());
__ mov(ebx, eax);
@@ -1183,7 +1214,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), ebx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each());
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
@@ -1243,39 +1274,16 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
-void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
- Comment cnmt(masm_, "[ SuperReference ");
-
- __ mov(LoadDescriptor::ReceiverRegister(),
- Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
- Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
- __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
-
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
- }
-
- __ cmp(eax, isolate()->factory()->undefined_value());
- Label done;
- __ j(not_equal, &done);
- __ CallRuntime(Runtime::kThrowNonMethodError, 0);
- __ bind(&done);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset) {
+ int offset,
+ FeedbackVectorICSlot slot) {
if (NeedsHomeObject(initializer)) {
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(),
Operand(esp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
}
}
@@ -1329,20 +1337,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast);
}
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
-
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
-
- CallLoadIC(mode);
+ // All extension objects were empty and it is safe to use the normal global
+ // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_state);
}
@@ -1408,30 +1405,42 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ // Inside typeof use a regular load, not a contextual load, to avoid
+ // a reference error.
+ CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofState typeof_state) {
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- CallGlobalLoadIC(var->name());
+ EmitGlobalVariableLoad(proxy, typeof_state);
context()->Plug(eax);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1499,16 +1508,20 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
break;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
__ bind(&slow);
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
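+ // Inside typeof a failed lookup must not throw a ReferenceError.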
+ Runtime::FunctionId function_id =
+ typeof_state == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ __ CallRuntime(function_id, 2);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1583,7 +1596,6 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->ComputeFlags();
// If any of the keys would store to the elements array, then we shouldn't
@@ -1610,13 +1622,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// result_saved is false the result is in eax.
bool result_saved = false;
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
AccessorTable accessor_table(zone());
int property_index = 0;
+ // store_slot_index points to the vector IC slot for the next store IC used.
+ // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
+ // and must be updated if the number of store ICs emitted here changes.
+ int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1643,7 +1654,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- CallStoreIC(key->LiteralFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ CallStoreIC();
+ } else {
+ CallStoreIC(key->LiteralFeedbackId());
+ }
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1651,6 +1667,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ }
CallStoreIC();
}
} else {
@@ -1662,7 +1681,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1696,9 +1716,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(it->second->getter, 2);
+ EmitSetHomeObjectIfNeeded(
+ it->second->getter, 2,
+ expr->SlotForHomeObject(it->second->getter, &store_slot_index));
+
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(it->second->setter, 3);
+ EmitSetHomeObjectIfNeeded(
+ it->second->setter, 3,
+ expr->SlotForHomeObject(it->second->setter, &store_slot_index));
+
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
@@ -1731,7 +1757,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(
+ value, 2, expr->SlotForHomeObject(value, &store_slot_index));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1773,6 +1800,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(eax);
}
+
+ // Verify that compilation exactly consumed the number of store ic slots that
+ // the ObjectLiteral node had to offer.
+ DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1814,8 +1845,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ if (subexpr->IsSpread()) break;
+
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
@@ -1830,7 +1864,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_constant_fast_elements) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
__ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
@@ -1840,16 +1874,41 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
// Store the subexpression value in the array's elements.
- __ mov(ecx, Immediate(Smi::FromInt(i)));
+ __ mov(ecx, Immediate(Smi::FromInt(array_index)));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ // In case the array literal contains spread expressions it has two parts. The
+ // first part is the "static" array which has a literal index and is handled
+ // above. The second part starts at the first spread expression (inclusive);
+ // these elements get appended to the array. Note that the number of elements
+ // an iterable produces is unknown ahead of time.
+ if (array_index < length && result_saved) {
+ __ Drop(1); // literal index
+ __ Pop(eax);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
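+ // eax holds the array; push it as the first argument to the call below.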
+ __ Push(eax);
+ if (subexpr->IsSpread()) {
+ VisitForStackValue(subexpr->AsSpread()->expression());
+ __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ } else {
+ VisitForStackValue(subexpr);
+ __ CallRuntime(Runtime::kAppendElement, 2);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ add(esp, Immediate(kPointerSize)); // literal index
+ __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -1861,9 +1920,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = GetAssignType(property);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1871,8 +1931,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Nothing to do here.
break;
case NAMED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
__ push(result_register());
if (expr->is_compound()) {
__ push(MemOperand(esp, kPointerSize));
@@ -1889,9 +1951,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY:
- VisitForStackValue(property->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Push(result_register());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
@@ -1947,7 +2010,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- SetSourcePosition(expr->position() + 1);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
@@ -1963,14 +2025,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
}
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
+ expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
break;
@@ -1994,6 +2055,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
// Evaluate yielded value first; the initial iterator definition depends on
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
@@ -2078,7 +2141,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(eax); // result
- EnterTryBlock(expr->index(), &l_catch);
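+ // Allocate a new handler table entry for this try block.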
+ int handler_index = NewHandlerTableEntry();
+ EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(eax); // result
__ jmp(&l_suspend);
@@ -2088,7 +2152,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
const int generator_object_depth = kPointerSize + try_block_size;
__ mov(eax, Operand(esp, generator_object_depth));
__ push(eax); // g
- __ push(Immediate(Smi::FromInt(expr->index()))); // handler-index
+ __ push(Immediate(Smi::FromInt(handler_index))); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(l_continuation.pos())));
@@ -2102,7 +2166,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(eax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in eax
- ExitTryBlock(expr->index());
+ ExitTryBlock(handler_index);
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
@@ -2115,11 +2179,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result = receiver[f](arg);
__ bind(&l_call);
__ mov(load_receiver, Operand(esp, kPointerSize));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- }
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
@@ -2135,10 +2197,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Move(load_receiver, eax); // result
__ mov(load_name,
isolate()->factory()->done_string()); // "done"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2149,10 +2209,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(load_receiver); // result
__ mov(load_name,
isolate()->factory()->value_string()); // "value"
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
context()->DropAndPlug(2, eax); // drop iter and g
break;
@@ -2284,52 +2342,45 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(!prop->IsSuperAccess());
__ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL, language_mode());
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ push(Immediate(key->value()));
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ __ push(Immediate(Smi::FromInt(language_mode())));
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
- } else {
- CallIC(ic, prop->PropertyFeedbackId());
- }
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
- SetSourcePosition(prop->position());
-
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ SetExpressionPosition(prop);
+ __ push(Immediate(Smi::FromInt(language_mode())));
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
}
@@ -2348,8 +2399,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2428,7 +2479,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
+ int* used_store_slots) {
// Constructor is in eax.
DCHECK(lit != NULL);
__ push(eax);
@@ -2460,7 +2512,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2);
+ EmitSetHomeObjectIfNeeded(value, 2,
+ lit->SlotForHomeObject(value, used_store_slots));
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2493,8 +2546,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(edx);
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), op, language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2502,17 +2555,18 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorICSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -2522,13 +2576,15 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ push(eax);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
// stack: value, this; eax: home_object
Register scratch = ecx;
Register scratch2 = edx;
@@ -2543,9 +2599,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
case KEYED_SUPER_PROPERTY: {
__ push(eax);
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
Register scratch = ecx;
Register scratch2 = edx;
@@ -2568,6 +2624,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Move(StoreDescriptor::NameRegister(), eax);
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2589,12 +2646,13 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorICSlot slot) {
+ if (var->IsUnallocatedOrGlobalSlot()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
__ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
} else if (var->mode() == LET && op != Token::INIT_LET) {
@@ -2681,16 +2739,18 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
// eax : value
// esp[0] : receiver
-
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2734,11 +2794,14 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(StoreDescriptor::NameRegister()); // Key.
__ pop(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->AssignmentFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->AssignmentFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2747,6 +2810,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
Expression* key = expr->key();
if (key->IsPropertyName()) {
@@ -2755,9 +2820,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::ReceiverRegister(), result_register());
EmitNamedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
EmitNamedSuperPropertyLoad(expr);
}
} else {
@@ -2768,9 +2833,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
EmitKeyedPropertyLoad(expr);
} else {
- VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(expr->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
@@ -2819,30 +2884,31 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ SetExpressionPosition(expr);
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
- SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ push(eax);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ push(eax);
__ push(eax);
__ push(Operand(esp, kPointerSize * 2));
__ push(Immediate(key->value()));
+ __ push(Immediate(Smi::FromInt(language_mode())));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadFromSuper, 4);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2883,23 +2949,24 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
- SetSourcePosition(prop->position());
+ SetExpressionPosition(prop);
// Load the function from the receiver.
- SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
- EmitLoadHomeObject(super_ref);
- __ push(eax);
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
__ push(eax);
__ push(eax);
__ push(Operand(esp, kPointerSize * 2));
VisitForStackValue(prop->key());
+ __ push(Immediate(Smi::FromInt(language_mode())));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+ // - language_mode
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2915,14 +2982,11 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2949,8 +3013,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the enclosing function.
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- // Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+
// Push the language mode.
__ push(Immediate(Smi::FromInt(language_mode())));
@@ -2958,28 +3021,64 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::EmitLoadSuperConstructor() {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperReference* super_ref) {
- Variable* this_var = super_ref->this_var()->var();
+ SuperCallReference* super_call_ref, FeedbackVectorICSlot slot) {
+ Variable* this_var = super_call_ref->this_var()->var();
GetVar(ecx, this_var);
__ cmp(ecx, isolate()->factory()->the_hole_value());
+
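+ // If 'this' is no longer the hole, it has already been initialized; throw.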
Label uninitialized_this;
__ j(equal, &uninitialized_this);
__ push(Immediate(this_var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
- EmitVariableAssignment(this_var, Token::INIT_CONST);
+ EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
+}
+
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in eax) and
+ // the object holding it (returned in edx).
+ __ push(context_register());
+ __ push(Immediate(callee->name()));
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ push(eax); // Function.
+ __ push(edx); // Receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ jmp(&call, Label::kNear);
+ __ bind(&done);
+ // Push function.
+ __ push(eax);
+ // The receiver is implicitly the global receiver. Indicate this by
+ // passing the hole to the call function stub.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ }
}
@@ -2996,33 +3095,29 @@ void FullCodeGenerator::VisitCall(Call* expr) {
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- // Reserved receiver slot.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ PushCalleeAndWithBaseObject(expr);
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // Touch up the stack with the resolved function.
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3035,41 +3130,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax) and
- // the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
- PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot.
+ PushCalleeAndWithBaseObject(expr);
EmitCall(expr);
-
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
@@ -3080,10 +3142,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
- {
- PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
+ VisitForStackValue(property->obj());
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
@@ -3095,9 +3154,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
+ VisitForStackValue(callee);
__ push(Immediate(isolate()->factory()->undefined_value()));
// Emit function call.
EmitCall(expr);
@@ -3119,7 +3176,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- DCHECK(!expr->expression()->IsSuperReference());
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
@@ -3131,7 +3188,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3155,11 +3212,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(eax, new_target_var);
- __ push(eax);
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor();
+ VariableProxy* new_target_proxy = super_call_ref->new_target_var();
+ VisitForStackValue(new_target_proxy);
+
+ EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
@@ -3171,7 +3231,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3197,7 +3257,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
+ EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(eax);
}
@@ -3462,7 +3522,6 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
}
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3485,6 +3544,28 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
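+ // Check the instance type of the object against JS_TYPED_ARRAY_TYPE.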
+ __ CmpObjectType(eax, JS_TYPED_ARRAY_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3733,6 +3814,28 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = nullptr;
+ Label* if_false = nullptr;
+ Label* fall_through = nullptr;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3741,19 +3844,14 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done, not_date_object;
Register object = eax;
Register result = eax;
Register scratch = ecx;
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
-
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
@@ -3761,19 +3859,16 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
+ __ bind(&done);
}
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
context()->Plug(result);
}
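The rewritten EmitDateField drops the inline not-a-date guard (the kThrowNotDateError path is gone, matching the lithium-side switch to CANNOT_DEOPTIMIZE_EAGERLY further down) and keeps only the field dispatch: field 0 reads the time value directly, cached fields are served inline while the date-cache stamp is fresh, and everything else calls the C runtime. A hedged sketch of that decision tree, with the JSDate layout and stamp reduced to stand-ins:

    struct JSDate {
      double value;        // field 0: the time value, always readable inline
      double cache_stamp;  // snapshot of the global stamp when caches were filled
      double cached[8];    // the fields below kFirstUncachedField
    };

    static double g_date_cache_stamp = 0;                          // assumed global stamp
    static double GetDateFieldRuntime(JSDate*, int) { return 0; }  // CallCFunction stand-in

    static double GetDateField(JSDate* date, int index, int first_uncached_field) {
      if (index == 0) return date->value;
      if (index < first_uncached_field &&
          date->cache_stamp == g_date_cache_stamp) {  // stamp still fresh
        return date->cached[index - 1];
      }
      return GetDateFieldRuntime(date, index);        // the &runtime path
    }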
@@ -4073,11 +4168,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- GetVar(eax, new_target_var);
- __ push(eax);
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+
+ // new.target
+ VisitForStackValue(args->at(0));
- EmitLoadSuperConstructor();
+ // .this_function
+ VisitForStackValue(args->at(1));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
__ push(result_register());
// Check if the calling frame is an arguments adaptor frame.
@@ -4096,8 +4195,6 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(ecx);
- // Subtract 1 from arguments count, for new.target.
- __ sub(ecx, Immediate(1));
__ mov(eax, ecx);
__ lea(edx, Operand(edx, ecx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
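EmitDefaultConstructorCallSuper now receives new.target and .this_function as explicit runtime-call arguments, which is why the adaptor-frame path above no longer subtracts one from the argument count, and it resolves the super constructor through Runtime::kGetPrototype. What that call computes, sketched with the function's prototype link as an assumed plain pointer:

    struct JSFunction { JSFunction* proto; };  // stand-in for the real prototype chain

    static JSFunction* SuperConstructorOf(JSFunction* this_function) {
      return this_function->proto;  // Runtime::kGetPrototype on .this_function
    }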
@@ -4500,11 +4597,14 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr == CallRuntime("ReflectConstruct")
+ DCHECK_EQ(1, expr->arguments()->length());
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
+
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
- SuperReference* super_reference = args->at(0)->AsSuperReference();
+ SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
@@ -4513,8 +4613,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- // Push super
- EmitLoadSuperConstructor();
+ // Push super constructor
+ EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
// Push arguments array
@@ -4530,7 +4630,8 @@ void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, eax);
- EmitInitializeThisAfterSuper(super_reference);
+ // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
+ EmitInitializeThisAfterSuper(super_call_ref);
}
@@ -4542,13 +4643,9 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Load the function from the receiver.
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
- } else {
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- }
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
}
@@ -4556,8 +4653,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4581,6 +4677,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
@@ -4605,6 +4702,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
context()->Plug(eax);
}
@@ -4628,10 +4726,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || var->is_this());
- if (var->IsUnallocated()) {
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(SLOPPY)));
@@ -4641,7 +4740,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-global variables is false. 'this' is
// not really a variable, though we implement it as one. The
// subexpression does not have side effects.
- context()->Plug(var->is_this());
+ context()->Plug(is_this);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
@@ -4734,10 +4833,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = GetAssignType(prop);
+ LhsKind assign_type = Property::GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4759,8 +4857,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
__ push(result_register());
__ push(MemOperand(esp, kPointerSize));
__ push(result_register());
@@ -4769,9 +4868,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
- EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ push(result_register());
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
__ push(result_register());
__ push(MemOperand(esp, 2 * kPointerSize));
@@ -4852,9 +4951,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
}
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ if (!is_strong(language_mode())) {
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4882,20 +4983,21 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Record position before stub call.
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// Call stub for +1/-1.
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(
- isolate(), expr->binary_op(), language_mode()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+ strength(language_mode())).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
+ if (is_strong(language_mode())) {
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ }
// Store the value returned in eax.
switch (assign_type) {
case VARIABLE:
@@ -4903,7 +5005,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(eax);
}
@@ -4915,7 +5017,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
+ Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -4924,7 +5026,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ } else {
+ CallStoreIC(expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4962,7 +5069,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic, expr->CountStoreFeedbackId());
+ if (FLAG_vector_stores) {
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ } else {
+ CallIC(ic, expr->CountStoreFeedbackId());
+ }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4978,47 +5090,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- DCHECK(!context()->IsEffect());
- DCHECK(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
- if (FLAG_vector_ics) {
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- }
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(eax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -5094,7 +5165,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
+ SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -5148,9 +5219,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic = CodeFactory::CompareIC(
+ isolate(), op, strength(language_mode())).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -5264,6 +5334,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
__ push(edx);
+
+ ClearPendingMessage();
}
@@ -5286,6 +5358,22 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(edx));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
+ __ mov(Operand::StaticVariable(pending_message_obj), edx);
+}
+
+
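EnterFinallyBlock now saves the pending message and immediately clears the slot, so the finally body cannot observe a stale message; ExitFinallyBlock restores the saved value afterwards. The clearing itself is a single sentinel store, sketched with the isolate slot and hole value as assumed stand-ins:

    #include <cstdint>

    static uintptr_t pending_message_obj;      // per-isolate slot, assumed
    static const uintptr_t kTheHoleValue = 0;  // hole sentinel stand-in

    static void ClearPendingMessageSketch() {
      pending_message_obj = kTheHoleValue;     // mov StaticVariable(slot), the_hole
    }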
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
+ DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(slot)));
+}
+
+
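EmitLoadStoreICSlot moves the feedback slot, encoded as a smi, into the store trampoline's slot register. On this 32-bit target the smi encoding is a one-bit shift; what SmiFromSlot boils down to, assuming that scheme:

    #include <cstdint>

    static intptr_t SmiFromSlotIndex(int index) {
      return static_cast<intptr_t>(index) << 1;  // 31-bit payload, tag bit 0
    }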
#undef __
@@ -5366,6 +5454,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 515d56c659..05fa9b8926 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -16,12 +16,9 @@ const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
+const Register LoadDescriptor::SlotRegister() { return eax; }
-
-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
-
-
-const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
@@ -29,6 +26,12 @@ const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
+
+
+const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
@@ -58,110 +61,102 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return ecx; }
-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi};
- data->Initialize(arraysize(registers), registers, NULL);
+void FastNewContextDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// ToNumberStub invokes a function, and therefore needs a context.
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
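This is the pattern repeated through the rest of the file: Initialize becomes InitializePlatformSpecific, the context register esi drops out of every register list (it is handled implicitly now), and the per-register Representation arrays disappear from the platform side. A reduced sketch of the new shape, with the descriptor types as minimal assumed stand-ins:

    struct Register { int code; };
    constexpr Register eax{0};

    struct CallInterfaceDescriptorData {
      void InitializePlatformSpecific(int count, const Register* registers,
                                      const void* platform_descriptor = nullptr) {
        (void)count; (void)registers; (void)platform_descriptor;  // record registers only
      }
    };

    static void ToNumberSketch(CallInterfaceDescriptorData* data) {
      Register registers[] = {eax};  // context (esi) is no longer listed
      data->InitializePlatformSpecific(1, registers);
    }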
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void NumberToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax, ebx, ecx};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {eax, ebx, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax, ebx, ecx, edx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax, ebx, ecx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx, edx};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {ebx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ebx, edx, edi};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ebx, edx, edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ecx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ecx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi};
- data->Initialize(arraysize(registers), registers, NULL);
+void CallFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi, edx};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Smi()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edi, edx, ebx};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
- Representation::Tagged()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, edx, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
// edx : (only if ebx is not the megamorphic symbol) slot in feedback
@@ -169,208 +164,183 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {esi, eax, edi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax, edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ecx, ebx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ecx, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
- // esi -- context
- Register registers[] = {esi};
- data->Initialize(arraysize(registers), registers, nullptr);
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
}
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
- Register registers[] = {esi, edi, ebx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// stack param count includes the constructor pointer and a single argument
- Register registers[] = {esi, edi, ebx, eax};
- Representation representations[] = {
- Representation::Tagged(), Representation::Tagged(),
- Representation::Tagged(), Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
- CallInterfaceDescriptorData* data) {
+void InternalArrayConstructorConstantArgCountDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
- Register registers[] = {esi, edi};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count includes the constructor pointer and a single argument
- Register registers[] = {esi, edi, eax};
- Representation representations[] = {Representation::Tagged(),
- Representation::Tagged(),
- Representation::Integer32()};
- data->Initialize(arraysize(registers), registers, representations);
+ Register registers[] = {edi, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void ToBooleanDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void BinaryOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, ecx, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+ Register registers[] = {ecx, edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
- Register registers[] = {esi, edx, eax};
- data->Initialize(arraysize(registers), registers, NULL);
+void StringAddDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
ecx, // key
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
ecx, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edx, // name
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edi, // JSFunction
eax, // actual number of arguments
ebx, // expected number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // JSFunction
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
eax, // actual number of arguments
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Integer32(), // actual number of arguments
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- esi, // context
edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
};
- Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- };
- data->Initialize(arraysize(registers), registers, representations);
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ edi, // math rounding function
+ edx, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/x87/lithium-codegen-x87.cc
index a81fd70862..6021ec74e9 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/x87/lithium-codegen-x87.cc
@@ -110,8 +110,8 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native()) {
+ if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
+ !info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
@@ -242,8 +242,9 @@ bool LCodeGen::GeneratePrologue() {
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
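Treating the receiver as parameter index -1 lets the existing offset formula cover it: the receiver sits one slot past the last declared parameter. The arithmetic in isolation, with the frame constants assumed for a 32-bit target:

    static int ParameterOffset(int i, int num_parameters) {
      const int kPointerSize = 4;                    // assumed 32-bit slots
      const int kCallerSPOffset = 2 * kPointerSize;  // assumed frame constant
      return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
    }
    // ParameterOffset(-1, n) == kCallerSPOffset + n * kPointerSize, the receiver slot.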
@@ -883,41 +884,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- default:
- UNREACHABLE();
- }
+ WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
@@ -1250,28 +1219,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (auto function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
}
-
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -2067,18 +2019,13 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
- Label runtime, done;
DCHECK(object.is(result));
DCHECK(object.is(eax));
- __ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
-
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
} else {
+ Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
@@ -2346,8 +2293,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- Handle<Code> code = CodeFactory::BinaryOpIC(
- isolate(), instr->op(), instr->language_mode()).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2779,7 +2726,8 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -3051,7 +2999,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -3143,10 +3092,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = VectorLoadICDescriptor::SlotRegister();
- DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(eax));
AllowDeferredHandleDereference vector_structure_check;
@@ -3159,6 +3107,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ mov(vector_register, vector);
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Immediate(Smi::FromInt(index)));
+}
+
+
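The store counterpart of EmitVectorLoadICRegisters above materializes two values: the feedback vector itself and the slot's array index as a smi. A hedged sketch of the two register payloads, with the vector's slot-to-index mapping reduced to an identity stand-in:

    #include <cstdint>

    struct TypeFeedbackVector {
      int GetIndex(int ic_slot) const { return ic_slot; }  // identity stand-in
    };

    static void SetUpVectorStoreIC(const TypeFeedbackVector& vector, int slot,
                                   const void** vector_reg, intptr_t* slot_reg) {
      *vector_reg = &vector;                                          // the vector constant
      *slot_reg = static_cast<intptr_t>(vector.GetIndex(slot)) << 1;  // Smi::FromInt(index)
    }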
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->global_object())
@@ -3166,11 +3128,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
__ mov(LoadDescriptor::NameRegister(), instr->name());
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3279,12 +3239,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
__ mov(LoadDescriptor::NameRegister(), instr->name());
- if (FLAG_vector_ics) {
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- }
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL,
- instr->hydrogen()->initialization_state()).code();
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3405,7 +3364,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3523,9 +3483,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3750,27 +3710,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
-void LCodeGen::DoTailCallThroughMegamorphicCache(
- LTailCallThroughMegamorphicCache* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register name = ToRegister(instr->name());
- DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(name.is(LoadDescriptor::NameRegister()));
- Register scratch = ebx;
- Register extra = edi;
- DCHECK(!scratch.is(receiver) && !scratch.is(name));
- DCHECK(!extra.is(receiver) && !extra.is(name));
-
- // The probe will tail call to a handler if found.
- // If --vector-ics is on, then it knows to pop the two args first.
- isolate()->stub_cache()->GenerateProbe(masm(), Code::LOAD_IC,
- instr->hydrogen()->flags(), false,
- receiver, name, scratch, extra);
-
- LoadIC::GenerateMiss(masm());
-}
-
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
@@ -4564,10 +4503,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic =
- StoreIC::initialize_stub(isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state());
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4676,7 +4619,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4797,6 +4741,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4814,6 +4762,97 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
}
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen,
+ LMaybeGrowElements* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = eax;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ jmp(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ cmp(ToOperand(current_capacity), Immediate(constant_key));
+ __ j(less_equal, deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ cmp(ToRegister(key), Immediate(constant_capacity));
+ __ j(greater_equal, deferred->entry());
+ } else {
+ __ cmp(ToRegister(key), ToRegister(current_capacity));
+ __ j(greater_equal, deferred->entry());
+ }
+
+ __ mov(result, ToOperand(instr->elements()));
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = eax;
+ __ Move(result, Immediate(0));
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ mov(result, ToOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ mov(ebx, ToImmediate(key, Representation::Smi()));
+ } else {
+ __ Move(ebx, ToRegister(key));
+ __ SmiTag(ebx);
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+}
+
+
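The final smi test reads as follows: with a zero smi tag, test against kSmiTagMask sets the zero flag exactly when the stub returned a smi, which is the signal that the elements backing store went to dictionary mode. The predicate, assuming the standard one-bit tag:

    #include <cstdint>

    static bool StubSignalsDictionaryMode(uintptr_t result) {
      const uintptr_t kSmiTagMask = 1;     // assumed one-bit smi tag, tag value 0
      return (result & kSmiTagMask) == 0;  // 'equal' after test -> DeoptimizeIf
    }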
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
@@ -6386,6 +6425,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/lithium-codegen-x87.h b/deps/v8/src/x87/lithium-codegen-x87.h
index 5d913c83f6..9157779a95 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/x87/lithium-codegen-x87.h
@@ -30,7 +30,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -135,6 +134,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -247,7 +247,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -337,6 +336,8 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
@@ -362,7 +363,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.cc b/deps/v8/src/x87/lithium-gap-resolver-x87.cc
index 6a6427550c..b2f9b263c6 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.cc
+++ b/deps/v8/src/x87/lithium-gap-resolver-x87.cc
@@ -445,6 +445,7 @@ void LGapResolver::EmitSwap(int index) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/x87/lithium-x87.cc
index 1b4aa1a7bb..a57aa91576 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/x87/lithium-x87.cc
@@ -1151,10 +1151,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op =
- UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), esi);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
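Operand slot 0 holds the target and slot 1 the now-explicit context, so descriptor register k is found at operand k + kImplicitRegisterParameterCount; the constant itself is introduced in lithium-x87.h below. The index mapping in isolation:

    // Operand 0: target; operand 1: context; descriptor registers follow.
    static const int kImplicitRegisterParameterCount = 2;
    static int OperandIndexFor(int descriptor_register_k) {
      return descriptor_register_k + kImplicitRegisterParameterCount;
    }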
@@ -1164,20 +1172,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
- HTailCallThroughMegamorphicCache* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* receiver_register =
- UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
- LOperand* name_register =
- UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
- // Not marked as call. It can't deoptimize, and it never returns.
- return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
-}
-
-
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
@@ -1835,7 +1829,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* date = UseFixed(instr->value(), eax);
LDateField* result =
new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+ return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2139,7 +2133,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2193,7 +2187,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
@@ -2265,7 +2259,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
@@ -2361,8 +2355,15 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(context, object, key, value);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result = new (zone())
+ LStoreKeyedGeneric(context, object, key, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2397,6 +2398,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, eax);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool is_external_location = instr->access().IsExternalMemory() &&
@@ -2458,9 +2474,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
LStoreNamedGeneric* result =
- new(zone()) LStoreNamedGeneric(context, object, value);
+ new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2537,7 +2559,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2668,7 +2690,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
+ chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2738,6 +2760,7 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/x87/lithium-x87.h
index 1422f65ade..1eedba1f48 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/x87/lithium-x87.h
@@ -122,6 +122,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -155,7 +156,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
- V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -485,26 +485,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
-class LTailCallThroughMegamorphicCache final
- : public LTemplateInstruction<0, 3, 0> {
- public:
- LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
- LOperand* name) {
- inputs_[0] = context;
- inputs_[1] = receiver;
- inputs_[2] = name;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* name() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
- "tail-call-through-megamorphic-cache")
- DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1202,6 +1182,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ Strength strength() { return hydrogen()->strength(); }
+
LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1567,7 +1549,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
+ Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1887,8 +1869,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
- : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
- DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ : inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1896,6 +1882,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2197,17 +2187,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2249,22 +2244,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* object,
- LOperand* key,
- LOperand* value) {
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2323,6 +2320,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
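[Review note] In the header hunks above, the LTemplateInstruction template
arguments encode each instruction's operand shape. A simplified sketch of
the base class, reconstructed from the lithium.h conventions (member
details are assumptions):

  // <R, I, T> = number of results, inputs, and temp operands.
  template <int R, int I, int T>
  class LTemplateInstruction : public LTemplateResultInstruction<R> {
   protected:
    EmbeddedContainer<LOperand*, I> inputs_;
    EmbeddedContainer<LOperand*, T> temps_;
  };

Hence LStoreNamedGeneric moves from <0, 3, 0> to <0, 3, 2> and
LStoreKeyedGeneric from <0, 4, 0> to <0, 4, 2> as each gains the slot and
vector temps, while the new LMaybeGrowElements is <1, 5, 0>: one result
(fixed to eax by the chunk builder), five inputs, no temps.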
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 487790d9b6..46c1830c05 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -744,9 +744,11 @@ void MacroAssembler::X87SetRC(int rc) {
void MacroAssembler::X87SetFPUCW(int cw) {
+ RecordComment("-- X87SetFPUCW start --");
push(Immediate(cw));
fldcw(MemOperand(esp, 0));
add(esp, Immediate(kPointerSize));
+ RecordComment("-- X87SetFPUCW end--");
}
@@ -1114,6 +1116,7 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
mov(scratch, r0);
shr(scratch, 16);
xor_(r0, scratch);
+ and_(r0, 0x3fffffff);
}
@@ -1714,7 +1717,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
add(start_offset, Immediate(kPointerSize));
bind(&entry);
cmp(start_offset, end_offset);
- j(less, &loop);
+ j(below, &loop);
}
@@ -3101,6 +3104,7 @@ void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
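[Review note] Two behavioral fixes above: the new and_(r0, 0x3fffffff) in
GetNumberHash clamps the hash to 30 bits so the result always fits in a
Smi, and j(below, ...) makes InitializeFieldsWithFiller compare offsets as
unsigned rather than signed values. For reference, a C sketch of the
integer-hash sequence this assembly implements, reconstructed from V8's
ComputeIntegerHash (treat the exact steps as an assumption):

  #include <stdint.h>

  // Thomas Wang-style 32-bit hash used for seeded number dictionaries.
  uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;        // i.e. hash + (hash << 3) + (hash << 11)
    hash = hash ^ (hash >> 16);
    return hash & 0x3fffffff;  // the clamp this hunk adds on x87
  }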
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/x87/regexp-macro-assembler-x87.cc
index ddad44d864..e043f6ea6e 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/x87/regexp-macro-assembler-x87.cc
@@ -1223,6 +1223,7 @@ void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/zone-containers.h b/deps/v8/src/zone-containers.h
index fc57c04adc..8daf0dd657 100644
--- a/deps/v8/src/zone-containers.h
+++ b/deps/v8/src/zone-containers.h
@@ -66,11 +66,13 @@ class ZoneLinkedList : public std::list<T, zone_allocator<T>> {
// A wrapper subclass of std::priority_queue to make it easy to construct one
// that uses a zone allocator.
template <typename T, typename Compare = std::less<T>>
-class ZonePriorityQueue : public std::priority_queue<T, ZoneVector<T>> {
+class ZonePriorityQueue
+ : public std::priority_queue<T, ZoneVector<T>, Compare> {
public:
// Constructs an empty priority queue.
explicit ZonePriorityQueue(Zone* zone)
- : std::priority_queue<T, ZoneVector<T>>(Compare(), ZoneVector<T>(zone)) {}
+ : std::priority_queue<T, ZoneVector<T>, Compare>(Compare(),
+ ZoneVector<T>(zone)) {}
};
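[Review note] Before this fix, ZonePriorityQueue accepted a Compare
parameter but derived from std::priority_queue<T, ZoneVector<T>>, which
defaults the comparator to std::less<T>, so a user-supplied Compare was
silently ignored. A minimal usage sketch, assuming a Zone* zone is in
scope and the V8-internal headers are included:

  // With the fix this behaves as a min-heap; before it, Compare was
  // dropped and top() would have returned the maximum instead.
  ZonePriorityQueue<int, std::greater<int>> min_heap(zone);
  min_heap.push(3);
  min_heap.push(1);
  min_heap.push(2);
  DCHECK_EQ(1, min_heap.top());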